repo_name (string, 6–130 chars) | hexsha (list) | file_path (list) | code (list) | apis (list) | possible_versions (list) |
---|---|---|---|---|---|
alvinlin-pn/tensorflow |
[
"c9cd1784bf287543d89593ca1432170cdbf694de",
"c9cd1784bf287543d89593ca1432170cdbf694de",
"c9cd1784bf287543d89593ca1432170cdbf694de"
] |
[
"tensorflow/python/keras/layers/preprocessing/text_vectorization_test.py",
"tensorflow/python/keras/layers/pooling_test.py",
"tensorflow/python/data/ops/multi_device_iterator_ops.py"
] |
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras text vectorization preprocessing layer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.python.keras.layers.preprocessing import text_vectorization\nfrom tensorflow.python.keras.layers.preprocessing import text_vectorization_v1\n\nfrom tensorflow.python import keras\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.keras import keras_parameterized\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils\nfrom tensorflow.python.keras.saving import saved_model_experimental as saving\nfrom tensorflow.python.keras.utils import generic_utils\nfrom tensorflow.python.keras.utils.generic_utils import CustomObjectScope\nfrom tensorflow.python.ops import gen_string_ops\nfrom tensorflow.python.ops.ragged import ragged_string_ops\nfrom tensorflow.python.platform import test\n\n\ndef get_layer_class():\n if context.executing_eagerly():\n return text_vectorization.TextVectorization\n else:\n return text_vectorization_v1.TextVectorization\n\n\n# TODO(askerryryan): Update all tests to providee one string per example since\n# pre-tokenized input is not officially supported by the API.\n@keras_parameterized.run_all_keras_modes\nclass TextVectorizationLayerTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest\n ):\n\n # TODO(askerryryan): Fix tf-idf weight setting and add test for TFIDF mode.\n @parameterized.named_parameters(\n {\n \"testcase_name\":\n \"test_simple_tokens_int_mode\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. 
This ensures that the vocab accumulator\n # is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": None,\n \"standardize\": None,\n \"split\": None,\n \"output_mode\": text_vectorization.INT\n },\n \"expected_output\": [[2], [3], [4], [5], [5], [4], [2], [1]],\n },\n {\n \"testcase_name\":\n \"test_documents_int_mode\",\n \"vocab_data\":\n np.array([[\"fire earth earth\"], [\"earth earth\"], [\"wind wind\"],\n [\"and wind and\"]]),\n \"input_data\":\n np.array([[\"earth wind and\"], [\"fire fire\"], [\"and earth\"],\n [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": None,\n \"standardize\": None,\n \"split\": text_vectorization.SPLIT_ON_WHITESPACE,\n \"output_mode\": text_vectorization.INT\n },\n \"expected_output\": [[2, 3, 4], [5, 5, 0], [4, 2, 0], [1, 0, 0]],\n },\n {\n \"testcase_name\":\n \"test_simple_tokens_binary_mode\",\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"standardize\": None,\n \"split\": None,\n \"output_mode\": text_vectorization.BINARY\n },\n \"expected_output\": [[0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0],\n [0, 1, 0, 0, 0], [1, 0, 0, 0, 0]],\n },\n {\n \"testcase_name\":\n \"test_documents_binary_mode\",\n \"vocab_data\":\n np.array([[\"fire earth earth\"], [\"earth earth\"], [\"wind wind\"],\n [\"and wind and\"]]),\n \"input_data\":\n np.array([[\"earth wind\"], [\"and\"], [\"fire fire\"],\n [\"earth michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"standardize\": None,\n \"split\": text_vectorization.SPLIT_ON_WHITESPACE,\n \"output_mode\": text_vectorization.BINARY\n },\n \"expected_output\": [[0, 1, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1],\n [1, 1, 0, 0, 0]],\n },\n {\n \"testcase_name\":\n \"test_simple_tokens_count_mode\",\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"standardize\": None,\n \"split\": None,\n \"output_mode\": text_vectorization.COUNT\n },\n \"expected_output\": [[0, 1, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 1, 0],\n [0, 1, 0, 0, 0], [1, 0, 0, 0, 0]],\n },\n {\n \"testcase_name\":\n \"test_documents_count_mode\",\n \"vocab_data\":\n np.array([[\"fire earth earth\"], [\"earth earth\"], [\"wind wind\"],\n [\"and wind and\"]]),\n \"input_data\":\n np.array([[\"earth wind\"], [\"and\"], [\"fire fire\"],\n [\"earth michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"standardize\": None,\n \"split\": text_vectorization.SPLIT_ON_WHITESPACE,\n \"output_mode\": text_vectorization.COUNT\n },\n \"expected_output\": [[0, 1, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 2],\n [1, 1, 0, 0, 0]],\n },\n {\n \"testcase_name\":\n \"test_documents_idf_mode\",\n \"vocab_data\":\n np.array([[\"fire 
earth earth\"], [\"earth earth\"], [\"wind wind\"],\n [\"and wind and\"]]),\n \"input_data\":\n np.array([[\"earth wind\"], [\"and\"], [\"fire fire\"],\n [\"earth michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"standardize\": None,\n \"split\": text_vectorization.SPLIT_ON_WHITESPACE,\n \"output_mode\": text_vectorization.TFIDF\n },\n \"expected_output\":\n [[0., 0.847298, 0.847298, 0., 0.], [0., 0., 0., 1.098612, 0.],\n [0., 0., 0., 0., 2.197225], [1.609438, 0.847298, 0., 0., 0.]],\n },\n )\n def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,\n expected_output):\n cls = get_layer_class()\n if kwargs.get(\"output_mode\") == text_vectorization.TFIDF:\n expected_output_dtype = dtypes.float32\n else:\n expected_output_dtype = dtypes.int64\n\n with CustomObjectScope({\"TextVectorization\": cls}):\n output_data = testing_utils.layer_test(\n cls,\n kwargs=kwargs,\n input_shape=(None),\n input_data=input_data,\n input_dtype=dtypes.string,\n expected_output_dtype=expected_output_dtype,\n validate_training=False,\n adapt_data=vocab_data)\n self.assertAllClose(expected_output, output_data)\n\n\n@keras_parameterized.run_all_keras_modes\nclass TextVectorizationPreprocessingTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_normalization(self):\n input_array = np.array([[\"Earth\", \"wInD\", \"aNd\", \"firE\"],\n [\"fire|\", \"an<>d\", \"{earth}\", \"michigan@%$\"]])\n expected_output = np.array([[b\"earth\", b\"wind\", b\"and\", b\"fire\"],\n [b\"fire\", b\"and\", b\"earth\", b\"michigan\"]])\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=text_vectorization.LOWER_AND_STRIP_PUNCTUATION,\n split=None,\n ngrams=None,\n output_mode=None)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_custom_normalization(self):\n input_array = np.array([[\"Earth\", \"wInD\", \"aNd\", \"firE\"],\n [\"fire|\", \"an<>d\", \"{earth}\", \"michigan@%$\"]])\n expected_output = np.array(\n [[b\"earth\", b\"wind\", b\"and\", b\"fire\"],\n [b\"fire|\", b\"an<>d\", b\"{earth}\", b\"michigan@%$\"]])\n\n custom_standardization = gen_string_ops.string_lower\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=custom_standardization,\n split=None,\n ngrams=None,\n output_mode=None)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_string_splitting(self):\n input_array = np.array([[\"earth wind and fire\"],\n [\"\\tfire\\tand\\nearth michigan \"]])\n expected_output = [[b\"earth\", b\"wind\", b\"and\", b\"fire\"],\n [b\"fire\", b\"and\", b\"earth\", b\"michigan\"]]\n\n input_data = keras.Input(shape=(1,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=None,\n split=text_vectorization.SPLIT_ON_WHITESPACE,\n ngrams=None,\n output_mode=None)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_custom_string_splitting(self):\n input_array = np.array([[\"earth>wind>and fire\"],\n [\"\\tfire>and\\nearth>michigan\"]])\n 
expected_output = [[b\"earth\", b\"wind\", b\"and fire\"],\n [b\"\\tfire\", b\"and\\nearth\", b\"michigan\"]]\n\n custom_split = lambda x: ragged_string_ops.string_split_v2(x, sep=\">\")\n input_data = keras.Input(shape=(1,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=None,\n split=custom_split,\n ngrams=None,\n output_mode=None)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_single_ngram_value(self):\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n # pyformat: disable\n expected_output = [[b\"earth\", b\"wind\", b\"and\", b\"fire\",\n b\"earth wind\", b\"wind and\", b\"and fire\",\n b\"earth wind and\", b\"wind and fire\"],\n [b\"fire\", b\"and\", b\"earth\", b\"michigan\",\n b\"fire and\", b\"and earth\", b\"earth michigan\",\n b\"fire and earth\", b\"and earth michigan\"]]\n # pyformat: enable\n\n input_data = keras.Input(shape=(4,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=None,\n split=None,\n ngrams=3,\n output_mode=None)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_multiple_ngram_values(self):\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n # pyformat: disable\n expected_output = [[b\"earth wind\", b\"wind and\", b\"and fire\",\n b\"earth wind and\", b\"wind and fire\"],\n [b\"fire and\", b\"and earth\", b\"earth michigan\",\n b\"fire and earth\", b\"and earth michigan\"]]\n # pyformat: enable\n\n input_data = keras.Input(shape=(4,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=None,\n split=None,\n ngrams=(2, 3),\n output_mode=None)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_string_splitting_with_non_1d_array_fails(self):\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=None,\n split=text_vectorization.SPLIT_ON_WHITESPACE,\n output_mode=None)\n with self.assertRaisesRegex(RuntimeError,\n \".*tokenize strings, the first dimension.*\"):\n _ = layer(input_data)\n\n\n@keras_parameterized.run_all_keras_modes\nclass TextVectorizationOutputTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_int_output(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=None,\n split=None,\n output_mode=text_vectorization.INT)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_vocab_appending(self):\n vocab_data = [[\"earth\", \"wind\"], [\"and\", \"fire\"]]\n input_array = 
np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=5,\n standardize=None,\n split=None,\n output_mode=text_vectorization.INT)\n layer.set_vocabulary(vocab_data[0])\n layer.set_vocabulary(vocab_data[1], append=True)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllClose(expected_output, output_dataset)\n\n def test_int_output_densifies_with_zeros(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n # Create an input array that has 5 elements in the first example and 4 in\n # the second. This should output a 2x5 tensor with a padding value in the\n # second example.\n input_array = np.array([[\"earth wind and also fire\"],\n [\"fire and earth michigan\"]])\n expected_output = [[2, 3, 4, 1, 5], [5, 4, 2, 1, 0]]\n\n # The input shape here is explicitly 1 because we're tokenizing.\n input_data = keras.Input(shape=(1,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=None,\n split=text_vectorization.SPLIT_ON_WHITESPACE,\n output_mode=text_vectorization.INT)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_densifies_with_zeros_and_pads(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n # Create an input array that has 5 elements in the first example and 4 in\n # the second. This should output a 2x6 tensor with a padding value in the\n # second example, since output_sequence_length is set to 6.\n input_array = np.array([[\"earth wind and also fire\"],\n [\"fire and earth michigan\"]])\n expected_output = [[2, 3, 4, 1, 5, 0], [5, 4, 2, 1, 0, 0]]\n\n # The input shape here is explicitly 1 because we're tokenizing.\n input_data = keras.Input(shape=(1,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=None,\n split=text_vectorization.SPLIT_ON_WHITESPACE,\n output_mode=text_vectorization.INT,\n output_sequence_length=6)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_densifies_with_zeros_and_strips(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n # Create an input array that has 5 elements in the first example and 4 in\n # the second. 
This should output a 2x3 tensor with a padding value in the\n # second example, since output_sequence_length is set to 3.\n input_array = np.array([[\"earth wind and also fire\"],\n [\"fire and earth michigan\"]])\n expected_output = [[2, 3, 4], [5, 4, 2]]\n\n # The input shape here is explicitly 1 because we're tokenizing.\n input_data = keras.Input(shape=(1,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=None,\n split=text_vectorization.SPLIT_ON_WHITESPACE,\n output_mode=text_vectorization.INT,\n output_sequence_length=3)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_dynamically_strips_and_pads(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n # Create an input array that has 5 elements in the first example and 4 in\n # the second. This should output a 2x3 tensor with a padding value in the\n # second example, since output_sequence_length is set to 3.\n input_array = np.array([[\"earth wind and also fire\"],\n [\"fire and earth michigan\"]])\n expected_output = [[2, 3, 4], [5, 4, 2]]\n\n # The input shape here is explicitly 1 because we're tokenizing.\n input_data = keras.Input(shape=(1,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=None,\n split=text_vectorization.SPLIT_ON_WHITESPACE,\n output_mode=text_vectorization.INT,\n output_sequence_length=3)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n # Create an input array that has 1 element in the first example and 2 in\n # the second. 
This should output a 2x3 tensor with a padding value in the\n # second example, since output_sequence_length is set to 3.\n input_array_2 = np.array([[\"wind\"], [\"fire and\"]])\n expected_output_2 = [[3, 0, 0], [5, 4, 0]]\n output_dataset = model.predict(input_array_2)\n self.assertAllEqual(expected_output_2, output_dataset)\n\n def test_binary_output_hard_maximum(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"and\", \"earth\", \"michigan\"]])\n\n # pyformat: disable\n expected_output = [[0, 1, 1, 1, 0, 0],\n [1, 1, 0, 1, 0, 0]]\n # pyformat: enable\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=6,\n standardize=None,\n split=None,\n output_mode=text_vectorization.BINARY,\n pad_to_max_tokens=True)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_soft_maximum(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"and\", \"earth\", \"michigan\"]])\n\n # pyformat: disable\n expected_output = [[0, 1, 1, 1, 0],\n [1, 1, 0, 1, 0]]\n # pyformat: enable\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=10,\n standardize=None,\n split=None,\n output_mode=text_vectorization.BINARY,\n pad_to_max_tokens=False)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_count_output_hard_maximum(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"and\", \"earth\", \"michigan\"]])\n\n # pyformat: disable\n expected_output = [[0, 2, 1, 1, 0, 0],\n [2, 1, 0, 1, 0, 0]]\n # pyformat: enable\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=6,\n standardize=None,\n split=None,\n output_mode=text_vectorization.COUNT)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_count_output_soft_maximum(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"and\", \"earth\", \"michigan\"]])\n\n # pyformat: disable\n expected_output = [[0, 2, 1, 1, 0],\n [2, 1, 0, 1, 0]]\n # pyformat: enable\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=10,\n standardize=None,\n split=None,\n output_mode=text_vectorization.COUNT,\n pad_to_max_tokens=False)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_tfidf_output_hard_maximum(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n tfidf_data = [.5, .25, .2, .125]\n input_array = np.array([[\"earth\", \"wind\", \"and\", 
\"earth\"],\n [\"ohio\", \"fire\", \"earth\", \"michigan\"]])\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n expected_output = [[ 0, 1, .25, .2, 0, 0],\n [.1, .5, 0, 0, .125, 0]]\n # pylint: enable=bad-whitespace\n # pyformat: enable\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=6,\n standardize=None,\n split=None,\n output_mode=text_vectorization.TFIDF,\n pad_to_max_tokens=True)\n layer.set_vocabulary(vocab_data, df_data=tfidf_data, oov_df_value=.05)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllClose(expected_output, output_dataset)\n\n def test_tfidf_output_soft_maximum(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n tfidf_data = [.5, .25, .2, .125]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"fire\", \"earth\", \"michigan\"]])\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n expected_output = [[ 0, 1, .25, .2, 0],\n [.1, .5, 0, 0, .125]]\n # pylint: enable=bad-whitespace\n # pyformat: enable\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=10,\n standardize=None,\n split=None,\n output_mode=text_vectorization.TFIDF,\n pad_to_max_tokens=False)\n layer.set_vocabulary(vocab_data, df_data=tfidf_data, oov_df_value=.05)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllClose(expected_output, output_dataset)\n\n def test_tfidf_appending(self):\n vocab_data = [[\"earth\", \"wind\"], [\"and\", \"fire\"]]\n tfidf_data = [[.5, .25], [.2, .125]]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"fire\", \"earth\", \"michigan\"]])\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n expected_output = [[ 0, 1, .25, .2, 0],\n [.1, .5, 0, 0, .125]]\n # pylint: enable=bad-whitespace\n # pyformat: enable\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=5,\n standardize=None,\n split=None,\n output_mode=text_vectorization.TFIDF)\n layer.set_vocabulary(vocab_data[0], df_data=tfidf_data[0], oov_df_value=.05)\n layer.set_vocabulary(vocab_data[1], df_data=tfidf_data[1], append=True)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllClose(expected_output, output_dataset)\n\n def test_tfidf_appending_with_oov_replacement(self):\n vocab_data = [[\"earth\", \"wind\"], [\"and\", \"fire\"]]\n tfidf_data = [[.5, .25], [.2, .125]]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"fire\", \"earth\", \"michigan\"]])\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n expected_output = [[ 0, 1, .25, .2, 0],\n [1.5, .5, 0, 0, .125]]\n # pylint: enable=bad-whitespace\n # pyformat: enable\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=5,\n standardize=None,\n split=None,\n output_mode=text_vectorization.TFIDF)\n layer.set_vocabulary(vocab_data[0], df_data=tfidf_data[0], oov_df_value=.05)\n # Note that here we've replaced the OOV vaue.\n layer.set_vocabulary(\n vocab_data[1], df_data=tfidf_data[1], oov_df_value=.75, append=True)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n 
output_dataset = model.predict(input_array)\n self.assertAllClose(expected_output, output_dataset)\n\n\n@keras_parameterized.run_all_keras_modes(always_skip_eager=True)\nclass TextVectorizationSaveableTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_ops_are_not_added_with_multiple_saves(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=10,\n standardize=None,\n split=None,\n output_mode=text_vectorization.COUNT,\n pad_to_max_tokens=False)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n weights = model.get_weights()\n model.set_weights(weights)\n keras.backend.get_session().graph.finalize()\n weights = model.get_weights()\n model.set_weights(weights)\n\n\n@keras_parameterized.run_all_keras_modes\nclass TextVectorizationErrorTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest\n ):\n\n def test_too_long_vocab_fails_in_single_setting(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n layer = get_layer_class()(\n max_tokens=4,\n standardize=None,\n split=None,\n output_mode=text_vectorization.INT)\n with self.assertRaisesRegex(ValueError,\n \"vocabulary larger than the maximum vocab.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_too_long_vocab_fails_in_multiple_settings(self):\n vocab_data = [[\"earth\", \"wind\"], [\"and\", \"fire\"]]\n\n layer = get_layer_class()(\n max_tokens=4,\n standardize=None,\n split=None,\n output_mode=text_vectorization.INT)\n\n # The first time we call set_vocabulary, we're under the max_tokens limit\n # so it should be fine.\n layer.set_vocabulary(vocab_data[0])\n with self.assertRaisesRegex(ValueError,\n \"vocabulary larger than the maximum vocab.*\"):\n layer.set_vocabulary(vocab_data[1], append=True)\n\n def test_setting_vocab_without_tfidf_data_fails_in_tfidf_mode(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n layer = get_layer_class()(\n max_tokens=5,\n standardize=None,\n split=None,\n output_mode=text_vectorization.TFIDF)\n with self.assertRaisesRegex(ValueError,\n \"df_data must be set if output_mode is TFIDF\"):\n layer.set_vocabulary(vocab_data)\n\n def test_tfidf_data_length_mismatch_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n df_data = [1, 2, 3]\n layer = get_layer_class()(\n max_tokens=5,\n standardize=None,\n split=None,\n output_mode=text_vectorization.TFIDF)\n with self.assertRaisesRegex(ValueError,\n \"df_data must be the same length as vocab.*\"):\n layer.set_vocabulary(vocab_data, df_data)\n\n def test_tfidf_set_vocab_with_no_oov_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n df_data = [1, 2, 3, 4]\n layer = get_layer_class()(\n max_tokens=5,\n standardize=None,\n split=None,\n output_mode=text_vectorization.TFIDF)\n with self.assertRaisesRegex(ValueError,\n \"You must pass an oov_df_value.*\"):\n layer.set_vocabulary(vocab_data, df_data)\n\n def test_tfidf_set_vocab_with_no_oov_fails_with_append_set(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n df_data = [1, 2, 3, 4]\n layer = get_layer_class()(\n max_tokens=5,\n standardize=None,\n split=None,\n output_mode=text_vectorization.TFIDF)\n with self.assertRaisesRegex(ValueError,\n \"You must pass an oov_df_value.*\"):\n layer.set_vocabulary(vocab_data, df_data, append=True)\n\n def 
test_set_tfidf_in_non_tfidf_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n df_data = [1, 2, 3, 4]\n layer = get_layer_class()(\n max_tokens=5,\n standardize=None,\n split=None,\n output_mode=text_vectorization.BINARY)\n with self.assertRaisesRegex(ValueError,\n \"df_data should only be set if.*\"):\n layer.set_vocabulary(vocab_data, df_data)\n\n\n# Custom functions for the custom callable serialization test. Declared here\n# to avoid multiple registrations from run_all_keras_modes().\n@generic_utils.register_keras_serializable(package=\"Test\")\ndef custom_standardize_fn(x):\n return gen_string_ops.string_lower(x)\n\n\n@generic_utils.register_keras_serializable(package=\"Test\")\ndef custom_split_fn(x):\n return ragged_string_ops.string_split_v2(x, sep=\">\")\n\n\n@keras_parameterized.run_all_keras_modes\nclass TextVectorizationSavingTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_serialization_with_custom_callables(self):\n input_array = np.array([[\"earth>wind>and Fire\"],\n [\"\\tfire>And\\nearth>michigan\"]])\n expected_output = [[b\"earth\", b\"wind\", b\"and fire\"],\n [b\"\\tfire\", b\"and\\nearth\", b\"michigan\"]]\n\n input_data = keras.Input(shape=(1,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=custom_standardize_fn,\n split=custom_split_fn,\n ngrams=None,\n output_mode=None)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n serialized_model_data = model.get_config()\n with CustomObjectScope({\"TextVectorization\": get_layer_class()}):\n new_model = keras.Model.from_config(serialized_model_data)\n new_output_dataset = new_model.predict(input_array)\n self.assertAllEqual(expected_output, new_output_dataset)\n\n def test_vocabulary_persistence_across_saving(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n # Build and validate a golden model.\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=None,\n standardize=None,\n split=None,\n output_mode=text_vectorization.INT)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(output_dataset, expected_output)\n\n # Save the model to disk.\n output_path = os.path.join(self.get_temp_dir(), \"tf_keras_saved_model\")\n saving.export_saved_model(model, output_path)\n loaded_model = saving.load_from_saved_model(\n output_path, custom_objects={\"TextVectorization\": get_layer_class()})\n\n # Ensure that the loaded model is unique (so that the save/load is real)\n self.assertIsNot(model, loaded_model)\n\n # Validate correctness of the new model.\n new_output_dataset = loaded_model.predict(input_array)\n self.assertAllEqual(new_output_dataset, expected_output)\n\n def test_vocabulary_persistence_across_saving_with_tfidf(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n tfidf_data = [.5, .25, .2, .125]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"fire\", \"earth\", \"michigan\"]])\n\n # pyformat: disable\n # pylint: disable=bad-whitespace\n expected_output = [[ 0, 
1, .25, .2, 0],\n [.1, .5, 0, 0, .125]]\n # pylint: enable=bad-whitespace\n # pyformat: enable\n\n # Build and validate a golden model.\n input_data = keras.Input(shape=(None,), dtype=dtypes.string)\n layer = get_layer_class()(\n max_tokens=5,\n standardize=None,\n split=None,\n output_mode=text_vectorization.TFIDF)\n layer.set_vocabulary(vocab_data, df_data=tfidf_data, oov_df_value=.05)\n\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllClose(output_dataset, expected_output)\n\n # Save the model to disk.\n output_path = os.path.join(self.get_temp_dir(), \"tf_keras_saved_model\")\n saving.export_saved_model(model, output_path)\n loaded_model = saving.load_from_saved_model(\n output_path, custom_objects={\"TextVectorization\": get_layer_class()})\n\n # Ensure that the loaded model is unique (so that the save/load is real)\n self.assertIsNot(model, loaded_model)\n\n # Validate correctness of the new model.\n new_output_dataset = loaded_model.predict(input_array)\n self.assertAllClose(new_output_dataset, expected_output)\n\n\n@keras_parameterized.run_all_keras_modes\nclass TextVectorizationCombinerTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_combiner_api_compatibility_int_mode(self):\n data = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"earth\", \"wind\", \"and\", \"michigan\"]])\n combiner = text_vectorization._TextVectorizationCombiner(compute_idf=False)\n expected = {\n \"vocab\": np.array([\"and\", \"earth\", \"wind\", \"fire\", \"michigan\"]),\n }\n self.validate_accumulator_serialize_and_deserialize(combiner, data,\n expected)\n self.validate_accumulator_uniqueness(combiner, data)\n\n def test_combiner_api_compatibility_tfidf_mode(self):\n data = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"earth\", \"wind\", \"and\", \"michigan\"]])\n combiner = text_vectorization._TextVectorizationCombiner(compute_idf=True)\n expected_extract_output = {\n \"vocab\": np.array([\"and\", \"earth\", \"wind\", \"fire\", \"michigan\"]),\n \"idf\": np.array([0.510826, 0.510826, 0.510826, 0.693147, 0.693147]),\n \"oov_idf\": np.array([1.098612])\n }\n expected_accumulator_output = {\n \"vocab\": np.array([\"and\", \"earth\", \"wind\", \"fire\", \"michigan\"]),\n \"counts\": np.array([2, 2, 2, 1, 1]),\n \"document_counts\": np.array([2, 2, 2, 1, 1]),\n \"num_documents\": np.array(1),\n }\n self.validate_accumulator_serialize_and_deserialize(\n combiner, data, expected_accumulator_output)\n self.validate_accumulator_uniqueness(combiner, data)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n # TODO(askerryryan): Add tests confirming equivalence to behavior of\n # existing tf.keras.preprocessing.text.Tokenizer.\n @parameterized.named_parameters(\n {\n \"testcase_name\":\n \"top_k_smaller_than_full_vocab\",\n \"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], [\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"and\", \"earth\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n \"document_counts\": np.array([3, 2, 1, 1]),\n \"num_documents\": np.array(4),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"and\"]),\n \"idf\": np.array([0.693147, 0.847298, 1.098612]),\n \"oov_idf\": np.array([1.609438]),\n },\n },\n {\n \"testcase_name\":\n \"top_k_larger_than_full_vocab\",\n 
\"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], [\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n 10,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"and\", \"earth\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n \"document_counts\": np.array([3, 2, 1, 1]),\n \"num_documents\": np.array(4),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"and\", \"earth\"]),\n \"idf\": np.array([0.693147, 0.847298, 1.098612, 1.098612]),\n \"oov_idf\": np.array([1.609438]),\n },\n },\n {\n \"testcase_name\":\n \"no_top_k\",\n \"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], [\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n None,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"and\", \"earth\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n \"document_counts\": np.array([3, 2, 1, 1]),\n \"num_documents\": np.array(4),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"and\", \"earth\"]),\n \"idf\": np.array([0.693147, 0.847298, 1.098612, 1.098612]),\n \"oov_idf\": np.array([1.609438]),\n },\n },\n {\n \"testcase_name\": \"single_element_per_row\",\n \"data\": np.array([[\"earth\"], [\"wind\"], [\"fire\"], [\"wind\"], [\"and\"]]),\n \"vocab_size\": 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"and\", \"earth\", \"fire\"]),\n \"counts\": np.array([2, 1, 1, 1]),\n \"document_counts\": np.array([2, 1, 1, 1]),\n \"num_documents\": np.array(5),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"and\", \"earth\"]),\n \"idf\": np.array([0.980829, 1.252763, 1.252763]),\n \"oov_idf\": np.array([1.791759]),\n },\n },\n # Which tokens are retained are based on global frequency, and thus are\n # sensitive to frequency within a document. In contrast, because idf only\n # considers the presence of a token in a document, it is insensitive\n # to the frequency of the token within the document.\n {\n \"testcase_name\":\n \"retained_tokens_sensitive_to_within_document_frequency\",\n \"data\":\n np.array([[\"earth\", \"earth\"], [\"wind\", \"wind\"], [\"fire\", \"fire\"],\n [\"wind\", \"wind\"], [\"and\", \"michigan\"]]),\n \"vocab_size\":\n 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"earth\", \"fire\", \"and\", \"michigan\"]),\n \"counts\": np.array([4, 2, 2, 1, 1]),\n \"document_counts\": np.array([2, 1, 1, 1, 1]),\n \"num_documents\": np.array(5),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"earth\", \"fire\"]),\n \"idf\": np.array([0.980829, 1.252763, 1.252763]),\n \"oov_idf\": np.array([1.791759]),\n },\n })\n def test_combiner_computation(self,\n data,\n vocab_size,\n expected_accumulator_output,\n expected_extract_output,\n compute_idf=True):\n combiner = text_vectorization._TextVectorizationCombiner(\n vocab_size=vocab_size, compute_idf=compute_idf)\n expected_accumulator = combiner._create_accumulator(\n **expected_accumulator_output)\n self.validate_accumulator_computation(combiner, data, expected_accumulator)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n\n\nif __name__ == \"__main__\":\n test.main()\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for pooling layers.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python import keras\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import test_util as tf_test_util\nfrom tensorflow.python.keras import testing_utils\nfrom tensorflow.python.platform import test\n\n\nclass GlobalPoolingTest(test.TestCase):\n\n @tf_test_util.run_in_graph_and_eager_modes\n def test_globalpooling_1d(self):\n testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,\n input_shape=(3, 4, 5))\n testing_utils.layer_test(keras.layers.pooling.GlobalMaxPooling1D,\n kwargs={'data_format': 'channels_first'},\n input_shape=(3, 4, 5))\n testing_utils.layer_test(\n keras.layers.pooling.GlobalAveragePooling1D, input_shape=(3, 4, 5))\n testing_utils.layer_test(keras.layers.pooling.GlobalAveragePooling1D,\n kwargs={'data_format': 'channels_first'},\n input_shape=(3, 4, 5))\n\n @tf_test_util.run_in_graph_and_eager_modes\n def test_globalpooling_1d_masking_support(self):\n model = keras.Sequential()\n model.add(keras.layers.Masking(mask_value=0., input_shape=(None, 4)))\n model.add(keras.layers.GlobalAveragePooling1D())\n model.compile(loss='mae', optimizer='rmsprop')\n\n model_input = np.random.random((2, 3, 4))\n model_input[0, 1:, :] = 0\n output = model.predict(model_input)\n self.assertAllClose(output[0], model_input[0, 0, :])\n\n @tf_test_util.run_in_graph_and_eager_modes\n def test_globalpooling_2d(self):\n testing_utils.layer_test(\n keras.layers.pooling.GlobalMaxPooling2D,\n kwargs={'data_format': 'channels_first'},\n input_shape=(3, 4, 5, 6))\n testing_utils.layer_test(\n keras.layers.pooling.GlobalMaxPooling2D,\n kwargs={'data_format': 'channels_last'},\n input_shape=(3, 5, 6, 4))\n testing_utils.layer_test(\n keras.layers.pooling.GlobalAveragePooling2D,\n kwargs={'data_format': 'channels_first'},\n input_shape=(3, 4, 5, 6))\n testing_utils.layer_test(\n keras.layers.pooling.GlobalAveragePooling2D,\n kwargs={'data_format': 'channels_last'},\n input_shape=(3, 5, 6, 4))\n\n @tf_test_util.run_in_graph_and_eager_modes\n def test_globalpooling_3d(self):\n testing_utils.layer_test(\n keras.layers.pooling.GlobalMaxPooling3D,\n kwargs={'data_format': 'channels_first'},\n input_shape=(3, 4, 3, 4, 3))\n testing_utils.layer_test(\n keras.layers.pooling.GlobalMaxPooling3D,\n kwargs={'data_format': 'channels_last'},\n input_shape=(3, 4, 3, 4, 3))\n testing_utils.layer_test(\n keras.layers.pooling.GlobalAveragePooling3D,\n kwargs={'data_format': 'channels_first'},\n input_shape=(3, 4, 3, 4, 3))\n testing_utils.layer_test(\n keras.layers.pooling.GlobalAveragePooling3D,\n kwargs={'data_format': 'channels_last'},\n input_shape=(3, 4, 3, 4, 3))\n\n\nclass 
Pooling2DTest(test.TestCase):\n\n @tf_test_util.run_in_graph_and_eager_modes\n def test_maxpooling_2d(self):\n pool_size = (3, 3)\n for strides in [(1, 1), (2, 2)]:\n testing_utils.layer_test(\n keras.layers.MaxPooling2D,\n kwargs={\n 'strides': strides,\n 'padding': 'valid',\n 'pool_size': pool_size\n },\n input_shape=(3, 5, 6, 4))\n\n @tf_test_util.run_in_graph_and_eager_modes\n def test_averagepooling_2d(self):\n testing_utils.layer_test(\n keras.layers.AveragePooling2D,\n kwargs={'strides': (2, 2),\n 'padding': 'same',\n 'pool_size': (2, 2)},\n input_shape=(3, 5, 6, 4))\n testing_utils.layer_test(\n keras.layers.AveragePooling2D,\n kwargs={'strides': (2, 2),\n 'padding': 'valid',\n 'pool_size': (3, 3)},\n input_shape=(3, 5, 6, 4))\n\n # This part of the test can only run on GPU but doesn't appear\n # to be properly assigned to a GPU when running in eager mode.\n if not context.executing_eagerly():\n # Only runs on GPU with CUDA, channels_first is not supported on CPU.\n # TODO(b/62340061): Support channels_first on CPU.\n if test.is_gpu_available(cuda_only=True):\n testing_utils.layer_test(\n keras.layers.AveragePooling2D,\n kwargs={\n 'strides': (1, 1),\n 'padding': 'valid',\n 'pool_size': (2, 2),\n 'data_format': 'channels_first'\n },\n input_shape=(3, 4, 5, 6))\n\n\nclass Pooling3DTest(test.TestCase):\n\n @tf_test_util.run_in_graph_and_eager_modes\n def test_maxpooling_3d(self):\n if test.is_built_with_rocm():\n self.skipTest('Pooling with 3D tensors is not supported in ROCm')\n pool_size = (3, 3, 3)\n testing_utils.layer_test(\n keras.layers.MaxPooling3D,\n kwargs={'strides': 2,\n 'padding': 'valid',\n 'pool_size': pool_size},\n input_shape=(3, 11, 12, 10, 4))\n testing_utils.layer_test(\n keras.layers.MaxPooling3D,\n kwargs={\n 'strides': 3,\n 'padding': 'valid',\n 'data_format': 'channels_first',\n 'pool_size': pool_size\n },\n input_shape=(3, 4, 11, 12, 10))\n\n @tf_test_util.run_in_graph_and_eager_modes\n def test_averagepooling_3d(self):\n if test.is_built_with_rocm():\n self.skipTest('Pooling with 3D tensors is not supported in ROCm')\n pool_size = (3, 3, 3)\n testing_utils.layer_test(\n keras.layers.AveragePooling3D,\n kwargs={'strides': 2,\n 'padding': 'valid',\n 'pool_size': pool_size},\n input_shape=(3, 11, 12, 10, 4))\n testing_utils.layer_test(\n keras.layers.AveragePooling3D,\n kwargs={\n 'strides': 3,\n 'padding': 'valid',\n 'data_format': 'channels_first',\n 'pool_size': pool_size\n },\n input_shape=(3, 4, 11, 12, 10))\n\n\nclass Pooling1DTest(test.TestCase):\n\n @tf_test_util.run_in_graph_and_eager_modes\n def test_maxpooling_1d(self):\n for padding in ['valid', 'same']:\n for stride in [1, 2]:\n testing_utils.layer_test(\n keras.layers.MaxPooling1D,\n kwargs={'strides': stride,\n 'padding': padding},\n input_shape=(3, 5, 4))\n testing_utils.layer_test(\n keras.layers.MaxPooling1D,\n kwargs={'data_format': 'channels_first'},\n input_shape=(3, 2, 6))\n\n @tf_test_util.run_in_graph_and_eager_modes\n def test_averagepooling_1d(self):\n for padding in ['valid', 'same']:\n for stride in [1, 2]:\n testing_utils.layer_test(\n keras.layers.AveragePooling1D,\n kwargs={'strides': stride,\n 'padding': padding},\n input_shape=(3, 5, 4))\n\n testing_utils.layer_test(\n keras.layers.AveragePooling1D,\n kwargs={'data_format': 'channels_first'},\n input_shape=(3, 2, 6))\n\n\nif __name__ == '__main__':\n test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Python wrapper for prefetching_ops.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import iterator_ops\nfrom tensorflow.python.data.util import structure\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import functional_ops\nfrom tensorflow.python.ops import gen_dataset_ops\nfrom tensorflow.python.ops import resource_variable_ops\n\n\nclass _PerDeviceGenerator(dataset_ops.DatasetV2):\n \"\"\"A `dummy` generator dataset.\"\"\"\n\n def __init__(self, shard_num, multi_device_iterator_resource, incarnation_id,\n source_device, element_spec):\n self._element_spec = element_spec\n\n multi_device_iterator_string_handle = (\n gen_dataset_ops.multi_device_iterator_to_string_handle(\n multi_device_iterator_resource))\n\n # TODO(b/124254153): Enable autograph once the overhead is low enough.\n @function.defun(autograph=False) # Pure graph code.\n def _init_func():\n return multi_device_iterator_string_handle\n\n init_func_concrete = _init_func._get_concrete_function_internal() # pylint: disable=protected-access\n\n # TODO(b/124254153): Enable autograph once the overhead is low enough.\n @function.defun(autograph=False) # Pure graph code.\n def _remote_init_func():\n return functional_ops.remote_call(\n target=source_device,\n args=init_func_concrete.captured_inputs,\n Tout=[dtypes.string],\n f=init_func_concrete)\n\n self._init_func = _remote_init_func._get_concrete_function_internal() # pylint: disable=protected-access\n self._init_captured_args = self._init_func.captured_inputs\n\n # TODO(b/124254153): Enable autograph once the overhead is low enough.\n @function.defun(\n input_signature=[tensor_spec.TensorSpec([], dtypes.string)],\n autograph=False) # Pure graph code.\n def _next_func(string_handle):\n # pylint: disable=protected-access\n multi_device_iterator = (\n gen_dataset_ops.multi_device_iterator_from_string_handle(\n string_handle=string_handle,\n output_types=structure.get_flat_tensor_types(self._element_spec),\n output_shapes=structure.get_flat_tensor_shapes(\n self._element_spec)))\n return gen_dataset_ops.multi_device_iterator_get_next_from_shard(\n multi_device_iterator=multi_device_iterator,\n shard_num=shard_num,\n incarnation_id=incarnation_id,\n 
output_types=structure.get_flat_tensor_types(self._element_spec),\n output_shapes=structure.get_flat_tensor_shapes(self._element_spec))\n\n next_func_concrete = _next_func._get_concrete_function_internal() # pylint: disable=protected-access\n\n # TODO(b/124254153): Enable autograph once the overhead is low enough.\n @function.defun_with_attributes(\n input_signature=[tensor_spec.TensorSpec([], dtypes.string)],\n attributes={\"experimental_ints_on_device\": True},\n autograph=False) # Pure graph code.\n def _remote_next_func(string_handle):\n return functional_ops.remote_call(\n target=source_device,\n args=[string_handle] + next_func_concrete.captured_inputs,\n Tout=structure.get_flat_tensor_types(self._element_spec),\n f=next_func_concrete)\n\n self._next_func = _remote_next_func._get_concrete_function_internal() # pylint: disable=protected-access\n self._next_captured_args = self._next_func.captured_inputs\n\n self._incarnation_id_index = -1\n for i, arg in enumerate(self._next_captured_args):\n if arg is incarnation_id:\n self._incarnation_id_index = i\n\n # TODO(b/124254153): Enable autograph once the overhead is low enough.\n @function.defun(\n input_signature=[tensor_spec.TensorSpec([], dtypes.string)],\n autograph=False) # Pure graph code.\n def _finalize_func(unused_string_handle):\n return array_ops.constant(0, dtypes.int64)\n\n finalize_func_concrete = _finalize_func._get_concrete_function_internal() # pylint: disable=protected-access\n\n # TODO(b/124254153): Enable autograph once the overhead is low enough.\n @function.defun(\n input_signature=[tensor_spec.TensorSpec([], dtypes.string)],\n autograph=False) # Pure graph code.\n def _remote_finalize_func(string_handle):\n return functional_ops.remote_call(\n target=source_device,\n args=[string_handle] + finalize_func_concrete.captured_inputs,\n Tout=[dtypes.int64],\n f=finalize_func_concrete)\n\n self._finalize_func = (\n _remote_finalize_func._get_concrete_function_internal()) # pylint: disable=protected-access\n self._finalize_captured_args = self._finalize_func.captured_inputs\n\n variant_tensor = gen_dataset_ops.generator_dataset(\n self._init_captured_args,\n self._next_captured_args,\n self._finalize_captured_args,\n init_func=self._init_func,\n next_func=self._next_func,\n finalize_func=self._finalize_func,\n **self._flat_structure)\n super(_PerDeviceGenerator, self).__init__(variant_tensor)\n\n def _inputs(self):\n # TODO(b/116506223): Determine which datasets should be used as inputs here.\n return []\n\n @property\n def element_spec(self):\n return self._element_spec\n\n\nclass _ReincarnatedPerDeviceGenerator(dataset_ops.DatasetV2):\n \"\"\"Creates a _PerDeviceGenerator-like dataset with a new incarnation_id.\n\n Re-uses the functions from the provided per_device_dataset and just switches\n out the function argument corresponding to the incarnation_id.\n \"\"\"\n\n def __init__(self, per_device_dataset, incarnation_id):\n # pylint: disable=protected-access\n self._element_spec = per_device_dataset.element_spec\n self._init_func = per_device_dataset._init_func\n self._init_captured_args = self._init_func.captured_inputs\n\n self._next_func = per_device_dataset._next_func\n self._next_captured_args = per_device_dataset._next_captured_args\n # The captured arguments to the next_func are string_handle, incarnation_id.\n # We update the incarnation id to the new one.\n self._next_captured_args[\n per_device_dataset._incarnation_id_index] = incarnation_id\n\n self._finalize_func = per_device_dataset._finalize_func\n 
self._finalize_captured_args = per_device_dataset._finalize_captured_args\n\n variant_tensor = gen_dataset_ops.generator_dataset(\n self._init_captured_args,\n self._next_captured_args,\n self._finalize_captured_args,\n init_func=self._init_func,\n next_func=self._next_func,\n finalize_func=self._finalize_func,\n **self._flat_structure)\n super(_ReincarnatedPerDeviceGenerator, self).__init__(variant_tensor)\n\n def _inputs(self):\n # TODO(b/116506223): Determine which datasets should be used as inputs here.\n return []\n\n @property\n def element_spec(self):\n return self._element_spec\n\n\ndef _create_device_dataset(prototype_ds, incarnation_id, prefetch_buffer_size,\n experimental_slack):\n \"\"\"Uses _prototype_device_datasets[i] to build a dataset for the device.\"\"\"\n ds = _ReincarnatedPerDeviceGenerator(prototype_ds, incarnation_id)\n if prefetch_buffer_size > 0:\n if experimental_slack:\n ds = dataset_ops.PrefetchDataset(ds, prefetch_buffer_size, slack_period=1)\n else:\n ds = ds.prefetch(prefetch_buffer_size)\n # TODO(jsimsa): Enable auto-tuning and optimizations when supported for\n # non-CPU devices.\n options = dataset_ops.Options()\n options.experimental_optimization.apply_default_optimizations = False\n options.experimental_optimization.autotune = False\n ds = ds.with_options(options)\n return ds\n\n\nclass MultiDeviceIterator(object):\n \"\"\"An iterator over multiple devices.\"\"\"\n\n def __init__(self,\n dataset,\n devices,\n max_buffer_size=1,\n prefetch_buffer_size=1,\n source_device=\"/cpu:0\"):\n \"\"\"Constructs a MultiDeviceIterator.\n\n Args:\n dataset: The input dataset to be iterated over.\n devices: The list of devices to fetch data to.\n max_buffer_size: Maximum size of the host side per device buffer to keep.\n prefetch_buffer_size: if > 1, then we set up a buffer on each device to\n prefetch into.\n source_device: The host device to place the `dataset` on.
In order to\n prevent deadlocks, if the prefetch_buffer_size is greater than the\n max_buffer_size, we set the max_buffer_size to prefetch_buffer_size.\n \"\"\"\n options = dataset_ops.Options()\n options.experimental_distribute.num_devices = len(devices)\n dataset = dataset.with_options(options)\n self._dataset = dataset._apply_options() # pylint: disable=protected-access\n self._experimental_slack = dataset.options().experimental_slack\n self._devices = devices\n self._source_device = source_device\n self._source_device_tensor = ops.convert_to_tensor(source_device)\n self._max_buffer_size = max_buffer_size\n self._prefetch_buffer_size = prefetch_buffer_size\n\n if self._prefetch_buffer_size > self._max_buffer_size:\n self._max_buffer_size = self._prefetch_buffer_size\n\n # Create the MultiDeviceIterator.\n with ops.device(self._source_device):\n # TODO(b/121378567): Get rid of this shared_name hack.\n shared_name = \"\"\n if context.executing_eagerly():\n shared_name = context.shared_name()\n self._multi_device_iterator_resource = (\n gen_dataset_ops.multi_device_iterator(\n devices=self._devices,\n shared_name=shared_name,\n container=\"\",\n **self._dataset._flat_structure)) # pylint: disable=protected-access\n if context.executing_eagerly():\n # Delete the resource when this object is deleted\n self._resource_deleter = resource_variable_ops.EagerResourceDeleter(\n handle=self._multi_device_iterator_resource,\n handle_device=self._source_device)\n\n # The incarnation ID is used to ensure consistency between the per-device\n # iterators and the multi-device iterator.\n self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(\n self._dataset._variant_tensor, # pylint: disable=protected-access\n self._multi_device_iterator_resource,\n max_buffer_size=self._max_buffer_size)\n\n self._prototype_device_datasets = []\n for i, device in enumerate(self._devices):\n with ops.device(device):\n ds = _PerDeviceGenerator(i, self._multi_device_iterator_resource,\n self._incarnation_id,\n self._source_device_tensor,\n self._dataset.element_spec)\n self._prototype_device_datasets.append(ds)\n\n # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to\n # initialize the device side of the pipeline. This would allow the\n # MultiDeviceIterator to choose, for example, to move some transformations\n # into the device side from its input.
It might be useful in rewriting.\n # Create the per device iterators.\n self._device_iterators = []\n for i, device in enumerate(self._devices):\n with ops.device(device):\n ds = _create_device_dataset(self._prototype_device_datasets[i],\n self._incarnation_id,\n self._prefetch_buffer_size,\n self._experimental_slack)\n if context.executing_eagerly():\n self._device_iterators.append(dataset_ops.make_one_shot_iterator(ds))\n else:\n self._device_iterators.append(\n dataset_ops.make_initializable_iterator(ds))\n\n if not context.executing_eagerly():\n device_iterator_initializers = [\n iterator.initializer for iterator in self._device_iterators\n ]\n self._initializer = control_flow_ops.group(*device_iterator_initializers)\n\n def _create_device_dataset(self, i):\n \"\"\"Uses _prototype_device_datasets[i] to build a dataset for the device.\"\"\"\n ds = self._prototype_device_datasets[i]\n ds = _ReincarnatedPerDeviceGenerator(ds, self._incarnation_id)\n if self._prefetch_buffer_size > 0:\n if self._experimental_slack:\n ds = dataset_ops.PrefetchDataset(\n ds, self._prefetch_buffer_size, slack_period=1)\n else:\n ds = ds.prefetch(self._prefetch_buffer_size)\n # TODO(jsimsa): Enable auto-tuning and optimizations when supported for\n # non-CPU devices.\n options = dataset_ops.Options()\n options.experimental_optimization.apply_default_optimizations = False\n options.experimental_optimization.autotune = False\n ds = ds.with_options(options)\n return ds\n\n def get_next(self, device=None):\n \"\"\"Returns the next element given a `device`, else returns all in a list.\"\"\"\n if device is not None:\n index = self._devices.index(device)\n return self._device_iterators[index].get_next()\n\n result = []\n for i, device in enumerate(self._devices):\n with ops.device(device):\n result.append(self._device_iterators[i].get_next())\n return result\n\n def get_next_as_optional(self):\n result = []\n for i, device in enumerate(self._devices):\n with ops.device(device):\n result.append(\n iterator_ops.get_next_as_optional(self._device_iterators[i]))\n return result\n\n @property\n def initializer(self):\n if context.executing_eagerly():\n return control_flow_ops.no_op()\n return self._initializer\n\n def _eager_reset(self):\n \"\"\"Resets the MultiDeviceIterator in eager mode.\"\"\"\n if not ops.executing_eagerly_outside_functions():\n raise ValueError(\"Eager reset is only supported in eager mode.\")\n # pylint: disable=protected-access\n self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(\n self._dataset._variant_tensor,\n self._multi_device_iterator_resource,\n max_buffer_size=self._max_buffer_size)\n for i, device in enumerate(self._devices):\n with ops.device(device):\n ds = _create_device_dataset(self._prototype_device_datasets[i],\n self._incarnation_id,\n self._prefetch_buffer_size,\n self._experimental_slack)\n # Reset the device iterator resources with the new dataset.\n ds_variant = ds._variant_tensor\n gen_dataset_ops.make_iterator(\n ds_variant, self._device_iterators[i]._iterator_resource)\n\n @property\n def element_spec(self):\n return self._dataset.element_spec\n\n\nclass MultiDeviceIteratorResourceDeleter(object):\n \"\"\"An object which cleans up a Multi Device Iterator resource.\n\n An alternative to defining a __del__ method on an object.
Even if the parent\n object is part of a reference cycle, the cycle will be collectible.\n \"\"\"\n\n def __init__(self, multi_device_iterator, iterators, device, deleter):\n self._deleter = deleter\n self._multi_device_iterator = multi_device_iterator\n self._iterators = iterators\n self._device = device\n self._eager_mode = context.executing_eagerly()\n\n def __del__(self):\n with ops.device(self._device):\n # Make sure the resource is deleted in the same mode as it was created in.\n # We pass in the iterator handles as inputs to the op to make sure that\n # this op runs after all the iterators are deleted.\n if self._eager_mode:\n with context.eager_mode():\n gen_dataset_ops.delete_multi_device_iterator(\n multi_device_iterator=self._multi_device_iterator,\n iterators=self._iterators,\n deleter=self._deleter)\n else:\n with context.graph_mode():\n gen_dataset_ops.delete_multi_device_iterator(\n multi_device_iterator=self._multi_device_iterator,\n iterators=self._iterators,\n deleter=self._deleter)\n\n\nclass MultiDeviceIteratorSpec(type_spec.TypeSpec):\n \"\"\"Type specification for `MultiDeviceIteratorV2`.\"\"\"\n\n __slots__ = [\"_devices\", \"_source_device\", \"_element_spec\"]\n\n def __init__(self, devices, source_device, element_spec):\n self._devices = devices\n self._source_device = source_device\n self._element_spec = element_spec\n\n @property\n def value_type(self):\n return MultiDeviceIteratorV2\n\n def _serialize(self):\n return (tuple(self._devices), self._source_device, self._element_spec)\n\n @property\n def _component_specs(self):\n specs = [\n tensor_spec.TensorSpec([], dtypes.resource),\n tensor_spec.TensorSpec([], dtypes.scalar)\n ]\n for _ in range(len(self._devices)):\n specs.append(iterator_ops.IteratorSpec(self._element_spec))\n return specs\n\n def _to_components(self, value):\n # pylint: disable=protected-access\n c = [value._multi_device_iterator_resource, value._deleter]\n c.extend(value._device_iterators)\n return c\n\n def _from_components(self, components):\n return MultiDeviceIteratorV2(\n dataset=None,\n devices=self._devices,\n source_device=self._source_device,\n components=components,\n element_spec=self._element_spec)\n\n @staticmethod\n def from_value(value):\n # pylint: disable=protected-access\n return MultiDeviceIteratorSpec(\n value._devices,\n value._source_device,\n value.element_spec)\n\n\nclass MultiDeviceIteratorV2(composite_tensor.CompositeTensor):\n \"\"\"An iterator over multiple devices.\"\"\"\n\n def __init__(self,\n dataset=None,\n devices=None,\n max_buffer_size=1,\n prefetch_buffer_size=1,\n source_device=\"/cpu:0\",\n components=None,\n element_spec=None):\n \"\"\"Constructs a MultiDeviceIteratorV2 object.\n\n Args:\n dataset: The input dataset to be iterated over.\n devices: The list of devices to fetch data to.\n max_buffer_size: Maximum size of the host side per device buffer to keep.\n prefetch_buffer_size: if > 1, then we set up a buffer on each device to\n prefetch into.\n source_device: The host device to place the `dataset` on.
In order to\n prevent deadlocks, if the prefetch_buffer_size is greater than the\n max_buffer_size, we set the max_buffer_size to prefetch_buffer_size.\n components: Tensor components to construct the MultiDeviceIterator from.\n element_spec: A nested structure of `TypeSpec` objects that\n represents the type specification of elements of the iterator.\n\n Raises:\n RuntimeError: If executed in graph mode or outside of function building\n mode.\n \"\"\"\n if (not context.executing_eagerly() and\n not ops.get_default_graph()._building_function): # pylint: disable=protected-access\n raise RuntimeError(\"MultiDeviceIteratorV2 is only supported inside of \"\n \"tf.function or when eager execution is enabled.\")\n if devices is None:\n raise ValueError(\"`devices` must be provided\")\n error_message = (\"Either `dataset` or both `components` and \"\n \"`element_spec` need to be provided.\")\n\n if dataset is None:\n if (components is None or element_spec is None):\n raise ValueError(error_message)\n self._element_spec = element_spec\n self._devices = devices\n self._source_device = source_device\n self._multi_device_iterator_resource = components[0]\n self._deleter = components[1]\n self._device_iterators = components[2:]\n iterator_handles = []\n for it in self._device_iterators:\n iterator_handles.append(it._iterator_resource) # pylint: disable=protected-access\n else:\n if (components is not None or element_spec is not None):\n raise ValueError(error_message)\n options = dataset_ops.Options()\n options.experimental_distribute.num_devices = len(devices)\n dataset = dataset.with_options(options)\n dataset = dataset._apply_options() # pylint: disable=protected-access\n self._element_spec = dataset.element_spec\n experimental_slack = dataset.options().experimental_slack\n self._devices = devices\n self._source_device = source_device\n source_device_tensor = ops.convert_to_tensor(self._source_device)\n\n if prefetch_buffer_size > max_buffer_size:\n max_buffer_size = prefetch_buffer_size\n\n # Create the MultiDeviceIterator.\n with ops.device(self._source_device):\n self._multi_device_iterator_resource, self._deleter = (\n gen_dataset_ops.anonymous_multi_device_iterator(\n devices=self._devices, **dataset._flat_structure)) # pylint: disable=protected-access\n\n # The incarnation ID is used to ensure consistency between the\n # per-device iterators and the multi-device iterator.\n incarnation_id = gen_dataset_ops.multi_device_iterator_init(\n dataset._variant_tensor, # pylint: disable=protected-access\n self._multi_device_iterator_resource,\n max_buffer_size=max_buffer_size)\n\n prototype_device_datasets = []\n for i, device in enumerate(self._devices):\n with ops.device(device):\n ds = _PerDeviceGenerator(i, self._multi_device_iterator_resource,\n incarnation_id, source_device_tensor,\n dataset.element_spec)\n prototype_device_datasets.append(ds)\n\n # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to\n # initialize the device side of the pipeline. This would allow the\n # MultiDeviceIterator to choose, for example, to move some transformations\n # into the device side from its input.
It might be useful in rewriting.\n # Create the per device iterators.\n self._device_iterators = []\n iterator_handles = []\n for i, device in enumerate(self._devices):\n with ops.device(device):\n ds = _create_device_dataset(prototype_device_datasets[i],\n incarnation_id, prefetch_buffer_size,\n experimental_slack)\n iterator = iter(ds)\n self._device_iterators.append(iterator)\n iterator_handles.append(iterator._iterator_resource) # pylint: disable=protected-access\n\n self._resource_deleter = MultiDeviceIteratorResourceDeleter(\n multi_device_iterator=self._multi_device_iterator_resource,\n iterators=iterator_handles,\n device=self._source_device,\n deleter=self._deleter)\n\n def get_next(self, device=None):\n \"\"\"Returns the next element given a `device`, else returns all in a list.\"\"\"\n if device is not None:\n index = self._devices.index(device)\n return self._device_iterators[index].get_next()\n\n result = []\n for i, device in enumerate(self._devices):\n with ops.device(device):\n result.append(self._device_iterators[i].get_next())\n return result\n\n def __iter__(self):\n return self\n\n def __next__(self):\n return self.next()\n\n def next(self):\n try:\n return self.get_next()\n except errors.OutOfRangeError:\n raise StopIteration\n\n def get_next_as_optional(self):\n result = []\n for i, device in enumerate(self._devices):\n with ops.device(device):\n result.append(\n iterator_ops.get_next_as_optional(self._device_iterators[i]))\n return result\n\n @property\n def element_spec(self):\n return self._element_spec\n\n @property\n def _type_spec(self):\n return MultiDeviceIteratorSpec(self._devices, self._source_device,\n self._element_spec)\n"
] |
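That ends the code column for this row. Since the MultiDeviceIterator classes above document their behavior only in docstrings, here is a minimal usage sketch: it assumes eager execution, imports from the internal module shown above (not a public API), and the device list and element count are illustrative assumptions, not values from the dataset:

    import tensorflow as tf
    from tensorflow.python.data.ops import multi_device_iterator_ops

    # Illustrative devices; in practice these would be distinct accelerators
    # such as "/gpu:0" and "/gpu:1".
    dataset = tf.data.Dataset.range(8)
    devices = ["/cpu:0", "/cpu:0"]

    # Elements are split across the devices; each step yields a list with
    # one tensor per device, and iteration stops on OutOfRangeError.
    mdi = multi_device_iterator_ops.MultiDeviceIteratorV2(dataset, devices)
    for per_device in mdi:
        print([t.numpy() for t in per_device])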
[
[
"tensorflow.python.keras.keras_parameterized.run_all_keras_modes",
"tensorflow.python.keras.Input",
"tensorflow.python.keras.Model",
"tensorflow.python.ops.gen_string_ops.string_lower",
"tensorflow.python.keras.utils.generic_utils.register_keras_serializable",
"tensorflow.python.keras.layers.preprocessing.text_vectorization._TextVectorizationCombiner",
"tensorflow.python.keras.utils.generic_utils.CustomObjectScope",
"tensorflow.python.keras.testing_utils.layer_test",
"tensorflow.python.keras.Model.from_config",
"tensorflow.python.keras.saving.saved_model_experimental.export_saved_model",
"tensorflow.python.platform.test.main",
"tensorflow.python.keras.backend.get_session",
"numpy.array",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.ops.ragged.ragged_string_ops.string_split_v2"
],
[
"numpy.random.random",
"tensorflow.python.keras.layers.Masking",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.keras.testing_utils.layer_test",
"tensorflow.python.platform.test.is_built_with_rocm",
"tensorflow.python.keras.Sequential",
"tensorflow.python.platform.test.main",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.keras.layers.GlobalAveragePooling1D"
],
[
"tensorflow.python.ops.gen_dataset_ops.delete_multi_device_iterator",
"tensorflow.python.ops.array_ops.constant",
"tensorflow.python.ops.functional_ops.remote_call",
"tensorflow.python.data.ops.dataset_ops.make_one_shot_iterator",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.resource_variable_ops.EagerResourceDeleter",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.data.util.structure.get_flat_tensor_types",
"tensorflow.python.ops.gen_dataset_ops.make_iterator",
"tensorflow.python.data.ops.iterator_ops.IteratorSpec",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.data.ops.dataset_ops.PrefetchDataset",
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.data.ops.dataset_ops.make_initializable_iterator",
"tensorflow.python.eager.function.defun",
"tensorflow.python.data.util.structure.get_flat_tensor_shapes",
"tensorflow.python.framework.ops.convert_to_tensor",
"tensorflow.python.data.ops.iterator_ops.get_next_as_optional",
"tensorflow.python.ops.gen_dataset_ops.multi_device_iterator_to_string_handle",
"tensorflow.python.ops.gen_dataset_ops.anonymous_multi_device_iterator",
"tensorflow.python.ops.gen_dataset_ops.generator_dataset",
"tensorflow.python.ops.gen_dataset_ops.multi_device_iterator_init",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.data.ops.dataset_ops.Options",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.eager.context.shared_name",
"tensorflow.python.ops.gen_dataset_ops.multi_device_iterator"
]
] |
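The apis lists above enumerate the symbols each of the three files exercises. For the TextVectorization tests, gen_string_ops.string_lower and ragged_string_ops.string_split_v2 are the building blocks for custom standardize/split callables; a hedged sketch of that pattern (the function names here are illustrative, not taken from the dataset):

    from tensorflow.python.ops import gen_string_ops
    from tensorflow.python.ops.ragged import ragged_string_ops

    def custom_standardize_fn(x):
        # Lowercase every string in the input tensor.
        return gen_string_ops.string_lower(x)

    def custom_split_fn(x):
        # Split each string on spaces into a RaggedTensor of tokens.
        return ragged_string_ops.string_split_v2(x, sep=" ")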
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.6",
"2.3",
"2.4",
"2.5",
"2.2"
]
}
] |
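The possible_versions column above records, per file, the library releases the extracted code is believed to run under (empty lists mark libraries the file does not use). A small sketch of how a consumer might filter on it; record is a hypothetical dict shaped like one row of this dataset:

    def files_supporting(record, library="tensorflow", version="2.4"):
        # Return file paths whose version list for `library` includes `version`.
        return [
            path
            for path, versions in zip(record["file_path"], record["possible_versions"])
            if version in versions.get(library, [])
        ]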
christianbrodbeck/Eelbrain
|
[
"0c24abcc382abb590c062b8bc683f749265f564f",
"0c24abcc382abb590c062b8bc683f749265f564f"
] |
[
"eelbrain/plot/_plot_utils.py",
"eelbrain/plot/_base.py"
] |
[
"# Utilities working with (and needing to import) plots\nimport os\n\nimport numpy as np\n\nfrom .._utils import ui\nfrom ._base import TimeSlicer\n\n\ndef save_movie(figures, filename=None, time_dilation=4, tstart=None, tstop=None, size=None, **kwargs):\n \"\"\"Save a movie combining multiple figures with moving time axes\n\n Parameters\n ----------\n figures : dict\n ``{(xpos, ypos): figure}`` dictionary indicating placement of figures.\n filename : str\n Filename for the movie (omit to use a GUI).\n time_dilation : float\n Factor by which to stretch time (default 4). Time dilation is\n controlled through the frame-rate; if the ``fps`` keyword argument\n is specified, ``time_dilation`` is ignored.\n tstart : float\n Time axis start (default is earliest time in ``figures``).\n tstop : float\n Time axis stop (default includes latest time in ``figures``).\n ...\n :func:`imageio.mimwrite` parmeters.\n \"\"\"\n import imageio\n from PIL import Image\n\n if filename is None:\n filename = ui.ask_saveas(\"Save movie...\", None, [('Movie (*.mov)', '*.mov')])\n if not filename:\n return\n else:\n filename = os.path.expanduser(filename)\n\n time_dims = list(filter(None, (getattr(f, '_time_dim', None) for f in figures.values())))\n if tstart is None:\n tstart = min(dim.tmin for dim in time_dims)\n if tstop is None:\n tstop = max(dim.tstop for dim in time_dims)\n if 'fps' in kwargs:\n tstep = time_dilation / kwargs['fps']\n else:\n tstep = min(dim.tstep for dim in time_dims)\n kwargs['fps'] = 1. / tstep / time_dilation\n\n times = np.arange(tstart, tstop, tstep)\n\n ims = []\n x_max = y_max = None\n for t in times:\n t_ims = []\n for (x, y), fig in figures.items():\n fig.set_time(t)\n f_im = Image.fromarray(fig._im_array(), 'RGBA')\n t_ims.append((x, y, f_im))\n\n if x_max is None:\n x_max = max(x + im.size[0] for x, y, im in t_ims)\n y_max = max(y + im.size[1] for x, y, im in t_ims)\n im_buf = Image.new('RGBA', (x_max, y_max), (1, 1, 1, 0))\n\n for x, y, im in t_ims:\n im_buf.paste(im, (x, y))\n\n ims.append(np.array(im_buf))\n imageio.mimwrite(filename, ims, **kwargs)\n",
"# Author: Christian Brodbeck <[email protected]>\n\"\"\"Framework for figures embedded in a GUI\n\nImplementation\n==============\n\nPlotting is implemented hierarchically in 3 different types of\nfunctions/classes:\n\ntop-level (public names)\n Top-level functions or classes have public names create an entire figure.\n Some classes also retain the figure and provide methods for manipulating\n it.\n\n_ax_\n Functions beginning with _ax_ organize an axes object. They do not\n create their own axes object (this is provided by the top-level function),\n but change axes formatting such as labels and extent.\n\n_plt_\n Functions beginning with _plt_ only plot data to a given axes object\n without explicitly changing aspects of the axes themselves.\n\n\nTop-level plotters can be called with nested lists of data-objects (NDVar\ninstances). They create a separate axes for each list element. Axes\nthemselves can have multiple layers (e.g., a difference map visualized through\na colormap, and significance levels indicated by contours).\n\n\nExample: t-test\n---------------\n\nFor example, the default plot for testnd.ttest() results is the\nfollowing list (assuming the test compares A and B):\n\n``[A, B, [diff(A,B), p(A, B)]]``\n\nwhere ``diff(...)`` is a difference map and ``p(...)`` is a map of p-values.\nThe main plot function creates a separate axes object for each list element:\n\n- ``A``\n- ``B``\n- ``[diff(A,B), p(A, B)]``\n\nEach of these element is then plotted with the corresponding _ax_ function.\nThe _ax_ function calls _plt_ for each of its input elements. Thus, the\nfunctions executed are:\n\n#. plot([A, B, [diff(A,B), p(A, B)]])\n#. ---> _ax_(A)\n#. ------> _plt_(A)\n#. ---> _ax_(B)\n#. ------> _plt_(B)\n#. ---> _ax_([diff(A,B), p(A, B)])\n#. ------> _plt_(diff(A,B))\n#. 
------> _plt_(p(A, B))\n\n\nVision\n------\n\nModularize organization\n\nData organizers:\n Take data from input arguments and organize it into axes and layers\nStyle mappers:\n Take data and input arguments and determine matplotlib parameters\nFigure components\n Manage styling and interaction for figure properties (XAxisMixin,\n TimeController, ...)\n\n\"\"\"\nfrom __future__ import annotations\nimport __main__\n\nfrom collections.abc import Iterable\nfrom collections import defaultdict\nfrom copy import copy\nfrom dataclasses import dataclass, replace\nfrom enum import Enum, auto\nfrom functools import cached_property, reduce\nfrom itertools import chain, cycle, repeat\nfrom logging import getLogger\nimport math\nfrom numbers import Number\nimport os\nimport re\nimport time\nfrom typing import Any, Callable, Dict, Iterator, List, Literal, Optional, Sequence, Tuple, Union\nimport weakref\n\nimport matplotlib as mpl\nimport matplotlib.axes\nimport matplotlib.font_manager\nimport matplotlib.patches\nimport matplotlib.text\nfrom matplotlib.colors import Colormap\nfrom matplotlib.figure import SubplotParams\nfrom matplotlib.ticker import FuncFormatter\nimport numpy as np\n\nfrom .._celltable import Celltable\nfrom .._colorspaces import LocatedColormap, symmetric_cmaps, zerobased_cmaps, ALPHA_CMAPS\nfrom .._config import CONFIG\nfrom .._data_obj import Dimension, Dataset, Factor, Interaction, NDVar, Var, Case, UTS, NDVarArg, CategorialArg, IndexArg, CellArg, NDVarTypes, ascategorial, asndvar, assub, isnumeric, isdataobject, combine_cells, cellname\nfrom .._utils.notebooks import use_inline_backend\nfrom .._stats import testnd\nfrom .._utils import IS_WINDOWS, intervals, ui\nfrom .._ndvar import erode, resample\nfrom .._text import enumeration, ms\nfrom ..fmtxt import FMTextArg, Image, asfmtext, asfmtext_or_none\nfrom ..table import melt_ndvar\nfrom ._decorations import mark_difference\nfrom ._styles import Style, find_cell_styles\nfrom ._utils import adjust_hsv\n\n\n# constants\nPOINT = 0.013888888888898\n\n# defaults\ndefaults = {'maxw': 16, 'maxh': 10}\n\n# Types\nCMapArg = Any\nColorArg = Any\nLegendArg = Optional[Union[str, int, Tuple[float, float], bool]]\n\n\nclass PlotType(Enum):\n GENERAL = auto()\n LEGACY = auto()\n LINE = auto()\n IMAGE = auto()\n CONTOUR = auto()\n\n\ndef do_autorun(run=None):\n # http://stackoverflow.com/a/2356420/166700\n if run is not None:\n return run\n elif CONFIG['autorun'] is None:\n return not hasattr(__main__, '__file__')\n else:\n return CONFIG['autorun']\n\n\ndef mpl_font_size(key: str) -> float:\n \"Font size in inches\"\n p = matplotlib.font_manager.FontProperties(size=mpl.rcParams[key])\n return p.get_size() * POINT\n\n\ndef inch_to_figure(figure: matplotlib.figure.Figure, x: float = 0, y: float = 0):\n \"Transform (x, y) vector in inches to figure coordinates\"\n coords = figure.dpi_scale_trans.transform((x, y))\n return figure.transFigure.inverted().transform(coords)\n\n\nDISPLAY_UNIT = {\n 's': 'ms',\n 'V': 'µV',\n 'T': 'fT',\n 'sensor': int,\n}\nUNIT_FORMAT = {\n 'A': 1,\n 'Am': 1,\n 'V': 1,\n 'ms': 1e3,\n 'mV': 1e3,\n 'µV': 1e6,\n 'pT': 1e12,\n 'fT': 1e15,\n 'dSPM': 1,\n 'p': 1,\n 'T': 1,\n 'n': int, # %i format\n 'normalized': 1,\n int: int,\n}\nSCALE_FORMATTERS = {\n 1: None,\n 1e3: FuncFormatter(lambda x, pos: '%g' % (1e3 * x)),\n 1e6: FuncFormatter(lambda x, pos: '%g' % (1e6 * x)),\n 1e9: FuncFormatter(lambda x, pos: '%g' % (1e9 * x)),\n 1e12: FuncFormatter(lambda x, pos: '%g' % (1e12 * x)),\n 1e15: FuncFormatter(lambda x, pos: '%g' % (1e15 
* x)),\n int: FuncFormatter(lambda x, pos: '%i' % round(x)),\n}\nDEFAULT_CMAPS = {\n 'B': 'xpolar',\n 'V': 'xpolar',\n 'p': 'sig',\n 'f': 'viridis',\n 'r': 'xpolar',\n 't': 'xpolar',\n}\n\nINITIAL_RC = mpl.rcParams.copy()\ndel INITIAL_RC['backend']\n\n\ndef reset_rc():\n \"Reset matplotlib rc-parameters to state at Eelbrain initialization\"\n mpl.rcParams.update(INITIAL_RC)\n\n\nclass AxisScale:\n \"\"\"Find matching number formatter and label for display unit (!= data unit)\n\n Parameters\n ----------\n v\n Display unit or scale of the axis, or daat to infer these. See\n ``unit_format`` dict above for options.\n label\n If ``label is True``, try to infer a label from ``v``.\n \"\"\"\n def __init__(\n self,\n v: Union[NDVar, Var, Number, str, 'PlotData'],\n label: Union[bool, str, Sequence[str]] = True,\n ):\n if isinstance(v, str):\n data_unit = None\n meas = None\n unit = v\n scale = UNIT_FORMAT.get(v, 1)\n elif isinstance(v, Number):\n data_unit = None\n meas = None\n unit = None\n scale = v\n else:\n if isnumeric(v):\n meas = v.info.get('meas')\n data_unit = v.info.get('unit')\n elif isinstance(v, PlotData):\n meas = v.meas\n data_unit = v.unit\n else:\n raise TypeError(f\"unit={v!r}\")\n\n if data_unit in DISPLAY_UNIT:\n unit = DISPLAY_UNIT[data_unit]\n scale = UNIT_FORMAT[unit]\n if data_unit in UNIT_FORMAT:\n scale /= UNIT_FORMAT[data_unit]\n else:\n scale = 1\n unit = data_unit\n self.data_unit = data_unit # None | str\n self.display_unit = unit\n # ScalarFormatter: disabled because it always used e notation in status bar\n # (needs separate instance because it adapts to data)\n # fmt = ScalarFormatter() if scale == 1 else scale_formatters[scale]\n self.formatter = SCALE_FORMATTERS[scale] # Matplotlib tick formatter\n\n if label is True:\n if meas and unit and meas not in unit:\n label = f'{meas} [{unit}]'\n elif unit:\n label = unit\n elif meas:\n label = meas\n elif isinstance(v, PlotData):\n label = v.default_y_label\n elif isnumeric(v):\n label = v.name\n else:\n label = None\n self.label = label\n\n\ndef find_uts_hlines(ndvar):\n \"\"\"Find horizontal lines for uts plots (based on contours)\n\n Parameters\n ----------\n ndvar : NDVar\n Data to be plotted.\n\n Returns\n -------\n h_lines : iterator\n Iterator over (y, kwa) tuples.\n \"\"\"\n contours = ndvar.info.get('contours', None)\n if contours:\n for level in sorted(contours):\n args = contours[level]\n if isinstance(args, dict):\n yield level, args.copy()\n else:\n yield level, {'color': args}\n\n\ndef find_uts_ax_vlim(\n layers: Sequence[NDVar],\n vlims: Dict = (),\n) -> (Optional[float], Optional[float]):\n \"\"\"Find y axis limits for uts axes\n\n Parameters\n ----------\n layers\n Data to be plotted.\n vlims\n Vmax and vmin values by (meas, cmap).\n\n Returns\n -------\n bottom\n Lowest value on y axis.\n top\n Highest value on y axis.\n \"\"\"\n bottom = None\n top = None\n for ndvar in layers:\n meas = ndvar.info.get('meas')\n if meas in vlims:\n bottom_, top_ = vlims[meas]\n if bottom is None:\n bottom = bottom_\n elif bottom_ != bottom:\n bottom = min(bottom, bottom_)\n if top is None:\n top = top_\n elif top_ != top:\n top = max(top, top_)\n\n return bottom, top\n\n\ndef find_fig_cmaps(\n epochs: Sequence[Sequence[NDVar]],\n cmap: Union[dict, CMapArg] = None,\n alpha: bool = False,\n) -> Dict[str, CMapArg]:\n \"\"\"Find cmap for every meas\n\n Parameters\n ----------\n epochs\n All NDVars in the plot.\n cmap\n Use this instead of the default for the first ``meas`` (for user\n argument).\n alpha\n If possible, use 
cmaps with alpha.\n\n Returns\n -------\n cmaps\n {meas: cmap} dict for all meas.\n \"\"\"\n if isinstance(cmap, dict):\n out = cmap.copy()\n cmap = None\n else:\n out = {}\n\n for ndvar in chain(*epochs):\n meas = ndvar.info.get('meas')\n\n if meas in out and out[meas]:\n pass\n elif cmap is not None:\n out[meas] = cmap\n cmap = None\n elif 'cmap' in ndvar.info:\n out[meas] = ndvar.info['cmap']\n else:\n out[meas] = None\n\n for k in out.keys():\n if out[k] is None:\n out[k] = DEFAULT_CMAPS.get(meas, 'xpolar')\n # replace with cmap with alpha\n if alpha and out[k] in ALPHA_CMAPS:\n out[k] = ALPHA_CMAPS[out[k]]\n\n return out\n\n\ndef find_fig_contours(epochs, vlims, contours_arg):\n \"\"\"Find contour arguments for every meas type\n\n Parameters\n ----------\n epochs : list of list of NDVar\n Data to be plotted.\n vlims : dist\n Vlims dict (used to interpret numerical arguments)\n contours_arg : int | sequence | dict\n User argument. Can be an int (number of contours), a sequence (values\n at which to draw contours), a kwargs dict (must contain the \"levels\"\n key), or a {meas: kwargs} dictionary.\n\n Returns\n -------\n contours : dict\n {meas: kwargs} mapping for contour plots.\n\n Notes\n -----\n The NDVar's info dict contains default arguments that determine how the\n NDVar is plotted as base and as overlay. In case of insufficient\n information, defaults apply. On the other hand, defaults can be overridden\n by providing specific arguments to plotting functions.\n \"\"\"\n if isinstance(contours_arg, dict) and 'levels' not in contours_arg:\n out = contours_arg.copy()\n contours_arg = None\n else:\n out = {}\n\n for ndvars in epochs:\n for layer, ndvar in enumerate(ndvars):\n meas = ndvar.info.get('meas')\n if meas in out:\n continue\n\n if contours_arg is not None:\n param = contours_arg\n contours_arg = None\n else:\n if layer: # overlay\n kind = ndvar.info.get('overlay', ('contours',))\n else:\n kind = ndvar.info.get('base', ())\n\n if 'contours' in kind:\n param = ndvar.info.get('contours', None)\n if layer:\n param = ndvar.info.get('overlay_contours', param)\n else:\n param = ndvar.info.get('base_contours', param)\n\n if isinstance(param, dict) and 'levels' not in param:\n levels = sorted(param)\n colors = [param[v] for v in levels]\n param = {'levels': levels, 'colors': colors}\n else:\n param = None\n\n if param is None:\n out[meas] = None\n elif isinstance(param, dict):\n out[meas] = param\n elif isinstance(param, int):\n vmin, vmax = vlims[meas]\n out[meas] = {'levels': np.linspace(vmin, vmax, param),\n 'colors': 'k'}\n else:\n out[meas] = {'levels': tuple(param), 'colors': 'k'}\n\n return out\n\n\ndef find_fig_vlims(plots, vmax=None, vmin=None, cmaps=None):\n \"\"\"Find vmin and vmax parameters for every (meas, cmap) combination\n\n Parameters\n ----------\n plots : nested list of NDVar\n Unpacked plot data.\n vmax : None | dict | scalar\n Dict: predetermined vlims (take precedence). Scalar: user-specified\n vmax parameter (used for for the first meas kind).\n vmin : None | scalar\n User-specified vmin parameter. 
If vmax is user-specified but vmin is\n None, -vmax is used.\n cmaps : dict\n If provided, vlims will be fixed to match symmetric or 0-based cmaps.\n\n Returns\n -------\n vlims : dict\n Dictionary of im limits: {meas: (vmin, vmax)}.\n \"\"\"\n vlims = {}\n if isinstance(vmax, dict):\n vlims.update(vmax)\n ndvars = [v for v in chain.from_iterable(plots) if v.info.get('meas') not in vlims]\n else:\n ndvars = [*chain.from_iterable(plots)]\n\n if vmin is None and vmax is not None:\n if cmaps is None and any(v.min() < 0 for v in ndvars):\n vmin = -vmax\n else:\n vmin = 0\n\n # apply user specified vlim\n if vmin is not None or vmax is not None:\n meas = ndvars[0].info.get('meas')\n if vmax is None:\n meas_ndvars = [v for v in ndvars if v.info.get('meas') == meas]\n for ndvar in meas_ndvars:\n _, vmax_ = find_vlim_args(ndvar)\n vmax = vmax_ if vmax is None else max(vmax, vmax_)\n\n vlims[meas] = (vmin, vmax)\n ndvars = [v for v in ndvars if v.info.get('meas') != meas]\n\n # for other meas, fill in data limits\n for ndvar in ndvars:\n meas = ndvar.info.get('meas')\n vmin, vmax = find_vlim_args(ndvar)\n if meas in vlims:\n vmin_, vmax_ = vlims[meas]\n vmin = vmin if vmin_ is None else min(vmin, vmin_)\n vmax = vmax if vmax_ is None else max(vmax, vmax_)\n\n if vmin == vmax:\n vmin -= 1\n vmax += 1\n vlims[meas] = (vmin, vmax)\n\n # fix vlims based on cmaps\n if cmaps is not None:\n for meas in vlims.keys():\n vmin, vmax = vlims[meas]\n vlims[meas] = fix_vlim_for_cmap(vmin, vmax, cmaps[meas])\n\n return vlims\n\n\ndef find_vlim_args(ndvar, vmin=None, vmax=None):\n if vmax is None:\n vmax = ndvar.info.get('vmax', None)\n if vmin is None:\n vmin = ndvar.info.get('vmin', None)\n\n if vmax is None or vmin is None:\n xmax = np.nanmax(ndvar.x)\n if np.ma.isMaskedArray(xmax):\n xmax = xmax.data\n xmin = np.nanmin(ndvar.x)\n if np.ma.isMaskedArray(xmin):\n xmin = xmin.data\n abs_max = max(abs(xmax), abs(xmin)) or 1e-14\n if np.isnan(abs_max):\n raise ValueError(f\"Can't plot all NaN input\")\n scale = math.floor(np.log10(abs_max))\n if vmax is None:\n vmax = math.ceil(xmax * 10 ** -scale) * 10 ** scale\n if vmin is None:\n vmin = math.floor(xmin * 10 ** -scale) * 10 ** scale\n\n return vmin, vmax\n\n\ndef fix_vlim_for_cmap(vmin, vmax, cmap):\n \"Fix the vmin value to yield an appropriate range for the cmap\"\n if isinstance(cmap, LocatedColormap):\n vmin, vmax = cmap.vmin, cmap.vmax\n is_symmetric = cmap.symmetric\n starts_at_zero = False\n else:\n if isinstance(cmap, Colormap):\n cmap = cmap.name\n is_symmetric = cmap in symmetric_cmaps \n starts_at_zero = cmap in zerobased_cmaps\n\n if is_symmetric:\n if vmax is None and vmin is None:\n pass\n elif vmin is None:\n vmax = abs(vmax)\n vmin = -vmax\n elif vmax is None:\n vmax = abs(vmin)\n vmin = -vmax\n else:\n vmax = max(abs(vmax), abs(vmin))\n vmin = -vmax\n elif starts_at_zero:\n vmin = 0\n return vmin, vmax\n\n\ndef find_data_dims(\n ndvar: NDVar,\n dims: Union[int, Tuple[str, ...]],\n extra_dim: str = None,\n) -> Tuple[Union[str, None], List[str]]:\n \"\"\"Find dimensions in data.\n\n Raise a ValueError if the dimensions don't match, except when the ``case``\n dimension is omitted in ``dims``.\n\n Parameters\n ----------\n ndvar : NDVar\n NDVar instance to query.\n dims : int | tuple of str\n The requested dimensions.\n extra_dim : str\n Dimension that will be removed by other operation (e.g. 
``xax``).\n\n Returns\n -------\n agg : None | str\n Dimension to aggregate over.\n dims : list of str\n Dimension names with all instances of ``None`` replaced by a string.\n \"\"\"\n if isinstance(dims, int):\n if extra_dim:\n dims += 1\n\n dimnames = list(ndvar.dimnames)\n if ndvar.ndim == dims:\n agg = None\n elif ndvar.ndim == dims + 1:\n for agg in dimnames:\n if agg != extra_dim:\n break\n dimnames.remove(agg)\n else:\n raise ValueError(f\"y={ndvar} has wrong number of dimensions; {dims} or {dims + 1} required\")\n else:\n required_dims = (extra_dim, *dims) if extra_dim else dims\n if ndvar.ndim == len(required_dims):\n agg = None\n dimnames = list(ndvar.get_dimnames(required_dims))\n elif ndvar.ndim == len(required_dims) + 1:\n if any(d is None for d in required_dims):\n if ndvar.has_case and 'case' not in required_dims:\n agg = 'case'\n else:\n raise ValueError(f\"y={ndvar} is ambiguous for required dimensions {required_dims}\")\n else:\n agg = None\n dimnames = list(ndvar.get_dimnames((agg, *required_dims)))\n agg = dimnames.pop(0)\n else:\n raise ValueError(f\"y={ndvar} has wrong dimensions; {required_dims} or one more required\")\n\n if extra_dim:\n dimnames.remove(extra_dim)\n return agg, dimnames\n\n\ndef find_labels(\n cells: Sequence[CellArg],\n labels_arg: Dict[CellArg, str] = None,\n delim: str = ' ',\n) -> Dict[CellArg, str]:\n if not labels_arg:\n return {cell: cellname(cell, delim) for cell in cells}\n labels = {}\n for cell in cells:\n if cell in labels_arg:\n label = labels_arg[cell]\n elif isinstance(cell, str):\n label = cell\n elif isinstance(cell, tuple):\n label = cellname([labels_arg.get(item, item) for item in cell], delim)\n else:\n raise TypeError(f\"{cell=}\")\n labels[cell] = label\n return labels\n\n\ndef brain_data(\n data: Union[NDVar, testnd.NDTest],\n):\n # for GlassBrain and surfer brain\n if isinstance(data, testnd.NDDifferenceTest):\n return data.masked_difference()\n else:\n return asndvar(data)\n\n\ndef butterfly_data(\n data: Union[NDVar, testnd.NDTest],\n hemi: str,\n resample_: int = None,\n colors: bool = False,\n return_vector_data: bool = False,\n):\n \"\"\"Data for plotting butterfly plot with brain\n\n Returns\n -------\n hemis : list of str\n Hemispheres in the data.\n butterfly_data :\n Data for Butterfly plot.\n brain_data :\n Data for brain plot.\n \"\"\"\n # find input type\n if isinstance(data, NDVar):\n y = data\n kind = 'ndvar'\n elif isinstance(data, testnd.NDDifferenceTest):\n y = data.masked_difference()\n kind = 'ndvar'\n else:\n raise TypeError(f\"ndvar={data!r}\")\n source = y.get_dim('source')\n\n # find samplingrate\n if resample_ is not None:\n raise NotImplementedError(f\"resample_={resample_}\")\n\n # find hemispheres to include\n if hemi is None:\n hemis = []\n if source.lh_n:\n hemis.append('lh')\n if source.rh_n:\n hemis.append('rh')\n elif hemi in ('lh', 'rh'):\n hemis = [hemi]\n else:\n raise ValueError(\"hemi=%r\" % (hemi,))\n\n if kind == 'ndvar':\n if y.has_case:\n y = y.mean('case')\n if resample_:\n y = resample(y, resample_, window='hamming')\n if y.has_dim('space'):\n if return_vector_data:\n brain_data = y\n y = y.norm('space')\n else:\n y = y.norm('space')\n brain_data = y\n else:\n brain_data = y\n bfly_data = [y.sub(source=hemi, name=hemi.capitalize()) for hemi in hemis]\n elif kind == 'test':\n sig = data.p <= 0.05\n y_magnitude = y.rms('time')\n # resample\n if resample_:\n y = resample(y, resample_, window='hamming')\n sig = resample(sig, resample_) > 0.5\n brain_data = y.mask(~sig)\n # mask\n non_sig
= erode(~sig, 'time')\n y_sig = y.mask(non_sig)\n y_ns = y.mask(sig)\n # line-styles\n from ._colors import Style\n if colors:\n lh_color = '#046AAD'\n rh_color = '#A60628'\n line_color_sig = {'lh': lh_color, 'rh': rh_color}\n line_color_ns = {'lh': adjust_hsv(lh_color, 0, -0.5, -0.),\n 'rh': adjust_hsv(rh_color, 0, -0.7, -0.)}\n else:\n color_sig = (0,) * 3\n color_ns = (.7,) * 3\n line_color_sig = {'lh': color_sig, 'rh': color_sig}\n line_color_ns = {'lh': color_ns, 'rh': color_ns}\n linestyle_ns = {'linewidth': 0.2, 'color': line_color_ns, 'alpha': 0.2}\n linestyle_sig = {'linewidth': 0.2, 'color': line_color_sig, 'alpha': 1.0}\n # layer-data\n axes = []\n for hemi in hemis:\n # z-order\n y_mag = y_magnitude.sub(source=hemi)\n z_order = dict(zip(y_mag.source, -y_mag.x.argsort()))\n # data\n layers = []\n for y, linestyle in ((y_ns, linestyle_ns), (y_sig, linestyle_sig)):\n kwargs = {'zorder': z_order, **linestyle}\n layers.append(DataLayer(y.sub(source=hemi), PlotType.LINE, kwargs))\n axes.append(AxisData(layers))\n bfly_data = PlotData(axes, ('time', 'source'), plot_names=hemis)\n else:\n raise RuntimeError(f\"kind={kind}\")\n return hemis, bfly_data, brain_data\n\n\ndef pop_if_dict(kwargs, key):\n \"Helper for artist-specific matplotlib kwargs\"\n if key in kwargs and isinstance(kwargs[key], dict):\n return kwargs.pop(key)\n\n\ndef set_dict_arg(key, arg, line_dim_obj, artists, legend_handles=None):\n \"Helper for artist-specific matplotlib kwargs\"\n set_attr_name = 'set_' + key\n for dim_index, value in arg.items():\n index = line_dim_obj._array_index(dim_index)\n if isinstance(index, int):\n key_artists = [artists[index]]\n else:\n key_artists = artists[index]\n\n if not key_artists:\n continue\n\n for artist in key_artists:\n getattr(artist, set_attr_name)(value)\n\n if legend_handles is not None:\n for artist in key_artists:\n artist.set_label(dim_index)\n legend_handles[dim_index] = artist\n\n\n_remap_args = {'c': 'color'}\n\n\ndef _dict_arg(arg: dict = None) -> dict:\n if arg is None:\n return {}\n elif any(k in arg for k in _remap_args):\n return {_remap_args.get(k, k): v for k, v in arg.items()}\n else:\n return arg\n\n\n@dataclass(eq=False)\nclass Layer:\n y: NDVar\n plot_type: PlotType = PlotType.GENERAL\n\n def bin(self, bin_length, tstart, tstop):\n raise NotImplementedError\n\n def sub_time(self, time: float, data_only: bool = False):\n raise NotImplementedError\n\n def for_plot(self, plot_type: PlotType) -> Iterator['DataLayer']:\n raise NotImplementedError\n\n\n@dataclass(eq=False)\nclass DataLayer(Layer):\n \"\"\"Data for one subplot layer\"\"\"\n _plot_args: dict = None\n _plot_args_2: dict = None  # alternate (contour plot of IMAGE layer)\n _bin_func: callable = np.mean\n\n def __post_init__(self):\n self._plot_args = _dict_arg(self._plot_args)\n if self.plot_type == PlotType.IMAGE:\n self._plot_args_2 = _dict_arg(self._plot_args_2)\n elif self._plot_args_2:\n raise TypeError(f\"plot_args_2={self._plot_args_2!r} for {self.plot_type}\")\n\n def plot_args(self, kwargs: dict) -> dict:\n # needs to be a copy?\n return {**_dict_arg(kwargs), **self._plot_args}\n\n def contour_plot_args(self, contours):\n out = {}\n # contours arg\n meas = self.y.info.get('meas')\n if meas in contours:\n if contours[meas] is not None:\n out.update(contours[meas])\n # layer\n if self.plot_type == PlotType.IMAGE:\n out.update(self._plot_args_2)\n elif self.plot_type == PlotType.CONTOUR:\n out.update(self._plot_args)\n else:\n raise RuntimeError(f\"layer of type {self.plot_type}\")\n return
out\n\n def im_plot_args(self, vlims: dict, cmaps: dict) -> dict:\n assert self.plot_type == PlotType.IMAGE\n meas = self.y.info.get('meas')\n if meas in cmaps:\n cmap = cmaps[meas]\n elif 'cmap' in self.y.info:\n cmap = self.y.info['cmap']\n else:\n cmap = DEFAULT_CMAPS.get(meas, 'xpolar')\n\n if meas in vlims:\n vmin, vmax = vlims[meas]\n else:\n vmin, vmax = find_vlim_args(self.y)\n vmin, vmax = fix_vlim_for_cmap(vmin, vmax, cmap)\n return {'cmap': cmap, 'vmin': vmin, 'vmax': vmax, **self._plot_args}\n\n def for_plot(self, plot_type: PlotType) -> Iterator['DataLayer']:\n if self.plot_type == plot_type:\n yield self\n elif not np.ma.isMaskedArray(self.y.x):\n yield DataLayer(self.y, plot_type, self._plot_args)\n elif plot_type == PlotType.LEGACY:\n yield DataLayer(self.y.unmask(), plot_type, self._plot_args)\n elif self.plot_type != PlotType.GENERAL:\n raise RuntimeError(f\"Invalid PlotData conversion: {self.plot_type} -> {plot_type}\")\n elif plot_type == PlotType.LINE:\n un_mask = NDVar(~self.y.x.mask, self.y.dims)\n # kwargs = {}\n if self.y.has_dim('time'):\n un_mask = erode(un_mask, 'time')\n # if self.y.ndim == 2:\n # mag = self.y.rms('time')\n # z_dim = mag.dimnames[0]\n # kwargs['zorder'] = dict(zip(mag.get_dim(z_dim), -mag.x.argsort()))\n y_masked = self.y.unmask().mask(un_mask)\n args_main = {'alpha': 1., 'zorder': 1}\n args_masked = {'alpha': 0.4, 'color': (.7, .7, .7), 'zorder': 0}\n for y, args in ((self.y, args_main), (y_masked, args_masked)):\n yield DataLayer(y, plot_type, args)\n elif plot_type == PlotType.IMAGE:\n x = NDVar(self.y.x.data, self.y.dims, self.y.name, self.y.info)\n yield DataLayer(x, PlotType.IMAGE)\n x = NDVar(1. - self.y.x.mask, self.y.dims, self.y.name, {'meas': 'mask'})\n yield DataLayer(x, PlotType.CONTOUR, {'levels': [0.5], 'colors': ['black']}, _bin_func=np.max)\n else:\n raise RuntimeError(f\"plot_type={plot_type!r}\")\n\n def sub_time(self, time: float, data_only: bool = False):\n y = self.y.sub(time=time)\n if data_only:\n return y\n else:\n return DataLayer(y, self.plot_type, self._plot_args, self._plot_args_2, self._bin_func)\n\n def bin(self, bin_length, tstart, tstop):\n y = self.y.bin(bin_length, tstart, tstop, self._bin_func)\n return replace(self, y=y)\n\n\n@dataclass(eq=False)\nclass StatLayer(Layer):\n style: Style = None\n ct: Celltable = None\n cell: CellArg = None\n mask: NDVar = None  # mask needs to be applied to stats\n mask_missing: bool = True\n\n def _apply_mask(self, y: np.ndarray) -> np.ndarray:\n if self.mask is None:\n return y\n if np.ma.isMaskedArray(y):\n y = y.data\n return NDVar(y, self.y.dims[1:]).mask(self.mask, missing=self.mask_missing).x\n\n def get_statistic(self, func: Callable = np.mean) -> np.ndarray:\n return self._apply_mask(self.ct._get_func(self.cell, func))\n\n def get_dispersion(self, spec, pool) -> np.ndarray:\n return self._apply_mask(self.ct._get_dispersion(self.cell, spec, pool))\n\n def for_plot(self, plot_type: PlotType) -> Iterator['DataLayer']:\n if self.plot_type == plot_type:\n yield self\n elif self.mask is None:\n yield replace(self, plot_type=plot_type)\n elif plot_type == PlotType.LINE:\n inverse_mask = ~self.mask\n if self.y.has_dim('time'):\n inverse_mask = erode(inverse_mask, 'time')\n yield replace(self, plot_type=plot_type, style=self.style.masked_style, mask=inverse_mask, mask_missing=False)\n yield replace(self, plot_type=plot_type)\n else:\n raise NotImplementedError\n\n\n@dataclass(eq=False)\nclass AxisData:\n \"\"\"Represent one axis (multiple layers)\"\"\"\n layers: List[Layer]\n
title: str = None\n\n def __iter__(self):\n return iter(self.layers)\n\n @property\n def y0(self):\n for layer in self.layers:\n return layer.y\n raise IndexError(\"No data\")\n\n def for_plot(self, plot_type: PlotType) -> 'AxisData':\n return replace(self, layers=[l for layer in self.layers for l in layer.for_plot(plot_type)])\n\n def bin(self, bin_length, tstart, tstop):\n return replace(self, layers=[l.bin(bin_length, tstart, tstop) for l in self.layers])\n\n def sub_time(self, time: float, data_only: bool = False):\n axis = []\n for layer in self.layers:\n if time in layer.y.time:\n axis.append(layer.sub_time(time, data_only))\n if data_only:\n return axis\n else:\n return replace(self, layers=axis)\n\n\ndef x_arg(x: CategorialArg):\n if isinstance(x, str) and x.startswith('.'):\n return None, x\n else:\n return x, None\n\n\ndef combine_x_args(x, xax):\n if x is None:\n return xax\n elif xax is None:\n return x\n elif not isinstance(xax, x.__class__):\n raise TypeError(f\"x={x}, xax={xax}: x and xax must be of same type or None\")\n elif isinstance(xax, str):\n return f\"({x}) % ({xax})\"\n else:\n return x % xax\n\n\n@dataclass(eq=False)\nclass PlotData:\n \"\"\"Organize nd-data for plotting\n\n Notes\n -----\n Two-stage initialization:\n\n - Initially independent of plot type\n - Layers contain masked NDVars\n\n - Converted into specific plot-types with :meth:`.for_plot` methods.\n - Masks converted into layers\n\n\n Additional consideration for statistics-plots (UTSStat)\n - Need central tendency and dispersion\n - Dispersion is not derivable from layer data (within-subject SEM)\n - Ideally potential to be dynamic (switch between viewing all data and statistic)\n\n Implementation\n - DataLayer subclass that keeps reference to a CellTable?\n\n \"\"\"\n plot_data: List[AxisData]  # Data for each axis\n dims: Sequence[str]  # Dimensions assigned to the axes\n frame_title: str = \"unnamed data\"  # Default window title\n plot_names: List[Union[str, None]] = None  # Titles for the plots (all non-None axes)\n plot_used: List[bool] = None  # List indicating which plot slots are used\n plot_type: PlotType = PlotType.GENERAL\n ct: Celltable = None\n x: Union[Factor, Interaction] = None\n xax: Union[Factor, Interaction] = None\n\n def __post_init__(self):\n self.n_plots = len(self.plot_data)\n if self.plot_used is None:\n self.plot_used = [True] * self.n_plots\n else:\n assert sum(self.plot_used) == self.n_plots\n if self.plot_names is None:\n self.plot_names = []\n for layers in self.plot_data:\n for layer in layers:\n if layer.y.name:\n self.plot_names.append(layer.y.name)\n break\n else:\n self.plot_names.append(None)\n\n def __repr__(self):\n desc = [f'{self.n_plots} plots']\n if not all(self.plot_used):\n desc.append(f'{len(self.plot_used) - self.n_plots} empty')\n desc.append(' x '.join(self.dims))\n return f\"<PlotData {self.frame_title!r}: {', '.join(desc)}>\"\n\n def _cannot_skip_axes(self, parent):\n if not all(self.plot_used):\n raise NotImplementedError(f\"y can not contain None for {parent.__class__.__name__} plot\")\n\n def __iter__(self):\n return iter(self.plot_data)\n\n @classmethod\n def from_args(\n cls,\n y: Union[NDVarArg, Sequence[NDVarArg]],\n dims: Union[int, Tuple[Optional[str], ...]],\n xax: CategorialArg = None,\n ds: Dataset = None,\n sub: IndexArg = None,\n ):\n \"\"\"Unpack the first argument to top-level NDVar plotting functions\n\n Parameters\n ----------\n y\n the first argument.\n dims\n The dimensions needed for the plotting function.
``None`` to indicate\n arbitrary dimensions.\n xax\n A model to divide ``y`` into different axes. ``xax`` is currently\n applied on the first level, i.e., it assumes that ``y``'s first\n dimension is cases.\n ds\n Dataset containing data objects which are provided as :class:`str`.\n sub\n Index selecting a subset of cases.\n\n Notes\n -----\n Ndvar plotting functions above 1-d UTS level should support the following\n API:\n\n - simple NDVar: summary ``plot(meg)``\n - by dim: each case ``plot(meg, '.case')``\n - NDVar and xax argument: summary for each ``plot(meg, subject)``\n - nested list of layers (e.g., ttest results: [c1, c0, [c1-c0, p]])\n \"\"\"\n if isinstance(y, cls):\n return y\n elif isinstance(y, AxisData):\n for layer in y.layers:\n dims = find_data_dims(layer.y, dims)\n return PlotData([y], dims)\n sub = assub(sub, ds)\n if hasattr(y, '_default_plot_obj'):\n ys = getattr(y, '_default_plot_obj')()\n else:\n ys = y\n\n if isinstance(ys, NDVarTypes):\n ys = (ys,)\n\n ax_names = None\n if xax is None:\n # y=[[y1], y2], xax=None\n axes = []\n for ax in ys:\n if ax is None:\n axes.append(None)\n elif isinstance(ax, NDVarTypes):\n ax = asndvar(ax, sub, ds)\n agg, dims = find_data_dims(ax, dims)\n layer = aggregate(ax, agg)\n axes.append([layer])\n else:\n layers = []\n for layer in ax:\n layer = asndvar(layer, sub, ds)\n agg, dims = find_data_dims(layer, dims)\n layers.append(aggregate(layer, agg))\n axes.append(layers)\n x_name = None\n # determine y names\n y_names = []\n for layers in axes:\n if layers is None:\n continue\n for layer in layers:\n if layer.name and layer.name not in y_names:\n y_names.append(layer.name)\n elif all(ax is None or isinstance(ax, NDVarTypes) for ax in ys):\n ys = [asndvar(layer, sub, ds) for layer in ys]\n y_names = [layer.name for layer in ys]\n layers = []\n if isinstance(xax, str) and xax.startswith('.'):\n # y=[y1, y2], xax='.dim'\n dimname, attr = re.match(r'\\.(\\w+)(?:\\.(\\w+))?$', xax).groups()\n xax_dim = indexes = dissolve_dim = None\n for layer in ys:\n dim = layer.get_dim(dimname)\n if xax_dim is None:\n xax_dim = dim\n if attr is None:\n indexes = dim\n dissolve_dim = dimname  # dissolved by indexing\n # axis labels\n unit = f' {xax_dim._axis_unit}' if xax_dim._axis_unit else ''\n ax_names = [f'{v}{unit}' for v in xax_dim]\n else:\n f = getattr(dim, attr)\n if not isinstance(f, Factor):\n raise ValueError(f'xax={xax!r}')\n indexes = [f == cell for cell in f.cells]\n ax_names = f.cells\n elif dim != xax_dim:\n raise ValueError(f\"y={y}, xax={xax!r}: dimension not equal on different y\")\n agg, dims = find_data_dims(layer, dims, dissolve_dim)\n layers.append([aggregate(layer.sub(**{dimname: i}), agg) for i in indexes])\n x_name = xax\n else:\n # y=[y1, y2], xax=categorial\n xax = ascategorial(xax, sub, ds)\n xax_indexes = [xax == cell for cell in xax.cells]\n for layer in ys:\n agg, dims = find_data_dims(layer, dims)\n layers.append([aggregate(layer.sub(index), agg) for index in xax_indexes])\n x_name = xax.name\n ax_names = [cellname(cell) for cell in xax.cells]\n axes = list(zip(*layers))\n else:\n raise TypeError(f\"{y=}, {xax=}: y can't be nested list if xax is specified, use single list\")\n\n if len(y_names) == 0:\n y_name = None\n elif len(y_names) == 1:\n y_name = y_names[0]\n else:\n y_name = ', '.join(y_names)\n\n use_axes = [ax is not None for ax in axes]\n axes = [AxisData([DataLayer(l) for l in ax]) for ax in axes if ax]\n title = frame_title(y_name, x_name)\n return cls(axes, dims, title, ax_names, use_axes)\n\n @classmethod\n
def from_stats(\n cls,\n y: Union[NDVarArg, Sequence[NDVarArg]],\n x: CategorialArg = None,\n xax: CategorialArg = None,\n match: CategorialArg = None,\n sub: IndexArg = None,\n ds: Dataset = None,\n dims: Tuple[Union[str, None]] = None,\n colors: dict = None,\n mask: Union[NDVar, Dict[CellArg, NDVar]] = None,\n ):\n if isinstance(y, (tuple, list)):\n if xax is not None:\n raise TypeError(f\"{y=}, {xax=}: xax cannot be specified with multiple y\")\n axes_data = [cls.from_stats(yi, x, xax, match, sub, ds, dims, colors, mask) for yi in y]\n axes = list(chain.from_iterable(ax.plot_data for ax in axes_data))\n return replace(axes_data[0], plot_data=axes, plot_used=None, plot_names=None)\n x, x_dim = x_arg(x)\n xax, xax_dim = x_arg(xax)\n if x_dim or xax_dim:\n if isinstance(y, NDVar):\n varname = Dataset.as_key(y.name)\n else:\n varname = y\n\n if x_dim:\n dim = x_dim[1:]\n ds = melt_ndvar(y, dim, ds=ds, varname=varname)\n y = varname\n x = combine_x_args(x, dim)\n\n if xax_dim:\n dim = xax_dim[1:]\n ds = melt_ndvar(y, dim, ds=ds, varname=varname)\n y = varname\n xax = combine_x_args(xax, dim)\n x_full = combine_x_args(x, xax)\n ct = Celltable(y, x_full, match, sub, ds=ds)\n # data dimensions\n agg, dims = find_data_dims(ct.y, dims, 'case')\n if agg:\n raise NotImplementedError\n # reconstruct x/xax\n if xax is None:\n ax_cells = [None]\n else:\n xax = ct._align(xax, ds=ds, coerce=ascategorial)\n ax_cells = xax.cells\n if x is not None:\n x = ct._align(x, ds=ds, coerce=ascategorial)\n title = frame_title(y, x, xax)\n # find styles\n styles = find_cell_styles(ct.cells, colors)\n # find masks\n if mask is None:\n masks = defaultdict(lambda: None)\n elif isinstance(mask, NDVar):\n masks = defaultdict(lambda: mask)\n elif isinstance(mask, dict):\n masks = defaultdict(lambda: None, **mask)\n else:\n raise TypeError(f\"{mask=}\")\n # assemble layers\n axes = []\n for ax_cell in ax_cells:\n if x is None:\n cells = [ax_cell]\n elif ax_cell is None:\n cells = x.cells\n else:\n x_cells = x.cells\n cells = [combine_cells(x_cell, ax_cell) for x_cell in x_cells]\n cells = [cell for cell in cells if cell in ct.data]\n layers = [StatLayer(ct.data[cell], style=styles[cell], ct=ct, cell=cell, mask=masks[cell]) for cell in cells]\n axes.append(AxisData(layers, cellname(ax_cell)))\n return cls(axes, dims, title, ct=ct, x=x, xax=xax)\n\n @classmethod\n def empty(cls, plots: Union[int, List[bool]], dims: Sequence[str], title: str):\n \"\"\"Empty PlotData object that can be filled by appending to layers\n\n Parameters\n ----------\n plots : int | list of bool\n Number of plots, or list of booleans indicating for each plot\n whether its slot is used.\n dims : sequence of str\n Names of the dimensions.\n title : str\n Data description for the plot frame.\n \"\"\"\n if isinstance(plots, int):\n plots = [AxisData([]) for _ in range(plots)]\n else:\n plots = [AxisData([]) if p else None for p in plots]\n return cls(plots, dims, title)\n\n @property\n def y0(self):\n for ax in self.plot_data:\n for layer in ax:\n return layer.y\n raise IndexError(\"No data\")\n\n @property\n def default_y_label(self):\n \"Y-label in case meas and unit are uninformative\"\n names = {l.y.name for ax in self.plot_data for l in ax}\n names.discard(None)\n if len(names) == 1:\n return names.pop()\n return None\n\n @property\n def meas(self):\n meass = {l.y.info.get('meas') for ax in self.plot_data for l in ax}\n meass.discard(None)\n if len(meass) == 1:\n return meass.pop()\n return None\n\n @property\n def unit(self):\n units =
{l.y.info.get('unit') for ax in self.plot_data for l in ax}\n units.discard(None)\n if len(units) == 1:\n return units.pop()\n return None\n\n @cached_property\n def data(self):\n \"For backwards compatibility with nested list of NDVar\"\n data = self.for_plot(PlotType.LEGACY)\n return [[l.y for l in layers] for layers in data.plot_data]\n\n @cached_property\n def time_dim(self):\n \"UTS dimension to expose for time slicer\"\n time_dims = [l.y.get_dim('time') for ax in self.plot_data for l in ax.layers if l.y.has_dim('time')]\n if time_dims:\n return reduce(UTS._union, time_dims)\n\n def for_plot(self, plot_type: PlotType) -> 'PlotData':\n if self.plot_type == plot_type:\n return self\n plot_data = [ax.for_plot(plot_type) for ax in self.plot_data]\n return replace(self, plot_data=plot_data, plot_type=plot_type)\n\n def bin(self, bin_length, tstart, tstop):\n axes = [ax.bin(bin_length, tstart, tstop) for ax in self.plot_data]\n return PlotData(axes, self.dims, self.frame_title, self.plot_names, self.plot_used, self.plot_type)\n\n def sub_time(self, time: float, data_only: bool = False):\n axes = [ax.sub_time(time, data_only) for ax in self.plot_data]\n if data_only:\n return axes\n else:\n dims = [dim for dim in self.dims if dim != 'time']\n return PlotData(axes, dims, self.frame_title, self.plot_names, self.plot_used, self.plot_type)\n\n\ndef aggregate(y, agg):\n return y if agg is None else y.mean(agg)\n\n\nclass FigureFrame:\n\n def __init__(self, figure):\n self.figure = figure\n self.canvas = self.figure.canvas\n self._background = None\n\n def Close(self):\n pass\n\n def SetStatusText(self, text):\n pass\n\n def Show(self):\n pass\n\n def redraw(self, axes=(), artists=()):\n \"Adapted duplicate of mpl_canvas.FigureCanvasPanel\"\n self.canvas.restore_region(self._background)\n for ax in axes:\n ax.draw_artist(ax)\n extent = ax.get_window_extent()\n self.canvas.blit(extent)\n for artist in artists:\n artist.axes.draw_artist(artist.axes)\n extent = artist.axes.get_window_extent()\n self.canvas.blit(extent)\n\n def store_canvas(self):\n self._background = self.canvas.copy_from_bbox(self.figure.bbox)\n\n\nclass MatplotlibFrame(FigureFrame):\n \"Cf. _wxgui.mpl_canvas\"\n def __init__(self, **fig_kwargs):\n \"Create self.figure and self.canvas attributes and return the figure\"\n from matplotlib import pyplot\n\n figure = pyplot.figure(**fig_kwargs)\n FigureFrame.__init__(self, figure)\n self._plt = pyplot\n\n def Close(self):\n self._plt.close(self.figure)\n\n def Show(self):\n if mpl.get_backend() == 'WXAgg' and do_autorun():\n self._plt.show()\n\n\ndef frame_title(y, x=None, xax=None):\n \"\"\"Generate frame title from common data structure\n\n Parameters\n ----------\n y : data-obj | str\n Dependent variable.\n x : data-obj | str\n Predictor.\n xax : data-obj | str\n Grouping variable for axes.\n \"\"\"\n if isdataobject(y):\n y = y.name\n if isdataobject(x):\n x = x.name\n if isdataobject(xax):\n xax = xax.name\n\n if xax is None:\n if x is None:\n return \"%s\" % (y,)\n else:\n return \"%s ~ %s\" % (y, x)\n elif x is None:\n return \"%s | %s\" % (y, xax)\n else:\n return \"%s ~ %s | %s\" % (y, x, xax)\n\n\nclass MatplotlibFigure:\n \"\"\"Wrap a matplotlib figure for FMText\"\"\"\n def __init__(self, figure):\n self.figure = figure\n\n def _asfmtext(\n self,\n rasterize: bool = None,\n close_figures: bool = None,\n ):\n if rasterize is None:\n format = None\n elif rasterize:\n format = 'png'\n else:\n format = 'svg'\n return self.image(format=format, close=close_figures)\n\n def close(self):\n from matplotlib import pyplot\n pyplot.close(self.figure)\n\n def image(self, name: str = None, format: str = None, close: bool = None):\n \"\"\"Create FMTXT Image from the figure\n\n Parameters\n ----------\n name\n Name for the file (without extension; default is 'image').\n format\n File format. For HTML, use ``svg`` for vector graphics and ``png``\n for pixel graphics. The default is ``svg`` and can be changed with\n :func:`configure`).\n close\n Close the figure after writing to the ``image``.
By default, this is\n ``True`` when in an inline context (Jupyter notebook), ``False``\n otherwise).\n\n Returns\n -------\n image : fmtxt.Image\n Image FMTXT object.\n \"\"\"\n if format is None:\n format = CONFIG['format']\n image = Image(name, format)\n self.figure.savefig(image, format=format)\n if close or (close is None and use_inline_backend()):\n self.close()\n return image\n\n\nclass EelFigure(MatplotlibFigure):\n \"\"\"Parent class for Eelbrain figures.\n\n In order to subclass:\n\n - find desired figure properties and then use them to initialize\n the _EelFigure superclass; then use the\n :py:attr:`_EelFigure.figure` and :py:attr:`_EelFigure.canvas` attributes.\n - end the initialization by calling `_EelFigure._show()`\n - add the :py:meth:`_fill_toolbar` method\n \"\"\"\n _default_xlabel_ax = -1\n _default_ylabel_ax = 0\n _make_axes = True\n _can_set_time = False\n _can_set_vlim = False\n _can_set_ylim = False\n _can_set_xlim = False\n _has_frame = False\n\n def __init__(self, data_desc: Optional[str], layout: BaseLayout):\n \"\"\"Parent class for Eelbrain figures.\n\n Parameters\n ----------\n data_desc\n Data description for frame title.\n layout\n Layout that determines figure dimensions.\n \"\"\"\n name = self.__class__.__name__\n desc = layout.name or data_desc\n self._title = f'{name}: {desc}' if desc else name\n\n # Use Eelbrain frame or pyplot\n if layout.user_axes:\n ax = layout.user_axes[0]\n frame = FigureFrame(ax.get_figure())\n elif CONFIG['eelbrain'] and not use_inline_backend():\n from .._wxgui import wx, get_app\n from .._wxgui.mpl_canvas import CanvasFrame\n get_app()\n pos = wx.DefaultPosition if layout.pos is None else layout.pos\n frame = CanvasFrame(title=self._title, eelfigure=self, pos=pos, **layout.fig_kwa())\n self._has_frame = True\n else:\n frame = MatplotlibFrame(**layout.fig_kwa())\n\n figure = frame.figure\n if layout.title:\n self._figtitle = figure.suptitle(layout.title)\n else:\n self._figtitle = None\n\n # make axes\n if self._make_axes:\n axes = layout.make_axes(figure)\n else:\n axes = []\n\n # store attributes\n MatplotlibFigure.__init__(self, figure)\n self._frame = frame\n self.axes = axes\n self.canvas = frame.canvas\n self._layout = layout\n self._last_draw_time = 1.\n self.__callback_key_press = {}\n self.__callback_key_release = {}\n\n # containers for hooks\n self._draw_hooks = []\n self._untight_draw_hooks = []\n\n # options\n self._draw_crosshairs = False\n self._crosshair_lines = None\n self._crosshair_axes = None\n\n # add callbacks\n self.canvas.mpl_connect('motion_notify_event', self._on_motion)\n self.canvas.mpl_connect('axes_leave_event', self._on_leave_axes)\n self.canvas.mpl_connect('resize_event', self._on_resize)\n self.canvas.mpl_connect('key_press_event', self._on_key_press)\n self.canvas.mpl_connect('key_release_event', self._on_key_release)\n\n def __repr__(self):\n title = self._frame.GetTitle() if self._has_frame else self._title\n return f'<{title}>'\n\n def _ipython_display_(self):\n from IPython.display import display\n display(self.figure)\n\n def _set_axtitle(\n self,\n axtitle: Union[bool, str, Iterator[str]] = None,\n data: PlotData = None,\n axes: Union[List[matplotlib.axes.Axes], int] = None,\n names: Sequence[str] = None,\n **kwargs,\n ):\n \"\"\"Set axes titles automatically\n\n Parameters\n ----------\n axtitle\n Plot parameter.\n data\n Plotted data (if available).\n axes\n Axes for which to set title (default is self.axes).
If an int,\n (n axes) the method does not set axes title but returns ``None``\n or a tuple of titles.\n names\n Instead of using ``epochs`` name attributes, use these names.\n ...\n Matplotlib ``Axes.set_title()`` parameters.\n \"\"\"\n if axtitle is False or axtitle is None:\n return\n\n if axes is None:\n axes = self.axes\n\n naxes = axes if isinstance(axes, int) else len(axes)\n\n if axtitle is True and naxes == 1:\n return\n elif axtitle is True or isinstance(axtitle, str):\n if names is None:\n if data is None:\n raise RuntimeError(f\"data=None and names=None with {axtitle=}\")\n names = data.plot_names\n\n if axtitle is True:\n axtitle = names\n else:\n axtitle = [axtitle.format(name=n) if n else None for n in names]\n\n if isinstance(axes, int):\n return axtitle\n\n for title, ax in zip(axtitle, axes):\n if title is not None:\n ax.set_title(asfmtext(title), **kwargs)\n\n def _show(self, crosshair_axes=None):\n if self._layout.user_axes:\n return\n if self._layout.tight:\n self._tight()\n\n if crosshair_axes is None:\n self._crosshair_axes = self.axes\n else:\n self._crosshair_axes = crosshair_axes\n\n self.draw()\n\n # Allow hooks to modify figure after first draw\n need_redraw = any([func() for func in self._draw_hooks])\n if not self._layout.tight:\n need_redraw = any([func() for func in self._untight_draw_hooks]) or need_redraw\n if need_redraw:\n self.draw()\n\n if CONFIG['show'] and self._layout.show:\n self._frame.Show()\n if self._has_frame and do_autorun(self._layout.run):\n from .._wxgui import run\n run()\n\n if self._has_frame and not self.canvas._background:\n self._frame.store_canvas()\n\n def _tight(self):\n \"Default implementation based on matplotlib\"\n try:\n self.figure.tight_layout()\n except ValueError as exception:\n getLogger('eelbrain').debug('tight-layout: %s', exception)\n\n if self._figtitle:\n trans = self.figure.transFigure.inverted()\n extent = self._figtitle.get_window_extent(self.figure.canvas.renderer)\n bbox = trans.transform(extent)\n t_bottom = bbox[0, 1]\n self.figure.subplots_adjust(top=1 - 2 * (1 - t_bottom))\n\n def _on_key_press(self, event):\n if event.key in self.__callback_key_press:\n self.__callback_key_press[event.key](event)\n event.guiEvent.Skip(False) # Matplotlib Skip()s all events\n\n def _on_key_release(self, event):\n if event.key in self.__callback_key_release:\n self.__callback_key_release[event.key](event)\n event.guiEvent.Skip(False)\n\n def _on_leave_axes(self, event):\n \"Update the status bar when the cursor leaves axes\"\n if self._frame is None:\n return\n self._frame.SetStatusText(self._on_leave_axes_status_text(event))\n if self._draw_crosshairs:\n self._remove_crosshairs(True)\n\n def _on_leave_axes_status_text(self, event):\n return '☺︎'\n\n def _on_motion(self, event):\n \"Update the status bar for mouse movement\"\n if self._frame is None:\n return\n redraw_axes = self._on_motion_sub(event)\n ax = event.inaxes\n # draw crosshairs\n if self._draw_crosshairs and ax in self._crosshair_axes:\n if self._crosshair_lines is None:\n self._crosshair_lines = tuple(\n (ax.axhline(event.ydata, color='k'),\n ax.axvline(event.xdata, color='k'))\n for ax in self._crosshair_axes)\n else:\n for hline, vline in self._crosshair_lines:\n hline.set_ydata([event.ydata, event.ydata])\n vline.set_xdata([event.xdata, event.xdata])\n redraw_axes.update(self._crosshair_axes)\n # update status bar\n self._frame.SetStatusText(self._on_motion_status_text(event))\n # redraw\n self.canvas.redraw(redraw_axes)\n\n @staticmethod\n def 
_on_motion_status_text(event):\n ax = event.inaxes\n if ax:\n return ('x = %s, y = %s' % (\n ax.xaxis.get_major_formatter().format_data_short(event.xdata),\n ax.yaxis.get_major_formatter().format_data_short(event.ydata)))\n return ''\n\n def _on_motion_sub(self, event):\n \"Subclass action on mouse motion, return set of axes to redraw\"\n return set()\n\n def _on_resize(self, event):\n if self._layout.tight:\n self._tight()\n\n def _register_key(self, key, press=None, release=None):\n if press:\n if key in self.__callback_key_press:\n raise RuntimeError(\"Attempting to assign key press %r twice\" %\n key)\n self.__callback_key_press[key] = press\n if release:\n if key in self.__callback_key_release:\n raise RuntimeError(\"Attempting to assign key release %r twice\" %\n key)\n self.__callback_key_release[key] = release\n\n def _remove_crosshairs(self, draw=False):\n if self._crosshair_lines is not None:\n for hline, vline in self._crosshair_lines:\n hline.remove()\n vline.remove()\n self._crosshair_lines = None\n if draw:\n self.canvas.redraw(self._crosshair_axes)\n\n def _fill_toolbar(self, tb):\n \"\"\"\n Add toolbar tools\n\n Subclasses should add their toolbar items in this function which\n is called by ``CanvasFrame.FillToolBar()``.\n \"\"\"\n pass\n\n def close(self):\n \"Close the figure.\"\n self._frame.Close()\n\n def _get_axes(self, axes):\n \"Iterate over axes corresponding to ``axes`` parameter\"\n if axes is None:\n return self.axes\n elif isinstance(axes, int):\n return self.axes[axes],\n else:\n return (self.axes[i] for i in axes)\n\n def _configure_axis(\n self,\n axis: str, # 'x' | 'y'\n ticklabels: Union[str, int, Sequence[int]], # where to show tick-labels\n params: Iterable, # (formatter, locator, label) for each Axes\n axes: List[matplotlib.axes.Axes] = None, # axes which to format\n ):\n if axes is None:\n axes = self.axes\n\n # find axes with tick-labels\n nax = len(axes)\n if isinstance(ticklabels, bool):\n show_ticklabels = [ticklabels] * nax\n elif isinstance(ticklabels, str):\n if ticklabels == 'bottom':\n if all(isinstance(ax, matplotlib.axes.SubplotBase) for ax in axes):\n subplotspecs = [ax.get_subplotspec() for ax in axes]\n bottom = min([spec.rowspan.stop for spec in subplotspecs])\n show_ticklabels = [spec.rowspan.stop == bottom for spec in subplotspecs]\n else:\n first = len(axes) - min(self._layout.ncol, nax)\n show_ticklabels = [i >= first for i in range(len(axes))]\n elif ticklabels == 'left':\n if all(isinstance(ax, matplotlib.axes.SubplotBase) for ax in axes):\n subplotspecs = [ax.get_subplotspec() for ax in axes]\n left = min([spec.colspan.start for spec in subplotspecs])\n show_ticklabels = [spec.colspan.start == left for spec in subplotspecs]\n else:\n ncol = self._layout.ncol or nax\n show_ticklabels = [i % ncol == 0 for i in range(len(axes))]\n elif ticklabels == 'all':\n show_ticklabels = [True] * nax\n elif ticklabels == 'none':\n show_ticklabels = [False] * nax\n else:\n raise ValueError(f\"ticklabels={ticklabels!r}\")\n else:\n show_ticklabels = [False] * nax\n if isinstance(ticklabels, int):\n show_ticklabels[ticklabels] = True\n else:\n for i in ticklabels:\n show_ticklabels[i] = True\n\n # parameter for hiding tick-labels\n if axis == 'y':\n tick_params = {'labelleft': False}\n else:\n tick_params = {'labelbottom': False}\n\n # format ticks\n labels = []\n for ax, (formatter, locator, label_), show_ticklabels_ in zip(axes, params, show_ticklabels):\n axis_ = ax.yaxis if axis == 'y' else ax.xaxis\n if locator:\n 
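# install the tick locator chosen for this axes (the formatter and\n # tick-label visibility are applied below)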
axis_.set_major_locator(locator)\n if formatter:\n axis_.set_major_formatter(formatter)\n if not show_ticklabels_:\n ax.tick_params(**tick_params)\n labels.append(label_)\n\n # set labels\n if any(labels):\n if len(set(labels)) == 1:\n # default positioning\n if axis == 'y':\n self.set_ylabel(labels[0])\n else:\n self.set_xlabel(labels[0])\n else:\n for ax, label in zip(axes, labels):\n if label:\n if axis == 'y':\n ax.set_ylabel(label)\n else:\n ax.set_xlabel(label)\n\n def _configure_axis_data(\n self,\n axis: str, # 'x' | 'y'\n data: NDVar, # data for default label\n label: Union[bool, str], # override label\n ticklabels: Union[str, int, Sequence[int]] = True, # where to show tick-labels\n axes: List[matplotlib.axes.Axes] = None, # axes which to format\n ):\n \"Configure an axis based on data\"\n scale = AxisScale(data, label)\n formatters = repeat(scale.formatter)\n if not isinstance(scale.label, str) and isinstance(scale.label, Iterable):\n labels = chain(scale.label, repeat(None))\n else:\n labels = repeat(scale.label)\n params = zip(formatters, repeat(None), labels)\n self._configure_axis(axis, ticklabels, params, axes)\n\n def _configure_axis_dim(\n self,\n axis: str, # 'x' | 'y'\n dim: Union[str, Dimension], # The dimension assigned to the axis\n label: Union[bool, str], # axis labale\n ticklabels: Union[str, int, Sequence[int]], # where to show tick-labels\n axes: List[matplotlib.axes.Axes] = None, # axes which to format\n scalar: bool = True,\n data: List = None,\n ):\n \"Configure an axis based on a dimension\"\n # Dimension objects\n if isinstance(dim, str):\n dims = [layers[0].get_dim(dim) for layers in data]\n else:\n dims = repeat(dim)\n params = (dim._axis_format(scalar, label) for dim in dims)\n self._configure_axis(axis, ticklabels, params, axes)\n\n def draw(self):\n \"(Re-)draw the figure (after making manual changes).\"\n if self._frame is None:\n return\n t0 = time.time()\n self._frame.canvas.draw()\n self._last_draw_time = time.time() - t0\n\n def draw_crosshairs(self, enable=True):\n \"\"\"Draw crosshairs under the cursor\n\n Parameters\n ----------\n enable : bool\n Enable drawing crosshairs (default True, set to False to disable).\n \"\"\"\n self._draw_crosshairs = enable\n if not enable:\n self._remove_crosshairs(True)\n\n def draw_outline(self, color='k', **kwargs):\n \"\"\"Draw the outline of the figure\n\n Mainly for fine-tuning the figure layout in Jupyter, which crops\n the display area based on figure elements rather than actual figure size.\n \"\"\"\n kwargs.setdefault('fc', 'none')\n artist = matplotlib.patches.Rectangle((0, 0), 1, 1, ec=color, **kwargs)\n self.figure.add_artist(artist)\n\n def save(self, *args, **kwargs):\n \"Short-cut for Matplotlib's :meth:`~matplotlib.figure.Figure.savefig()`\"\n self.figure.savefig(*args, **kwargs)\n\n def add_hline(self, y, axes=None, *args, **kwargs):\n \"\"\"Draw a horizontal line on one or more axes\n\n Parameters\n ----------\n y : scalar\n Level at which to draw the line.\n axes : int | list of int\n Which axes to mark (default is all axes).\n ...\n :meth:`matplotlib.axes.Axes.axhline` parameters.\n\n\n Notes\n -----\n See Matplotlib's :meth:`matplotlib.axes.Axes.axhline` for more\n arguments.\n \"\"\"\n for ax in self._get_axes(axes):\n ax.axhline(y, *args, **kwargs)\n self.draw()\n\n def add_hspan(self, bottom, top, axes=None, *args, **kwargs):\n \"\"\"Draw a horizontal bar on one or more axes\n\n Parameters\n ----------\n bottom : scalar\n Bottom end of the horizontal bar.\n top : scalar\n Top end of the 
horizontal bar.\n axes : int | list of int\n Which axes to mark (default is all axes).\n ...\n :meth:`matplotlib.axes.Axes.axhspan` parameters.\n\n\n Notes\n -----\n See Matplotlib's :meth:`matplotlib.axes.Axes.axhspan` for more\n arguments.\n """\n for ax in self._get_axes(axes):\n ax.axhspan(bottom, top, *args, **kwargs)\n self.draw()\n\n def add_vline(self, x, axes=None, *args, **kwargs):\n """Draw a vertical line on one or more axes\n\n Parameters\n ----------\n x : scalar\n Value at which to place the vertical line.\n axes : int | list of int\n Which axes to mark (default is all axes).\n ...\n :meth:`matplotlib.axes.Axes.axvline` parameters.\n\n\n Notes\n -----\n See Matplotlib's :meth:`matplotlib.axes.Axes.axvline` for more\n arguments.\n """\n for ax in self._get_axes(axes):\n ax.axvline(x, *args, **kwargs)\n self.draw()\n\n def add_vspan(self, xmin, xmax, axes=None, *args, **kwargs):\n """Draw a vertical bar on one or more axes\n\n Parameters\n ----------\n xmin : scalar\n Start value on the x-axis.\n xmax : scalar\n Last value on the x-axis.\n axes : int | list of int\n Which axes to mark (default is all axes).\n ...\n :meth:`matplotlib.axes.Axes.axvspan` parameters.\n\n\n Notes\n -----\n See Matplotlib's :meth:`matplotlib.axes.Axes.axvspan` for more\n arguments.\n """\n for ax in self._get_axes(axes):\n ax.axvspan(xmin, xmax, *args, **kwargs)\n self.draw()\n\n def set_name(self, name):\n """Set the figure window title"""\n plot_name = self.__class__.__name__\n self._frame.SetTitle(f'{plot_name}: {name}' if name else plot_name)\n\n def set_xtick_rotation(self, rotation):\n """Rotate every x-axis tick-label by an angle (counterclockwise, in degrees)\n\n Parameters\n ----------\n rotation : scalar\n Counterclockwise rotation angle, in degrees.\n """\n for ax in self.axes:\n for t in ax.get_xticklabels():\n t.set_rotation(rotation)\n self.draw()\n\n def set_xlabel(self, label: str, ax: int = None):\n """Set the label for the x-axis\n\n Parameters\n ----------\n label\n X-axis label.\n ax\n Axis on which to set the label (default is usually the last axis).\n """\n if ax is None:\n ax = self._default_xlabel_ax\n self.axes[ax].set_xlabel(label)\n\n def set_ylabel(self, label: str, ax: int = None):\n """Set the label for the y-axis\n\n Parameters\n ----------\n label\n Y-axis label.\n ax\n Axis on which to set the label (default is usually the first axis).\n """\n if ax is None:\n ax = self._default_ylabel_ax\n self.axes[ax].set_ylabel(label)\n\n\ndef format_axes(\n ax: mpl.axes.Axes,\n frame: Union[bool, str],\n yaxis: bool,\n):\n if frame == 't':\n ax.tick_params(direction='inout', bottom=False, top=True,\n left=False, right=True, labelbottom=True,\n labeltop=False, labelleft=True,\n labelright=False)\n ax.spines['right'].set_position('zero')\n ax.spines['left'].set_visible(False)\n ax.spines['top'].set_position('zero')\n ax.spines['bottom'].set_visible(False)\n elif frame == 'none':\n for spine in ax.spines.values():\n spine.set_visible(False)\n elif frame == 'off':\n ax.axis('off')\n elif not frame:\n ax.yaxis.set_ticks_position('left')\n ax.spines['right'].set_visible(False)\n ax.xaxis.set_ticks_position('bottom')\n ax.spines['top'].set_visible(False)\n\n if not yaxis:\n ax.yaxis.set_ticks(())\n ax.spines['left'].set_visible(False)
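\n\n# ``format_axes`` frame options at a glance, as a small illustrative sketch\n# (plain matplotlib; writes a throw-away file):\ndef _format_axes_sketch():\n from matplotlib import pyplot\n figure, axes = pyplot.subplots(1, 4)\n for ax, frame in zip(axes, [True, False, 't', 'none']):\n # True: full frame; False: only spines with ticks;\n # 't': spines through the origin; 'none': no spines\n format_axes(ax, frame, yaxis=True)\n ax.set_title(repr(frame))\n figure.savefig('frame-styles.png')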
\n\nclass BaseLayout:\n def __init__(\n self,\n h: float,\n w: float,\n dpi: float,\n tight: bool,\n show: bool,\n run: bool,\n title: FMTextArg = None,\n autoscale: bool = False,\n name: str = None,\n right_of: Union[EelFigure, int] = None,\n below: Union[EelFigure, int] = None,\n axes: Union[matplotlib.axes.Axes, List[matplotlib.axes.Axes]] = None,\n ):\n self.h = h\n self.w = w\n self.dpi = dpi or mpl.rcParams['figure.dpi']\n self.tight = tight\n self.show = show\n self.run = run\n self.autoscale = autoscale\n self.title = asfmtext_or_none(title)\n self.name = asfmtext_or_none(name) or title\n if isinstance(axes, matplotlib.axes.Axes):\n axes = [axes]\n elif axes is not None:\n axes = list(axes)\n self.user_axes = axes\n\n x = y = None\n if hasattr(right_of, '_frame'):\n rect = right_of._frame.GetRect()\n x = rect.GetRight() + 1\n if below is None:\n y = rect.GetTop()\n elif isinstance(right_of, int):\n x = right_of\n elif right_of is not None:\n raise TypeError(f"{right_of=}")\n\n if hasattr(below, '_frame'):\n rect = below._frame.GetRect()\n y = rect.GetBottom() + 1\n if x is None:\n x = rect.GetLeft()\n elif isinstance(below, int):\n y = below\n elif below is not None:\n raise TypeError(f"{below=}")\n\n if x is None and y is None:\n self.pos = None\n else:\n if x is None:\n x = -1\n if y is None:\n y = -1\n self.pos = (x, y)\n\n def fig_kwa(self):\n out = {'figsize': (self.w, self.h), 'dpi': self.dpi}\n if CONFIG['figure_background'] is not False:\n out['facecolor'] = CONFIG['figure_background']\n return out\n\n def make_axes(self, figure):\n if self.user_axes:\n axes = self.user_axes\n else:\n axes = self._make_axes(figure)\n self._configure_axes(axes)\n return axes\n\n def _make_axes(self, figure):\n raise NotImplementedError\n\n def _configure_axes(self, axes):\n raise NotImplementedError\n\n\ndef resolve_plot_rect(w, h, dpi):\n # infer figure dimensions from screen size\n w_applies = w is not None and w <= 0\n h_applies = h is not None and h <= 0\n if w_applies or h_applies:\n from .._wxgui import wx, get_app\n\n get_app()\n effective_dpi = dpi or mpl.rcParams['figure.dpi']\n display_w, display_h = wx.GetDisplaySize()\n if h_applies:\n effective_display_h = display_h - 50\n h = effective_display_h / effective_dpi + h\n if w_applies:\n w = display_w / effective_dpi + w\n return w, h\n\n\nclass LayoutDim:\n "Helper class to determine figure spacing"\n _properties = ('total', 'ax', 'first', 'last', 'space')\n _equations = dict(\n total='first + n_ax * ax + (n_ax - 1) * space + last',\n ax='(total - first - last - (n_ax - 1) * space) / n_ax',\n first='total - n_ax * ax - (n_ax - 1) * space - last',\n last='total - first - n_ax * ax - (n_ax - 1) * space',\n space='(total - first - n_ax * ax - last) / (n_ax - 1)',\n )\n\n def __init__(self, n_ax, total, ax, first, space, last, ax_default, first_default, space_default, last_default):\n if space is None and n_ax == 1:\n space = 0.\n values = {'total': total, 'first': first, 'space': space, 'last': last, 'ax': ax, 'n_ax': n_ax}\n defaults = {'first': first_default, 'space': space_default, 'last': last_default, 'ax': ax_default}\n for i, p in enumerate(self._properties):\n if values[p] is None:\n for p2 in self._properties[i + 1:]:\n if values[p2] is None:\n values[p2] = defaults[p2]\n values[p] = eval(self._equations[p], values)\n break\n\n self.total = values['total']\n self.ax = values['ax']\n self.first = values['first']\n self.space = values['space']\n self.last = values['last']
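\n\n# LayoutDim solves the one unknown in the identity\n# total == first + n_ax * ax + (n_ax - 1) * space + last:\n# the first parameter passed as None (in ``_properties`` order) is computed\n# from the others, with any remaining unknowns filled from the defaults.\n# A small illustrative example (numbers chosen arbitrarily):\ndef _layout_dim_example():\n # 10 in. total, 3 axes of 2.5 in., 0.5 in. outer margins -> the only\n # unknown, the space between axes, follows from the identity above:\n dim = LayoutDim(3, 10., 2.5, 0.5, None, 0.5, 2., 0.5, 0.1, 0.5)\n assert dim.space == (10. - 0.5 - 3 * 2.5 - 0.5) / 2 == 0.75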
\n\nclass Layout(BaseLayout):\n """Layout for figures with several axes of the same size"""\n _default_margins = {'left': 0.4, 'bottom': 0.5, 'right': 0.05, 'top': 0.05, 'wspace': 0.1, 'hspace': 0.1}\n\n def __init__(\n self,\n nax: Union[int, List[bool]],\n ax_aspect: float, # width / height\n axh_default: float,\n tight: bool = True,\n title: str = None,\n h: float = None,\n w: float = None,\n axh: float = None,\n axw: float = None,\n nrow: int = None,\n ncol: int = None,\n dpi: float = None,\n margins: Dict[str, float] = None,\n show: bool = True,\n run: bool = None,\n frame: Union[bool, str] = True,\n yaxis=True,\n share_axes: bool = False,\n **kwargs):\n """Create a grid of axes based on variable parameters.\n\n Parameters\n ----------\n nax\n Number of axes required. If provided as a list, axes are only added\n for items where ``item`` is True.\n ax_aspect\n Width / height aspect of the axes.\n axh_default\n The default axes height if it cannot be determined from the other\n parameters.\n tight\n Rescale axes so that the space in the figure is used optimally\n (default True).\n title : str\n Figure title.\n h\n Height of the figure.\n w\n Width of the figure.\n axh\n Height of the axes.\n axw\n Width of the axes.\n nrow\n Set a limit to the number of rows (default is no limit).\n ncol\n Set a limit to the number of columns (default is no limit). If\n neither nrow nor ncol is specified, a square layout is preferred.\n dpi\n DPI for the figure (default is to use matplotlib rc parameters).\n margins\n Absolute subplot parameters (in inches). Implies ``tight=False``.\n If ``margins`` is specified, ``axw`` and ``axh`` are interpreted\n exclusive of the margins, i.e., ``axh=2, margins={'top': .5}`` for\n a plot with one axes will result in a total height of 2.5.\n show\n Show the figure in the GUI (default True). Use False for creating\n figures and saving them without displaying them on the screen.\n run\n Run the Eelbrain GUI app (default is True for interactive plotting and\n False in scripts).\n frame : bool | 't' | 'none'\n Draw frame around axes:\n - True: all four spines\n - False: only spines with ticks\n - 't': spines at x=0 and y=0\n - 'none': no spines at all\n """\n if h and axh:\n if h < axh:\n raise ValueError("h < axh")\n if w and axw:\n if w < axw:\n raise ValueError("w < axw")\n w, h = resolve_plot_rect(w, h, dpi)\n\n self.h_fixed = h\n self.w_fixed = w if w is not None else axw\n self._margins_arg = margins\n\n if margins is True:\n use_margins = True\n tight = False\n margins = self._default_margins.copy()\n elif margins is not None:\n use_margins = True\n tight = False\n margins = dict(margins)\n invalid = set(margins).difference(self._default_margins)\n if invalid:\n raise ValueError(f"{margins=}: Unknown keys {invalid}")\n else:\n margins = {k: 0 for k in self._default_margins}\n use_margins = False\n\n h_is_implicit = h is None\n w_is_implicit = w is None\n\n if nax is None:\n axes = None\n elif isinstance(nax, int):\n axes = list(range(nax))\n elif isinstance(nax, (list, tuple)):\n axes = [i for i, ax in enumerate(nax) if ax]\n nax = len(nax)\n else:\n raise TypeError("nax=%r" % (nax,))\n\n trim = None\n if not nax:\n if w is None:\n if h is None:\n h = axh_default\n w = ax_aspect * h\n elif h is None:\n h = w / ax_aspect\n elif nax == 1:\n ncol = ncol or 1\n nrow = nrow or 1\n elif nrow is None and ncol is None:\n if w and axw:\n trim = 'row'\n ncol = math.floor(w / axw)\n elif h and axh:\n trim = 'col'\n nrow = math.floor(h / axh)\n elif w:\n trim = 'row'\n if axh:\n ncol = round(w / (axh * ax_aspect))\n else:\n ncol = round(w / (axh_default * ax_aspect))\n ncol = max(1, min(nax, ncol))\n elif h:\n trim = 'col'\n if axw:\n nrow = round(h / (axw / ax_aspect))\n else:\n nrow = round(h / 
axh_default)\n nrow = max(1, min(nax, nrow))\n elif axh or axw:\n trim = 'row'\n if not axh:\n axh = axw / ax_aspect\n nrow = min(nax, math.floor(defaults['maxh'] / axh))\n else:\n trim = 'row'\n # default: minimum number of columns (max number of rows)\n hspace = margins.get('hspace', 0)\n maxh = defaults['maxh'] - margins.get('top', 0) - margins.get('bottom', 0) + hspace\n axh_with_space = axh_default + hspace\n nrow = min(nax, math.floor(maxh / axh_with_space))\n ncol = math.ceil(nax / nrow)\n # test width\n wspace = margins.get('wspace', 0)\n maxw = defaults['maxw'] - margins.get('left', 0) - margins.get('right', 0) + wspace\n axw_with_space = axh_default * ax_aspect + wspace\n if ncol * axw_with_space > maxw:\n # nrow/ncol proportional to (maxh / axh) / (maxw / axw)\n ratio = (maxh / axh_with_space) / (maxw / axw_with_space)\n # nax = ncol * (ncol * ratio)\n # ncol = sqrt(nax / ratio)\n ncol = math.floor(math.sqrt(nax / ratio))\n nrow = math.ceil(nax / ncol)\n axh = (maxh - nrow * hspace) / nrow\n axw = axh * ax_aspect\n\n if nax:\n if nrow is None:\n nrow = math.ceil(nax / ncol)\n elif ncol is None:\n ncol = math.ceil(nax / nrow)\n\n if trim == 'row':\n if (nrow * ncol) - nax >= ncol:\n nrow -= 1\n elif trim == 'col':\n if (nrow * ncol) - nax >= nrow:\n nrow -= 1\n\n if axw:\n axh_default = axw / ax_aspect\n elif w:\n axh_default = w / ncol / ax_aspect\n h_dim = LayoutDim(nrow, h, axh, margins.get('top'), margins.get('hspace'), margins.get('bottom'), axh_default, self._default_margins['top'], self._default_margins['hspace'], self._default_margins['bottom'])\n w_dim = LayoutDim(ncol, w, axw, margins.get('left'), margins.get('wspace'), margins.get('right'), h_dim.ax * ax_aspect, self._default_margins['left'], self._default_margins['wspace'], self._default_margins['right'])\n h = h_dim.total\n w = w_dim.total\n axh = h_dim.ax\n axw = w_dim.ax\n margins = {\n 'top': h_dim.first, 'bottom': h_dim.last, 'hspace': h_dim.space,\n 'left': w_dim.first, 'right': w_dim.last, 'wspace': w_dim.space}\n h_is_implicit = w_is_implicit = False\n\n if h_is_implicit:\n hspace = 0 if nrow is None else margins['hspace'] * (nrow - 1)\n h += margins['bottom'] + hspace + margins['top']\n if w_is_implicit:\n wspace = 0 if ncol is None else margins['wspace'] * (ncol - 1)\n w += margins['left'] + wspace + margins['right']\n\n BaseLayout.__init__(self, h, w, dpi, tight, show, run, title, **kwargs)\n self.nax = nax\n self.axes = axes\n self.axh = axh\n self.axw = axw\n self.nrow = nrow\n self.ncol = ncol\n self.frame = frame\n self.yaxis = yaxis\n self.share_axes = share_axes\n self.margins = margins if use_margins else None\n\n def __repr__(self):\n kwargs = self.fig_kwa()\n if 'subplotpars' in kwargs:\n pars = kwargs['subplotpars']\n attrs = ((k, getattr(pars, k)) for k in ('left', 'right', 'bottom', 'top', 'wspace', 'hspace'))\n desc = ', '.join(f'{k}={v}' for k, v in attrs if v is not None)\n kwargs['subplotpars'] = f\"<{desc}>\"\n args = ', '.join(f'{k}={v}' for k, v in kwargs.items())\n return f'<Layout: {args}>'\n\n def fig_kwa(self):\n out = BaseLayout.fig_kwa(self)\n\n if self.margins: # absolute subplot parameters\n out['subplotpars'] = SubplotParams(\n self.margins['left'] / self.w,\n self.margins['bottom'] / self.h,\n 1 - self.margins['right'] / self.w,\n 1 - self.margins['top'] / self.h,\n # space expressed as a fraction of the average axis height/width\n self.margins['wspace'] / self.axw,\n self.margins['hspace'] / self.axh)\n\n return out\n\n def _make_axes(self, figure):\n if not self.nax:\n return 
[]\n axes = []\n kwargs = {}\n for i in self.axes:\n ax = figure.add_subplot(self.nrow, self.ncol, i + 1, autoscale_on=self.autoscale, **kwargs)\n axes.append(ax)\n if self.share_axes:\n kwargs.update(sharex=ax, sharey=ax)\n return axes\n\n def _configure_axes(self, axes):\n for ax in axes:\n format_axes(ax, self.frame, self.yaxis)\n return axes\n\n\nclass ImLayout(Layout):\n \"\"\"Layout subclass for axes without space\n\n Make sure to specify the ``margins`` parameter for absolute spacing\n \"\"\"\n\n def __init__(\n self,\n nax: Union[int, List[bool]],\n ax_aspect: float, # width / height\n axh_default: float,\n margins: dict = None,\n default_margins: dict = None,\n title: str = None,\n axtitle: Union[bool, Sequence[str]] = False, # for default spacing\n **kwargs,\n ):\n if axtitle is True:\n has_axtitle = (len(nax) if isinstance(nax, list) else nax) > 1\n else:\n has_axtitle = True if isinstance(axtitle, np.ndarray) else bool(axtitle)\n title_space = 1.5 * mpl_font_size('figure.titlesize') if title else 0\n axtitle_space = 1.5 * mpl_font_size('axes.titlesize') if has_axtitle else 0\n margins_ = {\n 'left': 0, 'wspace': 0, 'right': 0,\n 'top': axtitle_space + title_space,\n 'hspace': axtitle_space,\n 'bottom': 0,\n }\n if default_margins:\n margins_.update(default_margins)\n if margins:\n margins_.update(margins)\n Layout.__init__(self, nax, ax_aspect, axh_default, title=title, margins=margins_, **kwargs)\n\n def _make_axes(self, figure):\n axes = []\n for i in self.axes:\n ax = figure.add_subplot(self.nrow, self.ncol, i + 1, autoscale_on=self.autoscale)\n axes.append(ax)\n return axes\n\n def _configure_axes(self, axes):\n for ax in axes:\n ax.axis('off')\n\n\nclass VariableAspectLayout(BaseLayout):\n \"\"\"Layout with a fixed number of columns that differ in spacing\n\n Axes are originally created to fill the whole rectangle allotted to them.\n Developed for TopoButterfly plot: one variable aspect butterfly plot, and\n one square topomap plot.\n\n Parameters\n ----------\n nrow\n Number of rows.\n axh_default\n Default row height.\n w_default\n Default figure width.\n aspect\n Axes aspect ratio (w/h) for each column; None for axes with flexible\n width.\n ax_kwargs\n Parameters for :meth:`figure.add_axes` for each column.\n ax_frames\n ``frame`` parameter for :func:`format_axes` for each column.\n row_titles\n One title per row.\n \"\"\"\n def __init__(\n self,\n nrow: int,\n axh_default: float,\n w_default: float,\n aspect: Sequence[Optional[float]] = (None, 1),\n ax_kwargs: Sequence[dict] = None,\n ax_frames: Sequence[bool] = None,\n row_titles: Sequence[Optional[str]] = None,\n title: FMTextArg = None,\n h: float = None,\n w: float = None,\n axh: float = None,\n dpi: float = None,\n show: bool = True,\n run: bool = None,\n **kwargs,\n ):\n w, h = resolve_plot_rect(w, h, dpi)\n self.w_fixed = w\n\n if axh and h:\n raise ValueError(\"h and axh can not be specified both at the same time\")\n elif h:\n axh = h / nrow\n elif axh:\n h = nrow * axh\n else:\n axh = axh_default\n h = nrow * axh\n\n if w is None:\n w = w_default\n\n if ax_kwargs is None:\n ax_kwargs = [{}] * len(aspect)\n if ax_frames is None:\n ax_frames = [True] * len(aspect)\n\n BaseLayout.__init__(self, h, w, dpi, False, show, run, title, **kwargs)\n self.nax = nrow * len(aspect)\n self.axh = axh\n self.nrow = nrow\n self.ncol = len(aspect)\n self.share_axes = False\n self.row_titles = row_titles\n self.aspect = aspect\n self.n_flexible = self.aspect.count(None)\n self.ax_kwargs = ax_kwargs\n self.ax_frames = 
ax_frames\n\n # Compute axes outlines for given height and width\n h = self.h\n w = self.w\n text_buffer = 20 * POINT\n\n # buffers for legends\n left_buffer = text_buffer * (3 + (self.row_titles is not None))\n bottom_buffer = text_buffer * 2\n top_buffer = text_buffer * (1 + 2 * bool(self.title))\n\n # rectangle base in inches\n axh = (h - bottom_buffer - top_buffer) / self.nrow\n axws = [None if a is None else a * axh for a in self.aspect]\n fixed = sum(axw for axw in axws if axw is not None)\n w_free = (w - fixed - left_buffer) / self.n_flexible\n widths = [w_free if axw is None else axw for axw in axws]\n lefts = (sum(widths[:i]) + left_buffer for i in range(len(widths)))\n bottoms = (i * axh + bottom_buffer for i in range(self.nrow - 1, -1, -1))\n\n # convert to figure coords\n height = axh / h\n lefts_ = [l / w for l in lefts]\n widths_ = [w_ / w for w_ in widths]\n bottoms_ = [b / h for b in bottoms]\n\n # rectangles: (left, bottom, width, height)\n self._ax_rects = [[(l, bottom, w, height) for l, w in zip(lefts_, widths_)] for bottom in bottoms_]\n\n def _make_axes(self, figure):\n axes = []\n for row, row_rects in enumerate(self._ax_rects):\n for rect, kwa, frame in zip(row_rects, self.ax_kwargs, self.ax_frames):\n ax = figure.add_axes(rect, autoscale_on=self.autoscale, **kwa)\n axes.append(ax)\n\n if self.row_titles and self.row_titles[row]:\n bottom, height = rect[1], rect[3]\n figure.text(0, bottom + height / 2, self.row_titles[row], ha='left', va='center', rotation='vertical')\n return axes\n\n def _configure_axes(self, axes):\n for ax, frame in zip(axes, cycle(self.ax_frames)):\n format_axes(ax, frame, True)\n # id axes for callbacks\n for i, ax in enumerate(axes):\n ax.id = i\n\n\ndef subplots(\n nrows: int = 1,\n ncols: int = 1,\n axh: float = None,\n axw: float = None,\n h: float = None,\n w: float = None,\n left: float = None,\n right: float = None,\n wspace: float = None,\n width_ratios: Sequence[float] = None,\n bottom: float = None,\n top: float = None,\n hspace: float = None,\n height_ratios: Sequence[float] = None,\n **kwargs,\n):\n """Specify :func:`matplotlib.pyplot.subplots` parameters in inches\n\n Parameters\n ----------\n nrows\n Number of subplot rows.\n ncols\n Number of subplot columns.\n axh\n Height of each axes.\n axw\n Width of each axes.\n h\n Figure height.\n w\n Figure width.\n left\n Margin to the left of the axes.\n right\n Margin to the right of the axes.\n wspace\n Width of the margin between axes.\n width_ratios\n The relative widths of the columns (see :class:`matplotlib.gridspec.GridSpec`).\n bottom\n Margin below the axes.\n top\n Margin above the axes.\n hspace\n Height of the margin between axes.\n height_ratios\n The relative heights of the rows (see :class:`matplotlib.gridspec.GridSpec`).\n **\n Other parameters for :func:`matplotlib.pyplot.subplots`.\n """\n from matplotlib import pyplot\n\n margins = {'left': left, 'bottom': bottom, 'right': right, 'top': top, 'wspace': wspace, 'hspace': hspace}\n layout = Layout(nrows*ncols, 1, 2, False, None, h, w, axh, axw, nrows, ncols, None, margins)\n gridspec_kw = {\n 'left': layout.margins['left'] / layout.w,\n 'right': 1 - layout.margins['right'] / layout.w,\n 'wspace': layout.margins['wspace'] / layout.axw,\n 'width_ratios': width_ratios,\n 'bottom': layout.margins['bottom'] / layout.h,\n 'top': 1 - layout.margins['top'] / layout.h,\n 'hspace': layout.margins['hspace'] / layout.axh,\n 'height_ratios': height_ratios,\n }\n return pyplot.subplots(layout.nrow, layout.ncol, figsize=(layout.w, layout.h), gridspec_kw=gridspec_kw, **kwargs)
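\n\n# A hedged usage sketch for ``subplots`` (all sizes in inches; margins that\n# are not given fall back to the Layout defaults):\ndef _subplots_example():\n # two 3 x 2 in. panels side by side with a fixed 0.5 in. left margin\n figure, axes = subplots(nrows=1, ncols=2, axh=2, axw=3, left=0.5)\n axes[0].plot([0, 1])\n figure.savefig('two-panels.png')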
\n\nclass ColorBarMixin:\n """Colorbar toolbar button mixin\n\n Parameters\n ----------\n param_func : func\n Function that returns color-bar parameters.\n """\n def __init__(\n self,\n param_func: Callable = None, # function to get cmap, vmin, vmax\n data: Union[NDVar, Var, Number, str, 'PlotData'] = None, # to infer unit\n mappable: Any = None, # matplotlib mappable object\n ):\n self.__get_params = param_func\n self.__mappable = mappable\n if data is None:\n self.__scale = AxisScale(1)\n else:\n self.__scale = AxisScale(data)\n\n def _fill_toolbar(self, tb):\n from .._wxgui import wx, ID, Icon\n\n tb.AddTool(ID.PLOT_COLORBAR, "Plot Colorbar", Icon("plot/colorbar"))\n tb.Bind(wx.EVT_TOOL, self.__OnPlotColorBar, id=ID.PLOT_COLORBAR)\n\n def __OnPlotColorBar(self, event):\n return self.plot_colorbar()\n\n def plot_colorbar(\n self,\n label: Union[bool, str] = True,\n label_position: Literal['left', 'right', 'top', 'bottom'] = None,\n label_rotation: float = None,\n clipmin: float = None,\n clipmax: float = None,\n orientation: Literal['horizontal', 'vertical'] = 'horizontal',\n **kwargs,\n ):\n """Plot a colorbar corresponding to the displayed data\n\n Parameters\n ----------\n label\n Label for the x-axis (default is based on the data).\n label_position\n Position of the axis label. Valid values depend on orientation.\n label_rotation\n Angle of the label in degrees (for horizontal colorbars, the default is\n 0; for vertical colorbars, the default is 0 for labels of 3 characters\n and shorter, and 90 for longer labels).\n clipmin\n Clip the color-bar below this value.\n clipmax\n Clip the color-bar above this value.\n orientation\n Orientation of the bar (default is horizontal).\n ...\n More parameters for :class:`plot.ColorBar`.\n\n Returns\n -------\n colorbar : plot.ColorBar\n ColorBar plot object.\n """\n # cf. matplotlib.colorbar.Colorbar transforming mappable to color-map\n from .
import ColorBar\n if self.__mappable is not None:\n cmap = self.__mappable.cmap\n vmin = self.__mappable.norm\n vmax = None\n elif self.__get_params is not None:\n cmap, vmin, vmax = self.__get_params()\n else:\n raise RuntimeError(f\"No colormap on {self}\")\n return ColorBar(cmap, vmin, vmax, label, label_position, label_rotation, clipmin, clipmax, orientation, self.__scale, **kwargs)\n\n\nclass ColorMapMixin(ColorBarMixin):\n \"\"\"takes care of color-map and includes color-bar\"\"\"\n _can_set_vlim = True\n\n def __init__(self, epochs, cmap, vmax, vmin, contours, plots):\n ColorBarMixin.__init__(self, self.__get_cmap_params, epochs[0][0])\n self.__plots = plots # can be empty list at __init__\n self._cmaps = find_fig_cmaps(epochs, cmap)\n self._vlims = find_fig_vlims(epochs, vmax, vmin, self._cmaps)\n self._contours = find_fig_contours(epochs, self._vlims, contours)\n self._first_meas = epochs[0][0].info.get('meas')\n\n def __get_cmap_params(self):\n return (self._cmaps[self._first_meas],) + self._vlims[self._first_meas]\n\n def add_contour(self, level: float, color: Any = 'k', meas: str = None):\n \"\"\"Add a contour line\n\n Parameters\n ----------\n level : scalar\n The value at which to draw the contour.\n color : matplotlib color\n The color of the contour line.\n meas : str\n The measurement for which to add a contour line (default is the\n measurement plotted first).\n \"\"\"\n if meas is None:\n meas = self._first_meas\n\n for p in self.__plots:\n p.add_contour(meas, level, color)\n self.draw()\n\n def set_cmap(self, cmap, meas=None):\n \"\"\"Change the colormap in the array plots\n\n Parameters\n ----------\n cmap : str | colormap\n New colormap.\n meas : None | str\n Measurement to which to apply the colormap. With None, it is\n applied to all.\n \"\"\"\n if meas is None:\n meas = self._first_meas\n\n for p in self.__plots:\n p.set_cmap(cmap, meas)\n self._cmaps[meas] = cmap\n if isinstance(cmap, LocatedColormap):\n self.set_vlim(cmap.vmin, cmap.vmax, meas)\n else:\n self.draw()\n\n def set_vlim(self, v=None, vmax=None, meas=None):\n \"\"\"Change the colormap limits\n\n If the limit is symmetric, use ``set_vlim(vlim)``; if it is not, use\n ``set_vlim(vmin, vmax)``.\n\n Parameters\n ----------\n v : scalar\n If this is the only value specified it is interpreted as the upper\n end of the scale, and the lower end is determined based on\n the colormap to be ``-v`` or ``0``. 
If ``vmax`` is also specified,\n ``v`` specifies the lower end of the scale.\n vmax : scalar (optional)\n Upper end of the color scale.\n meas : str (optional)\n Measurement type to apply (default is the first one found).\n \"\"\"\n if meas is None:\n meas = self._first_meas\n elif meas not in self._cmaps:\n raise ValueError(\"meas=%r\" % (meas,))\n\n if vmax is None:\n vmin, vmax = fix_vlim_for_cmap(None, abs(v), self._cmaps[meas])\n else:\n vmin = v\n\n for p in self.__plots:\n p.set_vlim(vmin, vmax, meas)\n self._vlims[meas] = vmin, vmax\n\n if self._can_set_ylim:\n self.set_ylim(vmin, vmax)\n else:\n self.draw()\n\n def get_vlim(self, meas=None):\n \"Retrieve colormap value limits as ``(vmin, vmax)`` tuple\"\n if meas is None:\n meas = self._first_meas\n return self._vlims[meas]\n\n\nclass LegendMixin:\n __choices = ('invisible', 'separate window', 'draggable', 'upper right',\n 'upper left', 'lower left', 'lower right', 'right',\n 'center left', 'center right', 'lower center', 'upper center',\n 'center')\n __args = (False, 'fig', 'draggable', 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)\n _has_frame = None\n\n def __init__(\n self,\n loc: LegendArg,\n handles: Dict[CellArg, Any],\n labels: Dict[CellArg, str] = None,\n ):\n \"\"\"Legend toolbar menu mixin\n\n Parameters\n ----------\n loc\n Matplotlib figure legend location argument or 'fig' to plot the\n legend in a separate figure.\n handles : dict\n {cell: handle} dictionary.\n labels : dict\n Dictionary with labels for cells.\n \"\"\"\n # whether to plot default legend\n if loc is not None:\n initial_loc = loc\n elif len(handles) > 1:\n initial_loc = 'upper right'\n else:\n initial_loc = False\n self.__handles = handles\n self.legend = None\n self.__labels = None\n self.__set_labels(labels)\n self.plot_legend(initial_loc)\n\n def __set_labels(self, labels: Dict[CellArg, str] = None):\n if labels is not None:\n self.__labels = {key: asfmtext(label) for key, label in labels.items()}\n\n def _fill_toolbar(self, tb):\n from .._wxgui import wx\n\n choices = [name.title() for name in self.__choices]\n self.__ctrl = wx.Choice(tb, choices=choices, name='Legend')\n tb.AddControl(self.__ctrl, \"Legend\")\n self.__ctrl.Bind(wx.EVT_CHOICE, self.__OnChoice, source=self.__ctrl)\n\n def __OnChoice(self, event):\n self.__plot(self.__args[event.GetSelection()])\n\n def plot_legend(\n self,\n loc: LegendArg = 'fig',\n labels=None,\n **kwargs):\n \"\"\"Plot the legend (or remove it from the figure).\n\n Parameters\n ----------\n loc\n Where to plot the legend (see Notes; default 'fig').\n labels : dict\n Dictionary with alternate labels for all cells.\n ... 
:\n Parameters for :class:`eelbrain.plot.Legend`.\n\n Returns\n -------\n legend_figure : None | legend\n If loc=='fig' the Figure, otherwise None.\n\n Notes\n -----\n legend content can be modified through the figure's\n ``legend_handles`` and ``legend_labels`` attributes.\n\n Possible values for the ``loc`` argument:\n\n ``False``:\n Make the current legend invisible\n ``'fig'``:\n Plot the legend in a new figure\n ``'draggable'``:\n The legend can be dragged to the desired position with the mouse\n pointer.\n str | int | (float, float):\n Matplotlib :meth:`~matplotlib.figure.Figure.legend` position argument.\n \"\"\"\n if loc in self.__choices:\n choice = self.__choices.index(loc)\n arg = self.__args[choice]\n elif loc is None:\n choice = 0\n arg = False\n elif loc is True:\n choice = 3\n arg = 'best'\n elif isinstance(loc, Sequence) and not isinstance(loc, str):\n choice = 0\n arg = loc\n elif loc not in self.__args:\n raise ValueError(f\"Invalid legend location: {loc!r}; use one of: {enumeration(map(repr, self.__choices), 'or')}\")\n else:\n choice = self.__args.index(loc)\n arg = loc\n\n if self._has_frame:\n self.__ctrl.SetSelection(choice)\n\n if arg is not False:\n return self.__plot(loc, labels, **kwargs)\n\n def save_legend(self, *args, **kwargs):\n \"\"\"Save the legend as image file\n\n Parameters\n ----------\n ... :\n Parameters for Matplotlib's figure.savefig()\n \"\"\"\n p = self.plot_legend(show=False)\n p.save(*args, **kwargs)\n p.close()\n\n def __plot(self, loc: LegendArg, labels: Dict[CellArg, str] = None, **kwargs):\n self.__set_labels(labels)\n if loc and self.__handles:\n if self.__labels is None:\n cells = list(self.__handles)\n labels = [cellname(cell) for cell in cells]\n elif isinstance(self.__labels, dict):\n cells = list(self.__labels.keys())\n labels = list(self.__labels.values())\n else:\n raise TypeError(f\"{labels=}; needs to be dict\")\n handles = [self.__handles[cell] for cell in cells]\n if loc == 'fig':\n return Legend(handles, labels, **kwargs)\n else:\n # take care of old legend\n if self.legend is not None and loc == 'draggable':\n self.legend.set_draggable(True)\n elif self.legend is not None:\n self.legend.remove()\n elif loc == 'draggable':\n self.legend = self.figure.legend(handles, labels, loc=1)\n self.legend.set_draggable(True)\n\n if loc != 'draggable':\n self.legend = self.figure.legend(handles, labels, loc=loc)\n self.draw()\n elif self.legend is not None:\n self.legend.remove()\n self.legend = None\n self.draw()\n elif not self.__handles:\n raise RuntimeError(\"No handles to produce legend.\")\n\n\nclass Legend(EelFigure):\n\n def __init__(self, handles, labels, **kwargs):\n layout = Layout(0, 1, 2, tight=False, **kwargs)\n EelFigure.__init__(self, None, layout)\n\n self.legend = self.figure.legend(handles, labels, loc=2)\n\n # resize figure to match legend\n if not self._layout.w_fixed and self._has_frame:\n self.draw()\n bb = self.legend.get_window_extent()\n w0, h0 = self._frame.GetSize()\n h = int(h0 + bb.x0 - bb.y0)\n w = int(bb.x0 + bb.x1)\n self._frame.SetSize((w, h))\n\n self._show()\n\n\nclass TimeController:\n # Link plots that have the TimeSlicer mixin\n def __init__(\n self,\n t: float = 0,\n fixate: bool = False,\n ):\n self._plots = [] # list of weakref to plots\n self.current_time = t\n self.fixate = fixate\n\n def add_plot(self, plot: 'TimeSlicer'):\n if plot._time_controller is None:\n t = plot._validate_time(self.current_time)\n plot._set_time(t, self.fixate)\n self._plots.append(weakref.ref(plot))\n 
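# the weak reference just stored keeps a closed plot from outliving\n # its figure; the back-reference set next lets the plot delegate\n # future set_time() calls to this controller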
plot._time_controller = self\n elif plot._time_controller is not self:\n self.merge(plot._time_controller)\n\n def iter_plots(self):\n needs_cleaning = False\n for ref in self._plots:\n plot = ref()\n if plot is None:\n needs_cleaning = True\n else:\n yield plot\n if needs_cleaning:\n self._plots = [ref for ref in self._plots if ref() is not None]\n\n def merge(self, time_controller):\n \"Merge another TimeController into self\"\n for plot in time_controller.iter_plots():\n plot._time_controller = None\n self.add_plot(plot)\n\n def set_time(self, t, fixate):\n if t == self.current_time and fixate == self.fixate:\n return\n for p in self.iter_plots():\n t = p._validate_time(t)\n for p in self.iter_plots():\n p._update_time_wrapper(t, fixate)\n self.current_time = t\n self.fixate = fixate\n\n def set_xlim(self, xmin, xmax):\n for p in self.iter_plots():\n if isinstance(p, XAxisMixin):\n p._set_xlim(xmin, xmax, draw=True)\n\n\nclass TimeSlicer:\n # Interface to link time axes of multiple plots.\n # update data in a child plot of time-slices\n _time_dim = None\n _current_time = None # needs to reflect what is currently displayed\n _initial_time = None # used by delayed initialization of time-controller\n _display_time_in_frame_title = False\n\n def __init__(\n self,\n time_dim: Union[UTS, Case] = None,\n time_fixed: bool = None,\n display_text: matplotlib.text.Text = None,\n initial_time: float = None,\n ):\n self._time_controller = None\n self._time_fixed = time_fixed if time_fixed is not None else (initial_time is not None)\n self.__display_text = display_text\n self._initial_time = initial_time\n if time_dim is not None:\n self._init_time_dim(time_dim)\n\n def _init_time_dim(self, time_dim: Union[UTS, Case]):\n if self._time_dim is not None:\n if time_dim == self._time_dim:\n return\n raise ValueError(f\"An incompatible time dimension is already set on {self}\\nold: {self._time_dim}\\nnew: {time_dim}\")\n self._time_dim = time_dim\n if isinstance(time_dim, UTS):\n if self._initial_time is None:\n self._initial_time = time_dim.tmin\n elif isinstance(time_dim, Case):\n if self._initial_time is None:\n self._initial_time = 0\n else:\n raise TypeError(f'{time_dim=}')\n\n def _init_controller(self):\n # Only instantiated if more than one plots need to be linked\n tc = TimeController(self._initial_time, self._time_fixed)\n tc.add_plot(self)\n\n def link_time_axis(self, other):\n \"\"\"Link the time axis of this figure with another figure\"\"\"\n if self._time_dim is None:\n raise NotImplementedError(\"Slice plot for dimension other than time\")\n elif not isinstance(other, TimeSlicer):\n raise TypeError(f\"{other.__class__.__name__} plot does not support linked time axes\")\n elif other._time_dim is None:\n raise NotImplementedError(\"Slice plot for dimension other than time\")\n elif other._time_controller:\n other._time_controller.add_plot(self)\n else:\n if not self._time_controller:\n self._init_controller()\n self._time_controller.add_plot(other)\n\n def _nudge_time(self, offset):\n if self._time_dim is None:\n return\n current_i = self._time_dim._array_index(self.get_time())\n if offset > 0:\n new_i = min(self._time_dim.nsamples - 1, current_i + offset)\n else:\n new_i = max(0, current_i + offset)\n self._set_time(self._time_dim[new_i], True)\n\n def get_time(self):\n \"Retrieve the current time\"\n if self._current_time is None:\n return self._initial_time\n return self._current_time\n\n def play_movie(self, time_dilation=4.):\n \"\"\"Cycle through the time axis\n\n See Also\n --------\n 
.save_movie : Save a movie to disk for smoother playback\n \"\"\"\n t = self._time_dim[0]\n self.set_time(t)\n tmax = self._time_dim[-1]\n last_frame = time.time()\n time.sleep(0.05)\n while True:\n now = time.time()\n t += (now - last_frame) / time_dilation\n last_frame = now\n if t > tmax:\n break\n self.set_time(t)\n self.set_time(tmax)\n\n def set_time(self, time):\n \"\"\"Set the time point to display\n\n Parameters\n ----------\n time : scalar\n Time to display.\n \"\"\"\n self._set_time(time, True)\n\n def _set_time(self, t, fixate=False):\n \"Called by the plot\"\n if self._time_controller is None:\n self._update_time_wrapper(t, fixate)\n else:\n self._time_controller.set_time(t, fixate)\n\n def _update_time_wrapper(self, t, fixate):\n \"Called by the TimeController\"\n if t == self._current_time and fixate == self._time_fixed:\n return\n self._update_time(t, fixate)\n self._current_time = t\n self._time_fixed = fixate\n if self._display_time_in_frame_title and self._frame:\n self._frame.SetTitleSuffix(f' [{ms(t)} ms]')\n if self.__display_text is not None:\n self.__display_text.set_text(f'{ms(t)} ms')\n\n def _update_time(self, t, fixate):\n raise NotImplementedError\n\n def _validate_time(self, t):\n if self._time_dim is not None:\n if t < self._time_dim.tmin:\n return self._time_dim.tmin\n elif t > self._time_dim.tmax:\n return self._time_dim.tmax\n return t\n\n def _im_array(self):\n # for movies\n raise NotImplementedError\n\n\nclass TimeSlicerEF(TimeSlicer):\n # TimeSlicer for Eelfigure\n _can_set_time = True\n\n def __init__(\n self,\n x_dimname: str,\n x_dim: Dimension,\n axes: Sequence[matplotlib.axes.Axes] = None,\n redraw: bool = True,\n display_text: matplotlib.text.Text = None,\n initial_time: float = None,\n ):\n if x_dimname != 'time':\n TimeSlicer.__init__(self, time_fixed=True, display_text=display_text)\n return\n TimeSlicer.__init__(self, x_dim, display_text=display_text, initial_time=initial_time)\n self.__axes = self.axes if axes is None else axes\n self.__time_lines = []\n self.__redraw = redraw\n self.canvas.mpl_connect('button_press_event', self._on_click)\n self._register_key('.', self._on_nudge_time)\n self._register_key(',', self._on_nudge_time)\n\n def _on_click(self, event):\n if self._time_controller and event.inaxes in self.__axes:\n self._set_time(event.xdata, fixate=event.button == 1)\n\n def _on_motion_sub(self, event):\n if not self._time_fixed and event.inaxes in self.__axes:\n self._set_time(event.xdata)\n return set()\n\n def _on_nudge_time(self, event):\n self._nudge_time(1 if event.key == '.' else -1)\n\n def _update_time(self, t, fixate):\n # Implementation for a plot with time axes\n if fixate:\n redraw = True\n if self.__time_lines:\n xdata = (t, t)\n for line in self.__time_lines:\n line.set_xdata(xdata)\n else:\n for ax in self.__axes:\n self.__time_lines.append(ax.axvline(t, color='k'))\n else:\n redraw = bool(self.__time_lines)\n while self.__time_lines:\n self.__time_lines.pop().remove()\n\n if self.__redraw and redraw and self._frame is not None:\n self.canvas.redraw(self.__axes)\n\n def save_movie(self, filename=None, time_dilation=4., **kwargs):\n \"\"\"Save the figure with moving time axis as movie\n\n Parameters\n ----------\n filename : path-like\n Filename for the movie (omit to use a GUI).\n time_dilation : float\n Factor by which to stretch time (default 4). 
Time dilation is\n controlled through the frame-rate; if the ``fps`` keyword argument\n is specified, ``time_dilation`` is ignored.\n ...\n :func:`imageio.mimwrite` parameters.\n \"\"\"\n import imageio\n\n if filename is None:\n filename = ui.ask_saveas("Save movie...", None, [('Movie (*.mov)', '*.mov')])\n if not filename:\n return\n else:\n filename = os.path.expanduser(filename)\n\n if 'fps' not in kwargs:\n kwargs['fps'] = 1. / self._time_dim.tstep / time_dilation\n\n ims = []\n for t in self._time_dim:\n self._set_time(t, True)\n im = self._im_array()\n ims.append(im)\n imageio.mimwrite(filename, ims, **kwargs)\n\n def _im_array(self):\n # private attr usage is official: https://matplotlib.org/gallery/misc/agg_buffer_to_array.html\n return np.array(self.figure.canvas.renderer._renderer)\n\n\nclass TopoMapKey:\n\n def __init__(self, data_func):\n self.__topo_data = data_func\n self._register_key('t', self.__on_topo)\n self._register_key('T', self.__on_topo)\n\n def __on_topo(self, event):\n topo_data = self.__topo_data(event)\n if topo_data is None:\n return\n\n from ._topo import Topomap\n\n data, title, proj = topo_data\n if event.key == 't':\n Topomap(data, proj=proj, cmap=self._cmaps, vmax=self._vlims,\n contours=self._contours, title=title)\n else:\n Topomap(data, proj=proj, cmap=self._cmaps, vmax=self._vlims,\n contours=self._contours, title=title, axw=9,\n sensorlabels='name')\n\n\nclass CategorialAxisMixin:\n\n def __init__(self, ax, axis, layout, label, model, ticks, labels, tick_delim, tick_pos, cells, origin=None):\n self.__ax = ax\n self.__axis = axis\n self.__cells = cells\n if axis == 'x':\n self.__axis_obj = ax.xaxis\n if layout.frame is not True:\n ax.spines['bottom'].set_visible(False)\n if origin is not None:\n ax.axhline(origin, color='k', linewidth=mpl.rcParams['axes.linewidth'], clip_on=False)\n elif axis == 'y':\n self.__axis_obj = ax.yaxis\n if layout.frame is not True:\n ax.spines['left'].set_visible(False)\n if origin is not None:\n ax.axvline(origin, color='k', linewidth=mpl.rcParams['axes.linewidth'], clip_on=False)\n else:\n raise ValueError(f"axis={axis!r}")\n\n # axis label\n if label is True:\n if model is not None and model.name:\n label = model.name.replace('_', ' ')\n else:\n label = False\n if label:\n self.__axis_obj.set_label_text(label)\n\n # ticks\n self.__axis_obj.set_ticks_position('none')\n if ticks:\n if isinstance(ticks, dict) or ticks is True:\n labels_ = find_labels(cells, labels, tick_delim)\n if isinstance(ticks, dict):\n labels_.update(ticks)\n tick_labels = [labels_[cell] for cell in cells]\n else:\n tick_labels = ticks\n self.__axis_obj.set_ticks(tick_pos)\n self.__axis_obj.set_ticklabels(tick_labels)\n elif ticks is False:\n self.__axis_obj.set_ticks(())\n\n if axis == 'x' and self._has_frame and not self._layout.w_fixed:\n self._draw_hooks.append(self.__separate_categorial_labels)\n\n def __separate_categorial_labels(self):\n # make sure x axis labels don't overlap\n labels = self.__axis_obj.get_ticklabels()\n n = len(labels)\n if n > 1:\n bbs = [l.get_window_extent(self.figure.canvas.renderer) for l in labels]\n overlap = max(bbs[i].x1 - bbs[i + 1].x0 for i in range(n - 1))\n extend = n * (overlap + 10)\n w, h = self._frame.GetSize()\n w += int(extend)\n self._frame.SetSize((w, h))\n return True\n\n def mark_pair(\n self,\n cell_1: Union[float, CellArg],\n cell_2: Union[float, CellArg],\n y: float,\n dy: float = None,\n mark: Union[float, str] = None,\n color: Any = None,\n nudge: Union[bool, float] = None,\n **text_args,\n ):\n
\"\"\"Mark a pair of categories with a line and a label\n\n Parameters\n ----------\n cell_1\n Data-cell to be compared (can be specified as cell or as\n x-coordinate)\n cell_2\n Second cell to be compared.\n y\n Level above which to plot the bar.\n dy\n Length of vertical ticks on each side of the bar (offsets the\n location of the bar itself to ``y + dy``; use negative values to\n flip orientation).\n mark\n Text label, or p-value to automatically determine the label and\n ``color``.\n color\n Color for bar and ``label``.\n nudge\n Nudge the edges of the bar inwards to allow multiple bars\n side-by-side on the same level of ``y``.\n ...\n All other parameters are used to plot the text label with\n :meth:`matplotlib.axes.Axes.text`.\n \"\"\"\n if isinstance(cell_1, (str, tuple)):\n x1 = self.__cells.index(cell_1)\n else:\n x1 = cell_1\n if isinstance(cell_2, (str, tuple)):\n x2 = self.__cells.index(cell_2)\n else:\n x2 = cell_2\n location = {'x': 'top', 'y': 'right'}[self.__axis]\n mark_difference(x1, x2, y, mark, dy, color, nudge, location, self.__ax, **text_args)\n\n\nclass XAxisMixin:\n \"\"\"Manage x-axis\n\n Parameters\n ----------\n xmin : scalar\n Lower bound of the x axis.\n xmax : scalar\n Upper bound of the x axis.\n axes : list of Axes\n Axes that should be managed by the mixin.\n xlim : scalar | (scalar, scalar)\n Initial x-axis view limits as ``(left, right)`` tuple or as ``length``\n scalar (default is the full x-axis in the data).\n\n Notes\n -----\n Navigation:\n - ``←``: scroll left\n - ``→``: scroll right\n - ``home``: scroll to beginning\n - ``end``: scroll to end\n - ``f``: x-axis zoom in (reduce x axis range)\n - ``d``: x-axis zoom out (increase x axis range)\n \"\"\"\n _can_set_xlim = True\n\n def __init__(self, xmin, xmax, xlim=None, axes=None):\n self.__xmin = xmin\n self.__xmax = xmax\n self.__axes = axes or self.axes\n self.__vspans = []\n self._register_key('f', self.__on_zoom_plus)\n self._register_key('d', self.__on_zoom_minus)\n self._register_key('j' if IS_WINDOWS else 'left', self.__on_left)\n self._register_key('l' if IS_WINDOWS else 'right', self.__on_right)\n self._register_key('home', self.__on_beginning)\n self._register_key('end', self.__on_end)\n if xlim is None:\n xlim = (self.__xmin, self.__xmax)\n elif np.isscalar(xlim):\n xlim = (self.__xmin, self.__xmin + xlim)\n self._set_xlim(*xlim)\n\n def _init_with_data(\n self,\n epochs: Sequence[Sequence[NDVar]],\n xdim: str,\n xlim: Union[float, Tuple[float, float]] = None,\n axes: List[matplotlib.axes.Axes] = None,\n im: bool = False,\n ):\n \"\"\"Compute axis bounds from data\n\n Parameters\n ----------\n epochs\n The data that is plotted (to determine axis range).\n xdim\n Dimension that is plotted on the x-axis.\n axes\n Axes that should be managed by the mixin.\n xlim\n Initial x-axis view limits as ``(left, right)`` tuple or as ``length``\n scalar (default is the full x-axis in the data).\n im\n Plot displays an im, i.e.
the axes limits need to extend beyond the\n dimension endpoints by half a step (default False).\n \"\"\"\n dims = (e.get_dim(xdim) for e in chain(*epochs))\n if im:\n dim_extent = [dim._axis_im_extent() for dim in dims]\n else:\n dim_extent = [dim._axis_extent() for dim in dims]\n xmin = min(e[0] for e in dim_extent)\n xmax = max(e[1] for e in dim_extent)\n XAxisMixin.__init__(self, xmin, xmax, xlim, axes)\n\n def get_xlim(self):\n return self.__axes[0].get_xlim()\n\n def __animate(self, vmin, vmin_dst, vmax, vmax_dst):\n n_steps = int(0.1 // self._last_draw_time)\n if n_steps > 1:\n vmin_d = vmin_dst - vmin\n vmax_d = vmax_dst - vmax\n for i in range(1, n_steps):\n x = i / n_steps\n self.set_xlim(vmin + x * vmin_d, vmax + x * vmax_d)\n self.set_xlim(vmin_dst, vmax_dst)\n\n def __on_beginning(self, event):\n left, right = self.get_xlim()\n d = right - left\n self.set_xlim(self.__xmin, min(self.__xmax, self.__xmin + d))\n\n def __on_end(self, event):\n left, right = self.get_xlim()\n d = right - left\n self.set_xlim(max(self.__xmin, self.__xmax - d), self.__xmax)\n\n def __on_zoom_plus(self, event):\n left, right = self.get_xlim()\n d = (right - left) / 4.\n self.__animate(left, left + d, right, right - d)\n\n def __on_zoom_minus(self, event):\n left, right = self.get_xlim()\n d = right - left\n new_left = max(self.__xmin, left - (d / 2.))\n new_right = min(self.__xmax, new_left + 2 * d)\n self.__animate(left, new_left, right, new_right)\n\n def __on_left(self, event):\n left, right = self.get_xlim()\n d = right - left\n new_left = max(self.__xmin, left - d)\n self.__animate(left, new_left, right, new_left + d)\n\n def __on_right(self, event):\n left, right = self.get_xlim()\n d = right - left\n new_right = min(self.__xmax, right + d)\n self.__animate(left, new_right - d, right, new_right)\n\n def _set_xlim(self, left, right, draw=False):\n for ax in self.__axes:\n ax.set_xlim(left, right)\n if draw:\n self.draw()\n\n def add_vspans(self, intervals, axes=None, *args, **kwargs):\n \"\"\"Draw vertical bars over axes\n\n Parameters\n ----------\n intervals : sequence of (start, stop) tuples\n Start and stop positions on the x-axis.\n axes : int | list of int\n Which axes to mark (default is all axes).\n additional arguments :\n Additional arguments for :meth:`matplotlib.axes.Axes.axvspan`.\n \"\"\"\n if axes is None:\n axes = self.__axes\n elif isinstance(axes, int):\n axes = (self.__axes[axes],)\n else:\n axes = [self.__axes[i] for i in axes]\n\n for ax in axes:\n for xmin, xmax in intervals:\n self.__vspans.append(ax.axvspan(xmin, xmax, *args, **kwargs))\n self.draw()\n\n def set_xlim(self, left=None, right=None):\n \"\"\"Set the x-axis limits for all axes\"\"\"\n if isinstance(self, TimeSlicer) and self._time_controller is not None:\n if left is None or right is None:\n ax_left, ax_right = self.__axes[0].get_xlim()\n if left is None:\n left = ax_left\n if right is None:\n right = ax_right\n self._time_controller.set_xlim(left, right)\n else:\n self._set_xlim(left, right, draw=True)\n\n\nclass YLimMixin:\n \"\"\"Manage y-axis\n\n Parameters\n ----------\n plots : Sequence\n Plots to manage. Plots must have ``.ax`` attribute.\n\n Notes\n -----\n Navigation:\n - ``↑``: scroll up\n - ``↓``: scroll down\n - ``r``: y-axis zoom in (reduce y-axis range)\n - ``c``: y-axis zoom out (increase y-axis range)\n \"\"\"\n # Keep Y-lim and V-lim separate.
For EEG, one might want to invert the\n # y-axis without inverting the colormap\n\n # What should be the organizing principle for different vlims within\n # one figure? Use cases:\n # - 2 axes with different data\n # - (not implemented) one axis with two y-axes\n _can_set_ylim = True\n\n def __init__(self, plots):\n self.__plots = plots\n self._register_key('r', self.__on_zoom_in)\n self._register_key('c', self.__on_zoom_out)\n self._register_key('i' if IS_WINDOWS else 'up', self.__on_move_up)\n self._register_key('k' if IS_WINDOWS else 'down', self.__on_move_down)\n self._draw_hooks.append(self.__draw_hook)\n # disable because it changes y-limits\n # self._untight_draw_hooks.append(self.__untight_draw_hook)\n\n def __draw_hook(self):\n need_draw = False\n for p in self.__plots:\n # decimate overlapping ticklabels\n locs = p.ax.yaxis.get_ticklocs()\n we = tuple(l.get_window_extent(self.canvas.renderer) for l in p.ax.yaxis.get_ticklabels())\n start = 0\n step = 1\n locs_list = list(locs) if 0 in locs else None\n while any(e1.ymin < e0.ymax for e0, e1 in intervals(we[start::step])):\n step += 1\n if locs_list:\n start = locs_list.index(0) % step\n if step > 1:\n p.ax.yaxis.set_ticks(locs[start::step])\n need_draw = True\n return need_draw\n\n def __untight_draw_hook(self):\n for p in self.__plots:\n # remove the top-most y tick-label if it is outside the figure\n extent = p.ax.yaxis.get_ticklabels()[-1].get_window_extent()\n if extent.height and extent.y1 > self.figure.get_window_extent().y1:\n p.ax.set_yticks(p.ax.get_yticks()[:-1])\n\n def get_ylim(self):\n vmin = min(p.vmin for p in self.__plots)\n vmax = max(p.vmax for p in self.__plots)\n return vmin, vmax\n\n def set_ylim(self, bottom=None, top=None):\n \"\"\"Set the y-axis limits\n\n Parameters\n ----------\n bottom : scalar\n Lower y-axis limit.\n top : scalar\n Upper y-axis limit.\n \"\"\"\n if bottom is None and top is None:\n return\n\n for p in self.__plots:\n p.set_ylim(bottom, top)\n self.draw()\n\n def __animate(self, vmin, vmin_d, vmax, vmax_d):\n n_steps = int(0.1 // self._last_draw_time)\n if n_steps <= 1:\n self.set_ylim(vmin + vmin_d, vmax + vmax_d)\n else:\n for i in range(1, n_steps + 1):\n x = i / n_steps\n self.set_ylim(vmin + x * vmin_d, vmax + x * vmax_d)\n\n def __on_move_down(self, event):\n vmin, vmax = self.get_ylim()\n d = (vmax - vmin) * 0.1\n self.__animate(vmin, -d, vmax, -d)\n\n def __on_move_up(self, event):\n vmin, vmax = self.get_ylim()\n d = (vmax - vmin) * 0.1\n self.__animate(vmin, d, vmax, d)\n\n def __on_zoom_in(self, event):\n vmin, vmax = self.get_ylim()\n d = (vmax - vmin) * 0.05\n self.__animate(vmin, d, vmax, -d)\n\n def __on_zoom_out(self, event):\n vmin, vmax = self.get_ylim()\n d = (vmax - vmin) * (1 / 22)\n self.__animate(vmin, -d, vmax, d)\n"
] |
[
[
"numpy.arange",
"numpy.array"
],
[
"numpy.nanmax",
"matplotlib.rcParams.copy",
"numpy.ma.isMaskedArray",
"numpy.linspace",
"numpy.isnan",
"numpy.nanmin",
"matplotlib.pyplot.subplots",
"matplotlib.figure.SubplotParams",
"matplotlib.get_backend",
"numpy.log10",
"matplotlib.rcParams.update",
"matplotlib.pyplot.close",
"numpy.isscalar",
"matplotlib.ticker.FuncFormatter",
"numpy.array",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
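
A minimal, self-contained sketch of the pan/zoom arithmetic implemented by XAxisMixin in the code field above, with plain floats standing in for matplotlib axes (the function names here are illustrative, not part of the source):

def zoom_in(left, right):
    # Trim a quarter of the visible range from each side (cf. __on_zoom_plus).
    d = (right - left) / 4.
    return left + d, right - d

def zoom_out(left, right, xmin, xmax):
    # Double the visible range, clamped to the data bounds (cf. __on_zoom_minus).
    d = right - left
    new_left = max(xmin, left - d / 2.)
    return new_left, min(xmax, new_left + 2 * d)

def scroll_left(left, right, xmin):
    # Shift one full window to the left without changing its width (cf. __on_left).
    d = right - left
    new_left = max(xmin, left - d)
    return new_left, new_left + d

assert zoom_in(0., 8.) == (2., 6.)
assert zoom_out(2., 6., 0., 8.) == (0., 8.)
assert scroll_left(2., 6., 0.) == (0., 4.)
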
tongpinmo/R2CNN-Solarpannel
|
[
"b99754d587bd75c47c53a31e1db2060356a28c51"
] |
[
"data/io/Solarpanel/val_crop.py"
] |
[
"import os\nimport scipy.misc as misc\nfrom xml.dom.minidom import Document\nimport numpy as np\nimport copy, cv2\n\ndef save_to_xml(save_path, im_width, im_height, objects_axis, label_name):\n im_depth = 0\n object_num = len(objects_axis)\n doc = Document()\n\n annotation = doc.createElement('annotation')\n doc.appendChild(annotation)\n\n folder = doc.createElement('folder')\n folder_name = doc.createTextNode('VOC2007')\n folder.appendChild(folder_name)\n annotation.appendChild(folder)\n\n filename = doc.createElement('filename')\n filename_name = doc.createTextNode('000024.jpg')\n filename.appendChild(filename_name)\n annotation.appendChild(filename)\n\n source = doc.createElement('source')\n annotation.appendChild(source)\n\n database = doc.createElement('database')\n database.appendChild(doc.createTextNode('The VOC2007 Database'))\n source.appendChild(database)\n\n annotation_s = doc.createElement('annotation')\n annotation_s.appendChild(doc.createTextNode('PASCAL VOC2007'))\n source.appendChild(annotation_s)\n\n image = doc.createElement('image')\n image.appendChild(doc.createTextNode('flickr'))\n source.appendChild(image)\n\n flickrid = doc.createElement('flickrid')\n flickrid.appendChild(doc.createTextNode('322409915'))\n source.appendChild(flickrid)\n\n owner = doc.createElement('owner')\n annotation.appendChild(owner)\n\n flickrid_o = doc.createElement('flickrid')\n flickrid_o.appendChild(doc.createTextNode('knautia'))\n owner.appendChild(flickrid_o)\n\n name_o = doc.createElement('name')\n name_o.appendChild(doc.createTextNode('yang'))\n owner.appendChild(name_o)\n\n\n size = doc.createElement('size')\n annotation.appendChild(size)\n width = doc.createElement('width')\n width.appendChild(doc.createTextNode(str(im_width)))\n height = doc.createElement('height')\n height.appendChild(doc.createTextNode(str(im_height)))\n depth = doc.createElement('depth')\n depth.appendChild(doc.createTextNode(str(im_depth)))\n size.appendChild(width)\n size.appendChild(height)\n size.appendChild(depth)\n segmented = doc.createElement('segmented')\n segmented.appendChild(doc.createTextNode('0'))\n annotation.appendChild(segmented)\n for i in range(object_num):\n objects = doc.createElement('object')\n annotation.appendChild(objects)\n object_name = doc.createElement('name')\n object_name.appendChild(doc.createTextNode(label_name[int(objects_axis[i][-1])]))\n objects.appendChild(object_name)\n pose = doc.createElement('pose')\n pose.appendChild(doc.createTextNode('Unspecified'))\n objects.appendChild(pose)\n truncated = doc.createElement('truncated')\n truncated.appendChild(doc.createTextNode('1'))\n objects.appendChild(truncated)\n difficult = doc.createElement('difficult')\n difficult.appendChild(doc.createTextNode('0'))\n objects.appendChild(difficult)\n bndbox = doc.createElement('bndbox')\n objects.appendChild(bndbox)\n \n x0 = doc.createElement('x0')\n x0.appendChild(doc.createTextNode(str((objects_axis[i][0]))))\n bndbox.appendChild(x0)\n y0 = doc.createElement('y0')\n y0.appendChild(doc.createTextNode(str((objects_axis[i][1]))))\n bndbox.appendChild(y0)\n\n x1 = doc.createElement('x1')\n x1.appendChild(doc.createTextNode(str((objects_axis[i][2]))))\n bndbox.appendChild(x1)\n y1 = doc.createElement('y1')\n y1.appendChild(doc.createTextNode(str((objects_axis[i][3]))))\n bndbox.appendChild(y1)\n \n x2 = doc.createElement('x2')\n x2.appendChild(doc.createTextNode(str((objects_axis[i][4]))))\n bndbox.appendChild(x2)\n y2 = doc.createElement('y2')\n 
y2.appendChild(doc.createTextNode(str((objects_axis[i][5]))))\n bndbox.appendChild(y2)\n\n x3 = doc.createElement('x3')\n x3.appendChild(doc.createTextNode(str((objects_axis[i][6]))))\n bndbox.appendChild(x3)\n y3 = doc.createElement('y3')\n y3.appendChild(doc.createTextNode(str((objects_axis[i][7]))))\n bndbox.appendChild(y3)\n \n f = open(save_path,'w')\n f.write(doc.toprettyxml(indent = ''))\n f.close() \n\nclass_list = ['plane', 'baseball-diamond', 'bridge', 'ground-track-field', \n'small-vehicle', 'large-vehicle', 'ship', \n'tennis-court', 'basketball-court', \n'storage-tank', 'soccer-ball-field', \n'roundabout', 'harbor', \n'swimming-pool', 'helicopter']\n\n\n\n\ndef format_label(txt_list):\n format_data = []\n for i in txt_list[2:]:\n format_data.append(\n [int(xy) for xy in i.split(' ')[:8]] + [class_list.index(i.split(' ')[8])]\n # {'x0': int(i.split(' ')[0]),\n # 'x1': int(i.split(' ')[2]),\n # 'x2': int(i.split(' ')[4]),\n # 'x3': int(i.split(' ')[6]),\n # 'y1': int(i.split(' ')[1]),\n # 'y2': int(i.split(' ')[3]),\n # 'y3': int(i.split(' ')[5]),\n # 'y4': int(i.split(' ')[7]),\n # 'class': class_list.index(i.split(' ')[8]) if i.split(' ')[8] in class_list else 0, \n # 'difficulty': int(i.split(' ')[9])}\n )\n if i.split(' ')[8] not in class_list :\n print ('warning found a new label :', i.split(' ')[8])\n exit()\n return np.array(format_data)\n\ndef clip_image(file_idx, image, boxes_all, width, height):\n if len(boxes_all) > 0:\n # print ('image shape', image.shape)\n shape = image.shape\n for start_h in range(0, shape[0], 512):\n for start_w in range(0, shape[1], 512):\n boxes = copy.deepcopy(boxes_all)\n box = np.zeros_like(boxes_all)\n start_h_new = start_h\n start_w_new = start_w\n if start_h + height > shape[0]:\n start_h_new = shape[0] - height\n if start_w + width > shape[1]:\n start_w_new = shape[1] - width\n top_left_row = max(start_h_new, 0)\n top_left_col = max(start_w_new, 0)\n bottom_right_row = min(start_h + height, shape[0])\n bottom_right_col = min(start_w + width, shape[1])\n\n\n subImage = image[top_left_row:bottom_right_row, top_left_col: bottom_right_col]\n box[:, 0] = boxes[:, 0] - top_left_col\n box[:, 2] = boxes[:, 2] - top_left_col\n box[:, 4] = boxes[:, 4] - top_left_col\n box[:, 6] = boxes[:, 6] - top_left_col\n\n box[:, 1] = boxes[:, 1] - top_left_row\n box[:, 3] = boxes[:, 3] - top_left_row\n box[:, 5] = boxes[:, 5] - top_left_row\n box[:, 7] = boxes[:, 7] - top_left_row\n box[:, 8] = boxes[:, 8]\n center_y = 0.25*(box[:, 1] + box[:, 3] + box[:, 5] + box[:, 7])\n center_x = 0.25*(box[:, 0] + box[:, 2] + box[:, 4] + box[:, 6])\n # print('center_y', center_y)\n # print('center_x', center_x)\n # print ('boxes', boxes)\n # print ('boxes_all', boxes_all)\n # print ('top_left_col', top_left_col, 'top_left_row', top_left_row)\n\n cond1 = np.intersect1d(np.where(center_y[:]>=0 )[0], np.where(center_x[:]>=0 )[0])\n cond2 = np.intersect1d(np.where(center_y[:] <= (bottom_right_row - top_left_row))[0],\n np.where(center_x[:] <= (bottom_right_col - top_left_col))[0])\n idx = np.intersect1d(cond1, cond2)\n # idx = np.where(center_y[:]>=0 and center_x[:]>=0 and center_y[:] <= (bottom_right_row - top_left_row) and center_x[:] <= (bottom_right_col - top_left_col))[0]\n # save_path, im_width, im_height, objects_axis, label_name\n if len(idx) > 0:\n xml = os.path.join(save_dir, 'labeltxt', \"%s_%04d_%04d.xml\" % (file_idx, top_left_row, top_left_col))\n save_to_xml(xml, subImage.shape[1], subImage.shape[0], box[idx, :], class_list)\n # print ('save xml : ', xml)\n if 
subImage.shape[0] > 5 and subImage.shape[1] >5:\n img = os.path.join(save_dir, 'images', \"%s_%04d_%04d.jpg\" % (file_idx, top_left_row, top_left_col))\n cv2.imwrite(img, subImage)\n \n \n \n\nprint ('class_list', len(class_list))\nraw_data = '/dataset/DOTA/val/'\nraw_images_dir = os.path.join(raw_data, 'images')\nraw_label_dir = os.path.join(raw_data, 'labelTxt')\n\nsave_dir = '/dataset/DOTA_clip/val/'\n\nimages = [i for i in os.listdir(raw_images_dir) if 'jpg' in i]\nlabels = [i for i in os.listdir(raw_label_dir) if 'txt' in i]\n\nprint ('find image', len(images))\nprint ('find label', len(labels))\n\nmin_length = 1e10\nmax_length = 1\n\n\nfor idx, img in enumerate(images):\n # img = 'P2330.jpg'\n print (idx, 'read image', img)\n img_data = misc.imread(os.path.join(raw_images_dir, img))\n\n # if len(img_data.shape) == 2:\n # img_data = img_data[:, :, np.newaxis]\n # print ('find gray image')\n\n txt_data = open(os.path.join(raw_label_dir, img.replace('jpg', 'txt')), 'r').readlines()\n # print (idx, len(format_label(txt_data)), img_data.shape)\n # if max(img_data.shape[:2]) > max_length:\n # max_length = max(img_data.shape[:2])\n # if min(img_data.shape[:2]) < min_length:\n # min_length = min(img_data.shape[:2])\n # if idx % 50 ==0:\n # print (idx, len(format_label(txt_data)), img_data.shape)\n # print (idx, 'min_length', min_length, 'max_length', max_length)\n box = format_label(txt_data)\n clip_image(img.strip('.jpg'), img_data, box, 800, 800)\n \n \n \n \n\n\n"
] |
[
[
"numpy.intersect1d",
"numpy.array",
"numpy.zeros_like",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
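
A minimal sketch of the center-based box filtering performed by clip_image in val_crop.py above, exercising the numpy calls listed for this row (zeros_like, where, intersect1d). It assumes boxes are rows of [x0, y0, x1, y1, x2, y2, x3, y3, class]; boxes_in_tile is an illustrative name, not part of the source:

import numpy as np

def boxes_in_tile(boxes_all, top_left_row, top_left_col, tile_h, tile_w):
    # Shift quadrilateral corners into tile coordinates.
    box = np.zeros_like(boxes_all)
    box[:, 0:8:2] = boxes_all[:, 0:8:2] - top_left_col  # x coordinates
    box[:, 1:8:2] = boxes_all[:, 1:8:2] - top_left_row  # y coordinates
    box[:, 8] = boxes_all[:, 8]                         # class index
    # Keep a box only if its center falls inside the tile.
    center_x = 0.25 * box[:, 0:8:2].sum(axis=1)
    center_y = 0.25 * box[:, 1:8:2].sum(axis=1)
    cond1 = np.intersect1d(np.where(center_y >= 0)[0], np.where(center_x >= 0)[0])
    cond2 = np.intersect1d(np.where(center_y <= tile_h)[0],
                           np.where(center_x <= tile_w)[0])
    return box[np.intersect1d(cond1, cond2)]

boxes = np.array([[10, 10, 20, 10, 20, 20, 10, 20, 0],          # center (15, 15): kept
                  [900, 900, 910, 900, 910, 910, 900, 910, 1]])  # center outside: dropped
print(boxes_in_tile(boxes, 0, 0, 800, 800))
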
mdda/Theano
|
[
"598d487c118e66875fdd625baa84ed29d283b800",
"598d487c118e66875fdd625baa84ed29d283b800",
"598d487c118e66875fdd625baa84ed29d283b800",
"598d487c118e66875fdd625baa84ed29d283b800",
"6ca7b2b65000e371f009b617d41bc5a90f022d38",
"6ca7b2b65000e371f009b617d41bc5a90f022d38",
"6ca7b2b65000e371f009b617d41bc5a90f022d38"
] |
[
"theano/compile/tests/test_nanguardmode.py",
"doc/hpcs2011_tutorial/pycuda_simple.py",
"theano/sparse/sandbox/sp.py",
"theano/tests/main.py",
"theano/scan_module/scan.py",
"theano/sandbox/gpuarray/tests/test_blas.py",
"theano/d3viz/tests/test_formatting.py"
] |
[
"\"\"\"\nThis test is for testing the NanGuardMode.\n\"\"\"\nimport logging\nfrom nose.tools import assert_raises\n\nimport numpy\n\nfrom theano.compile.nanguardmode import NanGuardMode\nimport theano\nimport theano.tensor as T\n\n\ndef test_NanGuardMode():\n \"\"\"\n Tests if NanGuardMode is working by feeding in numpy.inf and numpy.nans\n intentionally. A working implementation should be able to capture all\n the abnormalties.\n \"\"\"\n x = T.matrix()\n w = theano.shared(numpy.random.randn(5, 7).astype(theano.config.floatX))\n y = T.dot(x, w)\n\n fun = theano.function(\n [x], y,\n mode=NanGuardMode(nan_is_error=True, inf_is_error=True)\n )\n a = numpy.random.randn(3, 5).astype(theano.config.floatX)\n infa = numpy.tile(\n (numpy.asarray(100.) ** 1000000).astype(theano.config.floatX), (3, 5))\n nana = numpy.tile(\n numpy.asarray(numpy.nan).astype(theano.config.floatX), (3, 5))\n biga = numpy.tile(\n numpy.asarray(1e20).astype(theano.config.floatX), (3, 5))\n\n fun(a) # normal values\n\n # Temporarily silence logger\n _logger = logging.getLogger(\"theano.compile.nanguardmode\")\n try:\n _logger.propagate = False\n assert_raises(AssertionError, fun, infa) # INFs\n assert_raises(AssertionError, fun, nana) # NANs\n assert_raises(AssertionError, fun, biga) # big values\n finally:\n _logger.propagate = True\n",
"from __future__ import print_function\nimport pycuda.autoinit\nimport pycuda.driver as drv\nimport numpy\n\nfrom pycuda.compiler import SourceModule\nmod = SourceModule(\"\"\"\n__global__ void multiply_them(float *dest, float *a, float *b)\n{\n const int i = threadIdx.x;\n dest[i] = a[i] * b[i];\n}\n\"\"\")\n\nmultiply_them = mod.get_function(\"multiply_them\")\n\na = numpy.random.randn(400).astype(numpy.float32)\nb = numpy.random.randn(400).astype(numpy.float32)\n\ndest = numpy.zeros_like(a)\nmultiply_them(\n drv.Out(dest), drv.In(a), drv.In(b),\n block=(400,1,1), grid=(1,1))\n\nassert numpy.allclose(dest, a*b)\nprint(dest)\n",
"\"\"\"\nConvolution-like operations with sparse matrix multiplication.\n\nTo read about different sparse formats, see\nU{http://www-users.cs.umn.edu/~saad/software/SPARSKIT/paper.ps}.\n\n@todo: Automatic methods for determining best sparse format?\n\"\"\"\n# COPIED FROM hpu/icml09/sp.py\n\nimport numpy\nimport scipy\nfrom scipy import sparse as scipy_sparse\nfrom six.moves import xrange\n\nimport theano\nimport theano.sparse\nfrom theano import sparse, gof, Op, tensor\nfrom theano.sparse.basic import Remove0, remove0\n\n# To maintain compatibility\nfrom theano.sparse import (\n SpSum, sp_sum,\n ColScaleCSC, RowScaleCSC, col_scale, row_scale,\n Diag, diag, SquareDiagonal, square_diagonal,\n EnsureSortedIndices, ensure_sorted_indices, clean)\n\n\ndef register_specialize(lopt, *tags, **kwargs):\n theano.compile.optdb['specialize'].register(\n (kwargs and kwargs.pop('name')) or lopt.__name__, lopt, 'fast_run',\n *tags)\n\n\nclass ConvolutionIndices(Op):\n \"\"\"Build indices for a sparse CSC matrix that could implement A\n (convolve) B.\n\n This generates a sparse matrix M, which generates a stack of\n image patches when computing the dot product of M with image\n patch. Convolution is then simply the dot product of (img x M)\n and the kernels.\n \"\"\"\n __props__ = ()\n\n @staticmethod\n def sparse_eval(inshp, kshp, nkern, strides=(1, 1), mode='valid'):\n (dx, dy) = strides\n return convolution_indices.evaluate(inshp, kshp, (dx, dy),\n nkern, mode=mode, ws=False)\n\n @staticmethod\n def conv_eval(inshp, kshp, strides=(1, 1), mode='valid'):\n (dx, dy) = strides\n return convolution_indices.evaluate(inshp, kshp, (dx, dy),\n mode=mode, ws=True)\n\n # img_shape and ker_shape are (height,width)\n @staticmethod\n def evaluate(inshp, kshp, strides=(1, 1), nkern=1, mode='valid', ws=True):\n \"\"\"Build a sparse matrix which can be used for performing...\n * convolution: in this case, the dot product of this matrix\n with the input images will generate a stack of images\n patches. Convolution is then a tensordot operation of the\n filters and the patch stack.\n * sparse local connections: in this case, the sparse matrix\n allows us to operate the weight matrix as if it were\n fully-connected. The structured-dot with the input image gives\n the output for the following layer.\n\n :param ker_shape: shape of kernel to apply (smaller than image)\n :param img_shape: shape of input images\n :param mode: 'valid' generates output only when kernel and\n image overlap overlap fully. Convolution obtained\n by zero-padding the input\n :param ws: True if weight sharing, false otherwise\n :param (dx,dy): offset parameter. 
In the case of no weight sharing,\n gives the pixel offset between two receptive fields.\n With weight sharing gives the offset between the\n top-left pixels of the generated patches\n\n :rtype: tuple(indices, indptr, logical_shape, sp_type, out_img_shp)\n :returns: the structure of a sparse matrix, and the logical dimensions\n of the image which will be the result of filtering.\n \"\"\"\n (dx, dy) = strides\n N = numpy\n\n # inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)\n # in the first case, default nfeatures to 1\n if N.size(inshp) == 2:\n inshp = (1,) + inshp\n\n inshp = N.array(inshp)\n kshp = N.array(kshp)\n ksize = N.prod(kshp)\n\n kern = ksize - 1 - N.arange(ksize)\n\n # size of output image if doing proper convolution\n # (mode='full',dx=dy=0) outshp is the actual output shape\n # given the parameters\n fulloutshp = inshp[1:] + kshp - 1\n if mode == 'valid':\n s = -1\n else:\n s = 1\n outshp = N.int64(N.ceil((inshp[1:] + s * kshp - s * 1) \\\n / N.array([dy, dx], dtype='float')))\n if any(outshp <= 0):\n err = 'Invalid kernel', kshp, 'and/or step size', (dx, dy),\\\n 'for given input shape', inshp\n raise ValueError(err)\n\n outsize = N.prod(outshp)\n insize = N.prod(inshp)\n\n # range of output units over which to iterate\n if mode == 'valid':\n lbound = N.array([kshp[0] - 1, kshp[1] - 1])\n ubound = lbound + (inshp[1:] - kshp + 1)\n else:\n lbound = N.zeros(2)\n ubound = fulloutshp\n\n # coordinates of image in \"fulloutshp\" coordinates\n topleft = N.array([kshp[0] - 1, kshp[1] - 1])\n # bound when counting the receptive field\n botright = topleft + inshp[1:]\n\n # sparse matrix specifics...\n if ws:\n spmatshp = (outsize * N.prod(kshp) * inshp[0], insize)\n else:\n spmatshp = (nkern * outsize, insize)\n spmat = scipy_sparse.lil_matrix(spmatshp)\n\n # loop over output image pixels\n z, zz = 0, 0\n\n # incremented every time we write something to the sparse\n # matrix this is used to track the ordering of filter tap\n # coefficient in sparse column ordering\n tapi, ntaps = 0, 0\n\n # Note: looping over the number of kernels could've been done\n # more efficiently as the last step (when writing to\n # spmat). However, this messes up the ordering of the column\n # values (order in which you write the values determines how\n # the vectorized data will get used later one)\n\n for fmapi in xrange(inshp[0]): # loop over input features\n # loop over number of kernels (nkern=1 for weight sharing)\n for n in xrange(nkern):\n\n # FOR EACH OUTPUT PIXEL...\n # loop over output image height\n for oy in N.arange(lbound[0], ubound[0], dy):\n # loop over output image width\n for ox in N.arange(lbound[1], ubound[1], dx):\n\n # kern[l] is filter value to apply at (oj,oi)\n # for (iy,ix)\n l = 0\n\n # ... ITERATE OVER INPUT UNITS IN RECEPTIVE FIELD\n for ky in oy + N.arange(kshp[0]):\n for kx in ox + N.arange(kshp[1]):\n\n # verify if we are still within image\n # boundaries. 
Equivalent to\n # zero-padding of the input image\n if (all((ky, kx) >= topleft) and\n all((ky, kx) < botright)):\n\n # convert to \"valid\" input space\n # coords used to determine column\n # index to write to in sparse mat\n iy, ix = N.array((ky, kx)) - topleft\n # determine raster-index of input pixel...\n\n # taking into account multiple\n # input features\n col = iy * inshp[2] + ix + \\\n fmapi * N.prod(inshp[1:])\n\n # convert oy,ox values to output\n # space coordinates\n if mode == 'full':\n (y, x) = (oy, ox)\n else:\n (y, x) = (oy, ox) - topleft\n # taking into account step size\n (y, x) = N.array([y, x]) / (dy, dx)\n\n # convert to row index of sparse matrix\n if ws:\n row = ((y * outshp[1] + x) *\n inshp[0] * ksize + l + fmapi *\n ksize)\n else:\n row = y * outshp[1] + x\n\n # Store something at that location\n # in sparse matrix. The written\n # value is only useful for the\n # sparse case. It will determine\n # the way kernel taps are mapped\n # onto the sparse columns (idea of\n # kernel map)\n # n*... only for sparse\n spmat[row + n * outsize, col] = tapi + 1\n\n # total number of active taps\n # (used for kmap)\n ntaps += 1\n\n # absolute tap index (total number of taps)\n tapi += 1\n # move on to next filter tap l=(l+1)%ksize\n l += 1\n\n if spmat.format != 'csc':\n spmat = spmat.tocsc().sorted_indices()\n else:\n # BUG ALERT: scipy0.6 has bug where data and indices are written in\n # reverse column ordering.\n # Explicit call to sorted_indices removes this problem.\n spmat = spmat.sorted_indices()\n\n if ws:\n kmap = None\n else:\n kmap = N.zeros(ntaps, dtype='int')\n k = 0\n # print 'TEMPORARY BUGFIX: REMOVE !!!'\n for j in xrange(spmat.shape[1]):\n for i_idx in xrange(spmat.indptr[j], spmat.indptr[j + 1]):\n if spmat.data[i_idx] != 0:\n # this is == spmat[i,j] - 1\n kmap[k] = spmat.data[i_idx] - 1\n k += 1\n\n # when in valid mode, it is more efficient to store in sparse row\n # TODO: need to implement structured dot for csr matrix\n assert spmat.format == 'csc'\n sptype = 'csc'\n #sptype = 'csr' if mode=='valid' else 'csc'\n if 0 and mode == 'valid':\n spmat = spmat.tocsr()\n\n rval = (spmat.indices[:spmat.size],\n spmat.indptr, spmatshp, sptype, outshp)\n if kmap is not None:\n rval += (kmap,)\n\n return rval\n\n def perform(self, node, inputs, outputs):\n (inshp, kshp) = inputs\n (out_indices, out_indptr, spmat_shape) = outputs\n indices, indptr, spmatshp, outshp = self.evaluate(inshp, kshp)\n out_indices[0] = indices\n out_indptr[0] = indptr\n spmat_shape[0] = numpy.asarray(spmatshp)\n\nconvolution_indices = ConvolutionIndices()\n\n\ndef applySparseFilter(kerns, kshp, nkern, images, imgshp,\n step=(1, 1), bias=None, mode='valid'):\n \"\"\"\n \"images\" is assumed to be a matrix of shape batch_size x img_size,\n where the second dimension represents each image in raster order\n\n Output feature map will have shape:\n\n .. code-block:: python\n\n batch_size x number of kernels * output_size\n\n .. note::\n\n IMPORTANT: note that this means that each feature map is\n contiguous in memory.\n\n The memory layout will therefore be:\n [ <feature_map_0> <feature_map_1> ... <feature_map_n>],\n where <feature_map> represents a \"feature map\" in raster order\n\n Note that the concept of feature map doesn't really apply to\n sparse filters without weight sharing. Basically, nkern=1 will\n generate one output img/feature map, nkern=2 a second feature map,\n etc.\n\n kerns is a 1D tensor, and assume to be of shape:\n\n .. 
code-block:: python\n\n nkern * N.prod(outshp) x N.prod(kshp)\n\n Each filter is applied seperately to consecutive output pixels.\n\n :param kerns: nkern*outsize*ksize vector containing kernels\n :param kshp: tuple containing actual dimensions of kernel (not symbolic)\n :param nkern: number of kernels to apply at each pixel in the\n input image. nkern=1 will apply a single unique\n filter for each input pixel.\n :param images: bsize x imgsize matrix containing images on which\n to apply filters\n :param imgshp: tuple containing actual image dimensions (not symbolic)\n :param step: determines number of pixels between adjacent receptive fields\n (tuple containing dx,dy values)\n :param mode: 'full', 'valid' see CSM.evaluate function for details\n :return: out1, symbolic result\n :return: out2, logical shape of the output img (nkern,height,width)\n (after dot product, not of the sparse matrix!)\n \"\"\"\n\n # inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)\n # in the first case, default nfeatures to 1\n if numpy.size(imgshp) == 2:\n imgshp = (1,) + imgshp\n\n # construct indices and index pointers for sparse matrix\n indices, indptr, spmat_shape, sptype, outshp, kmap = \\\n convolution_indices.sparse_eval(imgshp, kshp, nkern, step, mode)\n\n # build a sparse weight matrix\n sparsew = theano.sparse.CSM(sptype, kmap)(kerns, indices,\n indptr, spmat_shape)\n output = sparse.structured_dot(sparsew, images.T).T\n if bias is not None:\n output += bias\n\n return output, numpy.hstack((nkern, outshp))\n\n\ndef convolve(kerns, kshp, nkern, images, imgshp, step=(1, 1), bias=None,\n mode='valid', flatten=True):\n \"\"\"Convolution implementation by sparse matrix multiplication.\n\n :note: For best speed, put the matrix which you expect to be\n smaller as the 'kernel' argument\n\n \"images\" is assumed to be a matrix of shape batch_size x img_size,\n where the second dimension represents each image in raster order\n\n If flatten is \"False\", the output feature map will have shape:\n\n .. code-block:: python\n\n batch_size x number of kernels x output_size\n\n If flatten is \"True\", the output feature map will have shape:\n\n .. code-block:: python\n\n batch_size x number of kernels * output_size\n\n .. note::\n\n IMPORTANT: note that this means that each feature map (image\n generate by each kernel) is contiguous in memory. The memory\n layout will therefore be: [ <feature_map_0> <feature_map_1>\n ... <feature_map_n>], where <feature_map> represents a\n \"feature map\" in raster order\n\n kerns is a 2D tensor of shape nkern x N.prod(kshp)\n\n :param kerns: 2D tensor containing kernels which are applied at every pixel\n :param kshp: tuple containing actual dimensions of kernel (not symbolic)\n :param nkern: number of kernels/filters to apply.\n nkern=1 will apply one common filter to all input pixels\n :param images: tensor containing images on which to apply convolution\n :param imgshp: tuple containing image dimensions\n :param step: determines number of pixels between adjacent receptive fields\n (tuple containing dx,dy values)\n :param mode: 'full', 'valid' see CSM.evaluate function for details\n :param sumdims: dimensions over which to sum for the tensordot operation.\n By default ((2,),(1,)) assumes kerns is a nkern x kernsize\n matrix and images is a batchsize x imgsize matrix\n containing flattened images in raster order\n :param flatten: flatten the last 2 dimensions of the output. 
By default,\n instead of generating a batchsize x outsize x nkern tensor,\n will flatten to batchsize x outsize*nkern\n\n :return: out1, symbolic result\n :return: out2, logical shape of the output img (nkern,heigt,width)\n\n :TODO: test for 1D and think of how to do n-d convolutions\n \"\"\"\n N = numpy\n # start by computing output dimensions, size, etc\n kern_size = N.int64(N.prod(kshp))\n\n # inshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)\n # in the first case, default nfeatures to 1\n if N.size(imgshp) == 2:\n imgshp = (1,) + imgshp\n\n # construct indices and index pointers for sparse matrix, which,\n # when multiplied with input images will generate a stack of image\n # patches\n indices, indptr, spmat_shape, sptype, outshp = \\\n convolution_indices.conv_eval(imgshp, kshp, step, mode)\n\n # build sparse matrix, then generate stack of image patches\n csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices,\n indptr, spmat_shape)\n patches = (sparse.structured_dot(csc, images.T)).T\n\n # compute output of linear classifier\n pshape = tensor.stack([images.shape[0] * tensor.as_tensor(N.prod(outshp)),\\\n tensor.as_tensor(imgshp[0] * kern_size)])\n patch_stack = tensor.reshape(patches, pshape, ndim=2)\n\n # kern is of shape: nkern x ksize*number_of_input_features\n # output is thus of shape: bsize*outshp x nkern\n output = tensor.dot(patch_stack, kerns.T)\n\n # add bias across each feature map (more efficient to do it now)\n if bias is not None:\n output += bias\n\n # now to have feature maps in raster order ...\n # go from bsize*outshp x nkern to bsize x nkern*outshp\n newshp = tensor.stack([images.shape[0],\\\n tensor.as_tensor(N.prod(outshp)),\\\n tensor.as_tensor(nkern)])\n tensout = tensor.reshape(output, newshp, ndim=3)\n output = tensor.DimShuffle((False,) * tensout.ndim, (0, 2, 1))(tensout)\n if flatten:\n output = tensor.flatten(output, 2)\n\n return output, N.hstack((nkern, outshp))\n\n\ndef max_pool(images, imgshp, maxpoolshp):\n \"\"\"Implements a max pooling layer\n\n Takes as input a 2D tensor of shape batch_size x img_size and\n performs max pooling. Max pooling downsamples by taking the max\n value in a given area, here defined by maxpoolshp. 
Outputs a 2D\n tensor of shape batch_size x output_size.\n\n :param images: 2D tensor containing images on which to apply convolution.\n Assumed to be of shape batch_size x img_size\n :param imgshp: tuple containing image dimensions\n :param maxpoolshp: tuple containing shape of area to max pool over\n\n :return: out1, symbolic result (2D tensor)\n :return: out2, logical shape of the output\n \"\"\"\n N = numpy\n poolsize = N.int64(N.prod(maxpoolshp))\n\n # imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)\n # in the first case, default nfeatures to 1\n if N.size(imgshp) == 2:\n imgshp = (1,) + imgshp\n\n # construct indices and index pointers for sparse matrix, which,\n # when multiplied with input images will generate a stack of image\n # patches\n indices, indptr, spmat_shape, sptype, outshp = \\\n convolution_indices.conv_eval(imgshp, maxpoolshp,\n maxpoolshp, mode='valid')\n\n# print 'XXXXXXXXXXXXXXXX MAX POOLING LAYER XXXXXXXXXXXXXXXXXXXX'\n# print 'imgshp = ', imgshp\n# print 'maxpoolshp = ', maxpoolshp\n# print 'outshp = ', outshp\n\n # build sparse matrix, then generate stack of image patches\n csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices,\n indptr, spmat_shape)\n patches = sparse.structured_dot(csc, images.T).T\n\n pshape = tensor.stack([images.shape[0] *\\\n tensor.as_tensor(N.prod(outshp)),\n tensor.as_tensor(imgshp[0]),\n tensor.as_tensor(poolsize)])\n patch_stack = tensor.reshape(patches, pshape, ndim=3)\n\n out1 = tensor.max(patch_stack, axis=2)\n\n pshape = tensor.stack([images.shape[0],\n tensor.as_tensor(N.prod(outshp)),\n tensor.as_tensor(imgshp[0])])\n out2 = tensor.reshape(out1, pshape, ndim=3)\n\n out3 = tensor.DimShuffle(out2.broadcastable, (0, 2, 1))(out2)\n\n return tensor.flatten(out3, 2), outshp\n",
"from __future__ import print_function\nimport os\nimport unittest\nimport sys\n\nfrom numpy.testing.nosetester import NoseTester\n\n\n# This class contains code adapted from NumPy,\n# numpy/testing/nosetester.py,\n# Copyright (c) 2005-2011, NumPy Developers\nclass TheanoNoseTester(NoseTester):\n \"\"\"\n Nose test runner.\n\n This class enables running nose tests from inside Theano,\n by calling theano.test().\n This version is more adapted to what we want than Numpy's one.\n \"\"\"\n\n def _test_argv(self, verbose, extra_argv):\n \"\"\"\n Generate argv for nosetest command\n\n :type verbose: int\n :param verbose: Verbosity value for test outputs, in the range 1-10.\n Default is 1.\n\n :type extra_argv: list\n :param extra_argv: List with any extra arguments to pass to nosetests.\n \"\"\"\n # self.package_path = os.path.abspath(self.package_path)\n argv = [__file__, self.package_path]\n argv += ['--verbosity', str(verbose)]\n if extra_argv:\n argv += extra_argv\n return argv\n\n def _show_system_info(self):\n import theano\n print(\"Theano version %s\" % theano.__version__)\n theano_dir = os.path.dirname(theano.__file__)\n print(\"theano is installed in %s\" % theano_dir)\n\n super(TheanoNoseTester, self)._show_system_info()\n\n def prepare_test_args(self, verbose=1, extra_argv=None, coverage=False,\n capture=True, knownfailure=True):\n \"\"\"\n Prepare arguments for the `test` method.\n\n Takes the same arguments as `test`.\n \"\"\"\n import nose.plugins.builtin\n # compile argv\n argv = self._test_argv(verbose, extra_argv)\n\n # numpy way of doing coverage\n if coverage:\n argv += ['--cover-package=%s' % self.package_name,\n '--with-coverage', '--cover-tests',\n '--cover-inclusive', '--cover-erase']\n\n # Capture output only if needed\n if not capture:\n argv += ['-s']\n\n # construct list of plugins\n plugins = []\n if knownfailure:\n from numpy.testing.noseclasses import KnownFailure\n plugins.append(KnownFailure())\n plugins += [p() for p in nose.plugins.builtin.plugins]\n\n return argv, plugins\n\n def test(self, verbose=1, extra_argv=None, coverage=False, capture=True,\n knownfailure=True):\n \"\"\"\n Run tests for module using nose.\n\n :type verbose: int\n :param verbose: Verbosity value for test outputs, in the range 1-10.\n Default is 1.\n\n :type extra_argv: list\n :param extra_argv: List with any extra arguments to pass to nosetests.\n\n :type coverage: bool\n :param coverage: If True, report coverage of Theano\n code. Default is False.\n\n :type capture: bool\n :param capture: If True, capture the standard output of the tests, like\n nosetests does in command-line. The output of failing\n tests will be displayed at the end. 
Default is True.\n\n :type knownfailure: bool\n :param knownfailure: If True, tests raising KnownFailureTest will\n not be considered Errors nor Failure, but reported as\n \"known failures\" and treated quite like skipped tests.\n Default is True.\n\n :returns: Returns the result of running the tests as a\n ``nose.result.TextTestResult`` object.\n \"\"\"\n from nose.config import Config\n from nose.plugins.manager import PluginManager\n from numpy.testing.noseclasses import NumpyTestProgram\n # Many Theano tests suppose device=cpu, so we need to raise an\n # error if device==gpu.\n if not os.path.exists('theano/__init__.py'):\n try:\n from theano import config\n if config.device != \"cpu\":\n raise ValueError(\"Theano tests must be run with device=cpu.\"\n \" This will also run GPU tests when possible.\\n\"\n \" If you want GPU-related tests to run on a\"\n \" specific GPU device, and not the default one,\"\n \" you should use the init_gpu_device theano flag.\")\n except ImportError:\n pass\n\n # cap verbosity at 3 because nose becomes *very* verbose beyond that\n verbose = min(verbose, 3)\n self._show_system_info()\n\n cwd = os.getcwd()\n if self.package_path in os.listdir(cwd):\n # The tests give weird errors if the package to test is\n # in current directory.\n raise RuntimeError((\n \"This function does not run correctly when, at the time \"\n \"theano was imported, the working directory was theano's \"\n \"parent directory. You should exit your Python prompt, change \"\n \"directory, then launch Python again, import theano, then \"\n \"launch theano.test().\"))\n\n argv, plugins = self.prepare_test_args(verbose, extra_argv, coverage,\n capture, knownfailure)\n\n # The \"plugins\" keyword of NumpyTestProgram gets ignored if config is\n # specified. Moreover, using \"addplugins\" instead can lead to strange\n # errors. So, we specify the plugins in the Config as well.\n cfg = Config(includeExe=True, plugins=PluginManager(plugins=plugins))\n t = NumpyTestProgram(argv=argv, exit=False, config=cfg)\n return t.result\n\n\ndef main(modulename):\n if 0:\n unittest.main()\n elif len(sys.argv) == 2 and sys.argv[1] == \"--debug\":\n module = __import__(modulename)\n tests = unittest.TestLoader().loadTestsFromModule(module)\n tests.debug()\n elif len(sys.argv) == 1:\n module = __import__(modulename)\n tests = unittest.TestLoader().loadTestsFromModule(module)\n unittest.TextTestRunner(verbosity=2).run(tests)\n else:\n print(\"options: [--debug]\")\n",
"\"\"\"\nThis module provides the Scan Op.\n\nScanning is a general form of recurrence, which can be used for looping.\nThe idea is that you *scan* a function along some input sequence, producing\nan output at each time-step that can be seen (but not modified) by the\nfunction at the next time-step. (Technically, the function can see the\nprevious K time-steps of your outputs and L time steps (from past and\nfuture) of your inputs.\n\nSo for example, ``sum()`` could be computed by scanning the ``z+x_i``\nfunction over a list, given an initial state of ``z=0``.\n\nSpecial cases:\n\n* A *reduce* operation can be performed by using only the last\n output of a ``scan``.\n* A *map* operation can be performed by applying a function that\n ignores previous steps of the outputs.\n\nOften a for-loop or while-loop can be expressed as a ``scan()`` operation,\nand ``scan`` is the closest that theano comes to looping. The advantages\nof using ``scan`` over `for` loops in python (amongs other) are:\n\n* it allows the number of iterations to be part of the symbolic graph\n* it allows computing gradients through the for loop\n* there exist a bunch of optimizations that help re-write your loop\nsuch that less memory is used and that it runs faster\n* it ensures that data is not copied from host to gpu and gpu to\nhost at each step\n\nThe Scan Op should typically be used by calling any of the following\nfunctions: ``scan()``, ``map()``, ``reduce()``, ``foldl()``,\n``foldr()``.\n\n\"\"\"\n__docformat__ = 'restructedtext en'\n__authors__ = (\"Razvan Pascanu \"\n \"Frederic Bastien \"\n \"James Bergstra \"\n \"Pascal Lamblin \")\n__copyright__ = \"(c) 2010, Universite de Montreal\"\n__contact__ = \"Razvan Pascanu <r.pascanu@gmail>\"\n\n\nimport logging\nimport numpy\nimport warnings\n\nfrom theano.compat import ifilter, izip\nfrom six import iteritems\nfrom six.moves import xrange\nfrom theano.compile import SharedVariable, function\nfrom theano import compile\nfrom theano import gof\nfrom theano.tensor import opt\nfrom theano import tensor\nfrom theano import config\nfrom theano.updates import OrderedUpdates\nfrom theano.compile import ops\nfrom theano.compat import OrderedDict\n\n\nfrom theano.scan_module import scan_op\nfrom theano.scan_module import scan_utils\nfrom theano.scan_module.scan_utils import safe_new, traverse\n\n# Logging function for sending warning or info\n_logger = logging.getLogger('theano.scan_module.scan')\n\n\ndef scan(fn,\n sequences=None,\n outputs_info=None,\n non_sequences=None,\n n_steps=None,\n truncate_gradient=-1,\n go_backwards=False,\n mode=None,\n name=None,\n profile=False,\n allow_gc=None,\n strict=False):\n \"\"\"\n This function constructs and applies a Scan op to the provided\n arguments.\n\n Parameters\n ----------\n fn\n ``fn`` is a function that describes the operations involved in one\n step of ``scan``. ``fn`` should construct variables describing the\n output of one iteration step. It should expect as input theano\n variables representing all the slices of the input sequences\n and previous values of the outputs, as well as all other arguments\n given to scan as ``non_sequences``. 
The order in which scan passes\n these variables to ``fn`` is the following :\n\n * all time slices of the first sequence\n * all time slices of the second sequence\n * ...\n * all time slices of the last sequence\n * all past slices of the first output\n * all past slices of the second otuput\n * ...\n * all past slices of the last output\n * all other arguments (the list given as `non_sequences` to\n scan)\n\n The order of the sequences is the same as the one in the list\n `sequences` given to scan. The order of the outputs is the same\n as the order of ``outputs_info``. For any sequence or output the\n order of the time slices is the same as the one in which they have\n been given as taps. For example if one writes the following :\n\n .. code-block:: python\n\n scan(fn, sequences = [ dict(input= Sequence1, taps = [-3,2,-1])\n , Sequence2\n , dict(input = Sequence3, taps = 3) ]\n , outputs_info = [ dict(initial = Output1, taps = [-3,-5])\n , dict(initial = Output2, taps = None)\n , Output3 ]\n , non_sequences = [ Argument1, Argument2])\n\n ``fn`` should expect the following arguments in this given order:\n\n #. ``Sequence1[t-3]``\n #. ``Sequence1[t+2]``\n #. ``Sequence1[t-1]``\n #. ``Sequence2[t]``\n #. ``Sequence3[t+3]``\n #. ``Output1[t-3]``\n #. ``Output1[t-5]``\n #. ``Output3[t-1]``\n #. ``Argument1``\n #. ``Argument2``\n\n The list of ``non_sequences`` can also contain shared variables\n used in the function, though ``scan`` is able to figure those\n out on its own so they can be skipped. For the clarity of the\n code we recommend though to provide them to scan. To some extend\n ``scan`` can also figure out other ``non sequences`` (not shared)\n even if not passed to scan (but used by `fn`). A simple example of\n this would be :\n\n .. code-block:: python\n\n import theano.tensor as TT\n W = TT.matrix()\n W_2 = W**2\n def f(x):\n return TT.dot(x,W_2)\n\n The function is expected to return two things. One is a list of\n outputs ordered in the same order as ``outputs_info``, with the\n difference that there should be only one output variable per\n output initial state (even if no tap value is used). Secondly\n `fn` should return an update dictionary (that tells how to\n update any shared variable after each iteration step). The\n dictionary can optionally be given as a list of tuples. There is\n no constraint on the order of these two list, ``fn`` can return\n either ``(outputs_list, update_dictionary)`` or\n ``(update_dictionary, outputs_list)`` or just one of the two (in\n case the other is empty).\n\n To use ``scan`` as a while loop, the user needs to change the\n function ``fn`` such that also a stopping condition is returned.\n To do so, he/she needs to wrap the condition in an ``until`` class.\n The condition should be returned as a third element, for example:\n\n .. code-block:: python\n\n ...\n return [y1_t, y2_t], {x:x+1}, theano.scan_module.until(x < 50)\n\n Note that a number of steps (considered in here as the maximum\n number of steps ) is still required even though a condition is\n passed (and it is used to allocate memory if needed). = {}):\n\n sequences\n ``sequences`` is the list of Theano variables or dictionaries\n describing the sequences ``scan`` has to iterate over. If a\n sequence is given as wrapped in a dictionary, then a set of optional\n information can be provided about the sequence. 
The dictionary\n should have the following keys:\n\n * ``input`` (*mandatory*) -- Theano variable representing the\n sequence.\n\n * ``taps`` -- Temporal taps of the sequence required by ``fn``.\n They are provided as a list of integers, where a value ``k``\n impiles that at iteration step ``t`` scan will pass to ``fn``\n the slice ``t+k``. Default value is ``[0]``\n\n Any Theano variable in the list ``sequences`` is automatically\n wrapped into a dictionary where ``taps`` is set to ``[0]``\n\n outputs_info\n ``outputs_info`` is the list of Theano variables or dictionaries\n describing the initial state of the outputs computed\n recurrently. When this initial states are given as dictionary\n optional information can be provided about the output corresponding\n to these initial states. The dictionary should have the following\n keys:\n\n * ``initial`` -- Theano variable that represents the initial\n state of a given output. In case the output is not computed\n recursively (think of a map) and does not require an initial\n state this field can be skipped. Given that (only) the previous\n time step of the output is used by ``fn``, the initial state\n **should have the same shape** as the output and **should not\n involve a downcast** of the data type of the output. If multiple\n time taps are used, the initial state should have one extra\n dimension that should cover all the possible taps. For example\n if we use ``-5``, ``-2`` and ``-1`` as past taps, at step 0,\n ``fn`` will require (by an abuse of notation) ``output[-5]``,\n ``output[-2]`` and ``output[-1]``. This will be given by\n the initial state, which in this case should have the shape\n (5,)+output.shape. If this variable containing the initial\n state is called ``init_y`` then ``init_y[0]`` *corresponds to*\n ``output[-5]``. ``init_y[1]`` *correponds to* ``output[-4]``,\n ``init_y[2]`` corresponds to ``output[-3]``, ``init_y[3]``\n coresponds to ``output[-2]``, ``init_y[4]`` corresponds to\n ``output[-1]``. While this order might seem strange, it comes\n natural from splitting an array at a given point. Assume that\n we have a array ``x``, and we choose ``k`` to be time step\n ``0``. Then our initial state would be ``x[:k]``, while the\n output will be ``x[k:]``. Looking at this split, elements in\n ``x[:k]`` are ordered exactly like those in ``init_y``.\n * ``taps`` -- Temporal taps of the output that will be pass to\n ``fn``. They are provided as a list of *negative* integers,\n where a value ``k`` implies that at iteration step ``t`` scan\n will pass to ``fn`` the slice ``t+k``.\n\n ``scan`` will follow this logic if partial information is given:\n\n * If an output is not wrapped in a dictionary, ``scan`` will wrap\n it in one assuming that you use only the last step of the output\n (i.e. it makes your tap value list equal to [-1]).\n * If you wrap an output in a dictionary and you do not provide any\n taps but you provide an initial state it will assume that you are\n using only a tap value of -1.\n * If you wrap an output in a dictionary but you do not provide any\n initial state, it assumes that you are not using any form of\n taps.\n * If you provide a ``None`` instead of a variable or a empty\n dictionary ``scan`` assumes that you will not use any taps for\n this output (like for example in case of a map)\n\n If ``outputs_info`` is an empty list or None, ``scan`` assumes\n that no tap is used for any of the outputs. 
If information is\n provided just for a subset of the outputs an exception is\n raised (because there is no convention on how scan should map\n the provided information to the outputs of ``fn``)\n\n non_sequences\n ``non_sequences`` is the list of arguments that are passed to\n ``fn`` at each steps. One can opt to exclude variable\n used in ``fn`` from this list as long as they are part of the\n computational graph, though for clarity we encourage not to do so.\n\n n_steps\n ``n_steps`` is the number of steps to iterate given as an int\n or Theano scalar. If any of the input sequences do not have\n enough elements, scan will raise an error. If the *value is 0* the\n outputs will have *0 rows*. If the value is negative, ``scan``\n will run backwards in time. If the ``go_backwards`` flag is already\n set and also ``n_steps`` is negative, ``scan`` will run forward\n in time. If n_steps is not provided, ``scan`` will figure\n out the amount of steps it should run given its input sequences.\n\n truncate_gradient\n ``truncate_gradient`` is the number of steps to use in truncated\n BPTT. If you compute gradients through a scan op, they are\n computed using backpropagation through time. By providing a\n different value then -1, you choose to use truncated BPTT instead\n of classical BPTT, where you go for only ``truncate_gradient``\n number of steps back in time.\n\n go_backwards\n ``go_backwards`` is a flag indicating if ``scan`` should go\n backwards through the sequences. If you think of each sequence\n as indexed by time, making this flag True would mean that\n ``scan`` goes back in time, namely that for any sequence it\n starts from the end and goes towards 0.\n\n name\n When profiling ``scan``, it is crucial to provide a name for any\n instance of ``scan``. The profiler will produce an overall\n profile of your code as well as profiles for the computation of\n one step of each instance of ``scan``. The ``name`` of the instance\n appears in those profiles and can greatly help to disambiguate\n information.\n\n mode\n It is recommended to leave this argument to None, especially\n when profiling ``scan`` (otherwise the results are not going to\n be accurate). If you prefer the computations of one step of\n ``scan`` to be done differently then the entire function, you\n can use this parameter to describe how the computations in this\n loop are done (see ``theano.function`` for details about\n possible values and their meaning).\n\n profile\n Flag or string. If true, or different from the empty string, a\n profile object will be created and attached to the inner graph of\n scan. In case ``profile`` is True, the profile object will have the\n name of the scan instance, otherwise it will have the passed string.\n Profile object collect (and print) information only when running the\n inner graph with the new cvm linker ( with default modes,\n other linkers this argument is useless)\n\n allow_gc\n Set the value of allow gc for the internal graph of scan. 
If\n set to None, this will use the value of config.scan.allow_gc.\n\n strict\n If true, all the shared variables used in ``fn`` must be provided as a\n part of ``non_sequences`` or ``sequences``.\n\n Returns\n -------\n tuple\n Tuple of the form (outputs, updates); ``outputs`` is either a\n Theano variable or a list of Theano variables representing the\n outputs of ``scan`` (in the same order as in ``outputs_info``).\n ``updates`` is a subclass of dictionary specifying the update rules for\n all shared variables used in scan.\n This dictionary should be passed to ``theano.function`` when you compile\n your function. The change compared to a normal dictionary is that we\n validate that keys are SharedVariable and addition of those dictionary\n are validated to be consistent.\n\n \"\"\"\n # General observation : this code is executed only once, at creation\n # of the computational graph, so we don't yet need to be smart about\n # anything (to speed things up)\n\n ##\n # Step 1. Wrap all inputs in dictionaries and add default values\n ##\n\n # check if inputs are just single variables instead of lists\n def wrap_into_list(x):\n \"\"\"\n Wrap the input into a list if it is not already a list.\n\n \"\"\"\n if x is None:\n return []\n elif not isinstance(x, (list, tuple)):\n return [x]\n else:\n return list(x)\n\n seqs = wrap_into_list(sequences)\n outs_info = wrap_into_list(outputs_info)\n\n # Make sure we get rid of numpy arrays or ints or anything like that\n # passed as inputs to scan\n non_seqs = []\n for elem in wrap_into_list(non_sequences):\n if not isinstance(elem, gof.Variable):\n non_seqs.append(tensor.as_tensor_variable(elem))\n else:\n non_seqs.append(elem)\n\n # If we provided a known number of steps ( before compilation)\n # and if that number is 1 or -1, then we can skip the Scan Op,\n # and just apply the inner function once\n # To do that we check here to see the nature of n_steps\n n_fixed_steps = None\n\n if isinstance(n_steps, (float, int)):\n n_fixed_steps = int(n_steps)\n else:\n try:\n n_fixed_steps = opt.get_scalar_constant_value(n_steps)\n except tensor.basic.NotScalarConstantError:\n n_fixed_steps = None\n\n # Check n_steps is an int\n if (hasattr(n_steps, 'dtype') and\n str(n_steps.dtype)[:3] not in ('uin', 'int')):\n raise ValueError(' n_steps must be an int. dtype provided '\n 'is %s' % n_steps.dtype)\n\n # compute number of sequences and number of outputs\n n_seqs = len(seqs)\n n_outs = len(outs_info)\n\n return_steps = OrderedDict()\n # wrap sequences in a dictionary if they are not already dictionaries\n for i in xrange(n_seqs):\n if not isinstance(seqs[i], dict):\n seqs[i] = OrderedDict([('input', seqs[i]), ('taps', [0])])\n elif seqs[i].get('taps', None) is not None:\n seqs[i]['taps'] = wrap_into_list(seqs[i]['taps'])\n elif seqs[i].get('taps', None) is None:\n # seqs dictionary does not have the ``taps`` key\n seqs[i]['taps'] = [0]\n\n # wrap outputs info in a dictionary if they are not already in one\n for i in xrange(n_outs):\n if outs_info[i] is not None:\n if isinstance(outs_info[i], dict):\n # DEPRECATED :\n if outs_info[i].get('return_steps', None) is not None:\n raise ValueError(\n \"Using `return_steps` has been deprecated. \"\n \"Simply select the entries you need using a \"\n \"subtensor. 
Scan will optimize memory \"\n \"consumption, so do not worry about that.\")\n # END\n\n if not isinstance(outs_info[i], dict):\n # by default any output has a tap value of -1\n outs_info[i] = OrderedDict([('initial', outs_info[i]), ('taps', [-1])])\n elif (outs_info[i].get('initial', None) is None and\n outs_info[i].get('taps', None) is not None):\n # ^ no initial state but taps provided\n raise ValueError(('If you are using slices of an output '\n 'you need to provide a initial state '\n 'for it'), outs_info[i])\n elif (outs_info[i].get('initial', None) is not None and\n outs_info[i].get('taps', None) is None):\n # ^ initial state but taps not provided\n if 'taps' in outs_info[i]:\n # ^ explicitly provided a None for taps\n _logger.warning('Output %s ( index %d) has a initial '\n 'state but taps is explicitly set to None ',\n getattr(outs_info[i]['initial'], 'name', 'None'),\n i)\n outs_info[i]['taps'] = [-1]\n else:\n # if a None is provided as the output info we replace it\n # with an empty OrdereDict() to simplify handling\n outs_info[i] = OrderedDict()\n\n ##\n # Step 2. Generate inputs and outputs of the inner functions\n # for compiling a dummy function (Iteration #1)\n ##\n\n # create theano inputs for the recursive function\n # note : this is a first batch of possible inputs that will\n # be compiled in a dummy function; we used this dummy\n # function to detect shared variables and their updates\n # and to construct a new and complete list of inputs and\n # outputs\n\n n_seqs = 0\n scan_seqs = [] # Variables passed as inputs to the scan op\n inner_seqs = [] # Variables passed as inputs to the inner function\n inner_slices = [] # Actual slices if scan is removed from the picture\n # go through sequences picking up time slices as needed\n for i, seq in enumerate(seqs):\n # Note that you can have something like no taps for\n # a sequence, though is highly unlikely in practice\n if 'taps' in seq:\n # go through the indicated slice\n mintap = numpy.min(seq['taps'])\n maxtap = numpy.max(seq['taps'])\n for k in seq['taps']:\n # create one slice of the input\n # Later on, if we decide not to use scan because we are\n # going for just one step, it makes things easier if we\n # compute the correct outputs here. This way we can use\n # the output of the lambda expression directly to replace\n # the output of scan.\n\n # If not we need to use copies, that will be replaced at\n # each frame by the corresponding slice\n actual_slice = seq['input'][k - mintap]\n _seq_val = tensor.as_tensor_variable(seq['input'])\n _seq_val_slice = _seq_val[k - mintap]\n nw_slice = _seq_val_slice.type()\n\n # Try to transfer test_value to the new variable\n if config.compute_test_value != 'off':\n try:\n nw_slice.tag.test_value = gof.Op._get_test_value(\n _seq_val_slice)\n except AttributeError as e:\n if config.compute_test_value != 'ignore':\n # No need to print a warning or raise an error now,\n # it will be done when fn will be called.\n _logger.info(('Cannot compute test value for '\n 'the inner function of scan, input value '\n 'missing %s'), e)\n\n # Add names to slices for debugging and pretty printing ..\n # that is if the input already has a name\n if getattr(seq['input'], 'name', None) is not None:\n if k > 0:\n nw_name = seq['input'].name + '[t+%d]' % k\n elif k == 0:\n nw_name = seq['input'].name + '[t]'\n else:\n nw_name = seq['input'].name + '[t%d]' % k\n nw_slice.name = nw_name\n\n # We cut the sequence such that seq[i] to correspond to\n # seq[i-k]. 
For the purposes of cutting the sequences, we\n # need to pretend tap 0 is used to avoid cutting the sequences\n # too long if the taps are all lower or all higher than 0.\n maxtap_proxy = max(maxtap, 0)\n mintap_proxy = min(mintap, 0)\n start = (k - mintap_proxy)\n if k == maxtap_proxy:\n nw_seq = seq['input'][start:]\n else:\n end = -(maxtap_proxy - k)\n nw_seq = seq['input'][start:end]\n\n if go_backwards:\n nw_seq = nw_seq[::-1]\n\n scan_seqs.append(nw_seq)\n inner_seqs.append(nw_slice)\n inner_slices.append(actual_slice)\n n_seqs += 1\n\n # Since we've added all sequences now we need to level them up based on\n # n_steps or their different shapes\n lengths_vec = []\n for seq in scan_seqs:\n lengths_vec.append(seq.shape[0])\n\n if not scan_utils.isNaN_or_Inf_or_None(n_steps):\n # ^ N_steps should also be considered\n lengths_vec.append(tensor.as_tensor(n_steps))\n\n if len(lengths_vec) == 0:\n # ^ No information about the number of steps\n raise ValueError('No information about the number of steps '\n 'provided. Either provide a value for '\n 'n_steps argument of scan or provide an input '\n 'sequence')\n\n # If the user has provided the number of steps, do that regardless ( and\n # raise an error if the sequences are not long enough )\n if scan_utils.isNaN_or_Inf_or_None(n_steps):\n actual_n_steps = lengths_vec[0]\n for contestant in lengths_vec[1:]:\n actual_n_steps = tensor.minimum(actual_n_steps, contestant)\n else:\n actual_n_steps = tensor.as_tensor(n_steps)\n\n # Add names -- it helps a lot when debugging\n\n for (nw_seq, seq) in zip(scan_seqs, seqs):\n if getattr(seq['input'], 'name', None) is not None:\n nw_seq.name = seq['input'].name + '[%d:]' % k\n\n scan_seqs = [seq[:actual_n_steps] for seq in scan_seqs]\n # Conventions :\n # mit_mot = multiple input taps, multiple output taps ( only provided\n # by the gradient function )\n # mit_sot = multiple input taps, single output tap (t + 0)\n # sit_sot = single input tap, single output tap (t + 0)\n # nit_sot = no input tap, single output tap (t + 0)\n\n # MIT_MOT -- not provided by the user only by the grad function\n n_mit_mot = 0\n n_mit_mot_outs = 0\n mit_mot_scan_inputs = []\n mit_mot_inner_inputs = []\n mit_mot_inner_outputs = []\n mit_mot_out_slices = []\n mit_mot_rightOrder = []\n\n # SIT_SOT -- provided by the user\n n_mit_sot = 0\n mit_sot_scan_inputs = []\n mit_sot_inner_inputs = []\n mit_sot_inner_slices = []\n mit_sot_inner_outputs = []\n mit_sot_return_steps = OrderedDict()\n mit_sot_tap_array = []\n mit_sot_rightOrder = []\n\n n_sit_sot = 0\n sit_sot_scan_inputs = []\n sit_sot_inner_inputs = []\n sit_sot_inner_slices = []\n sit_sot_inner_outputs = []\n sit_sot_return_steps = OrderedDict()\n sit_sot_rightOrder = []\n\n # go through outputs picking up time slices as needed\n for i, init_out in enumerate(outs_info):\n # Note that our convention dictates that if an output uses\n # just the previous time step, as a initial state we will only\n # provide a tensor of the same dimension as one time step; This\n # makes code much cleaner for those who do not use taps. 
Otherwise\n # they would always had to shape_padleft the initial state ..\n # which is ugly\n if init_out.get('taps', None) == [-1]:\n\n actual_arg = init_out['initial']\n if not isinstance(actual_arg, tensor.Variable):\n actual_arg = tensor.as_tensor_variable(actual_arg)\n arg = safe_new(actual_arg)\n if isinstance(arg, tensor.Constant):\n # safe new returns a clone of the constants, but that is not\n # what we need for initial states\n arg = arg.type()\n\n # Try to transfer test_value to the new variable\n if config.compute_test_value != 'off':\n try:\n arg.tag.test_value = gof.Op._get_test_value(actual_arg)\n except AttributeError as e:\n if config.compute_test_value != 'ignore':\n # No need to print a warning or raise an error now,\n # it will be done when fn will be called.\n _logger.info(('Cannot compute test value for the '\n 'inner function of scan, input value missing %s'),\n e)\n\n if getattr(init_out['initial'], 'name', None) is not None:\n arg.name = init_out['initial'].name + '[t-1]'\n\n # We need now to allocate space for storing the output and copy\n # the initial state over. We do this using the expand function\n # defined in scan utils\n sit_sot_scan_inputs.append(\n scan_utils.expand(\n tensor.unbroadcast(\n tensor.shape_padleft(actual_arg), 0),\n actual_n_steps\n ))\n\n sit_sot_inner_slices.append(actual_arg)\n if i in return_steps:\n sit_sot_return_steps[n_sit_sot] = return_steps[i]\n sit_sot_inner_inputs.append(arg)\n sit_sot_rightOrder.append(i)\n n_sit_sot += 1\n\n elif init_out.get('taps', None):\n\n if numpy.any(numpy.array(init_out.get('taps', [])) > 0):\n # Make sure we do not have requests for future values of a\n # sequence we can not provide such values\n raise ValueError('Can not use future taps of outputs',\n init_out)\n # go through the taps\n mintap = abs(numpy.min(init_out['taps']))\n mit_sot_tap_array.append(init_out['taps'])\n idx_offset = abs(numpy.min(init_out['taps']))\n # Sequence\n mit_sot_scan_inputs.append(\n scan_utils.expand(init_out['initial'][:mintap],\n actual_n_steps))\n\n if i in return_steps:\n mit_sot_return_steps[n_mit_sot] = return_steps[i]\n mit_sot_rightOrder.append(i)\n n_mit_sot += 1\n for k in init_out['taps']:\n # create a new slice\n actual_nw_slice = init_out['initial'][k + mintap]\n _init_out_var = tensor.as_tensor_variable(init_out['initial'])\n _init_out_var_slice = _init_out_var[k + mintap]\n nw_slice = _init_out_var_slice.type()\n\n # Try to transfer test_value to the new variable\n if config.compute_test_value != 'off':\n try:\n nw_slice.tag.test_value = gof.Op._get_test_value(\n _init_out_var_slice)\n except AttributeError as e:\n if config.compute_test_value != 'ignore':\n # No need to print a warning or raise an error now,\n # it will be done when fn will be called.\n _logger.info(('Cannot compute test value for '\n 'the inner function of scan, input value '\n 'missing. 
%s'), e)\n\n # give it a name or debugging and pretty printing\n if getattr(init_out['initial'], 'name', None) is not None:\n if k > 0:\n nw_slice.name = (init_out['initial'].name +\n '[t+%d]' % k)\n elif k == 0:\n nw_slice.name = init_out['initial'].name + '[t]'\n else:\n nw_slice.name = (init_out['initial'].name +\n '[t%d]' % k)\n mit_sot_inner_inputs.append(nw_slice)\n mit_sot_inner_slices.append(actual_nw_slice)\n # NOTE: there is another case, in which we do not want to provide\n # any previous value of the output to the inner function (i.e.\n # a map); in that case we do not have to do anything ..\n\n # Re-order args\n max_mit_sot = numpy.max([-1] + mit_sot_rightOrder) + 1\n max_sit_sot = numpy.max([-1] + sit_sot_rightOrder) + 1\n n_elems = numpy.max([max_mit_sot, max_sit_sot])\n _ordered_args = [[] for x in xrange(n_elems)]\n offset = 0\n for idx in xrange(n_mit_sot):\n n_inputs = len(mit_sot_tap_array[idx])\n if n_fixed_steps in [1, -1]:\n _ordered_args[mit_sot_rightOrder[idx]] = \\\n mit_sot_inner_slices[offset:offset + n_inputs]\n else:\n _ordered_args[mit_sot_rightOrder[idx]] = \\\n mit_sot_inner_inputs[offset:offset + n_inputs]\n offset += n_inputs\n\n for idx in xrange(n_sit_sot):\n if n_fixed_steps in [1, -1]:\n _ordered_args[sit_sot_rightOrder[idx]] = \\\n [sit_sot_inner_slices[idx]]\n else:\n _ordered_args[sit_sot_rightOrder[idx]] = \\\n [sit_sot_inner_inputs[idx]]\n\n ordered_args = []\n for ls in _ordered_args:\n ordered_args += ls\n if n_fixed_steps in [1, -1]:\n args = (inner_slices +\n ordered_args +\n non_seqs)\n\n else:\n args = (inner_seqs +\n ordered_args +\n non_seqs)\n\n # add only the non-shared variables and non-constants to the arguments of\n # the dummy function [ a function should not get shared variables or\n # constants as input ]\n dummy_args = [arg for arg in args\n if (not isinstance(arg, SharedVariable) and\n not isinstance(arg, tensor.Constant))]\n # when we apply the lambda expression we get a mixture of update rules\n # and outputs that needs to be separated\n\n condition, outputs, updates = scan_utils.get_updates_and_outputs(fn(*args))\n if condition is not None:\n as_while = True\n else:\n as_while = False\n ##\n # Step 3. Check if we actually need scan and remove it if we don't\n ##\n\n if n_fixed_steps in [1, -1]:\n # We do not need to use the scan op anymore, so we can just return\n # the outputs and updates we have\n if condition is not None:\n _logger.warning(('When the number of steps is fixed and equal '\n 'to 1, the provided stopping condition, ',\n str(condition), ' is ignored'))\n\n for pos, inner_out in enumerate(outputs):\n # we need to see if we need to pad our sequences with an\n # unbroadcastable dimension; case example : we return an\n # output for which we want all intermediate. If n_steps is 1\n # then, if we return the output as given by the innner function\n # this will represent only a slice and it will have one\n # dimension less.\n if (isinstance(inner_out.type, tensor.TensorType) and\n return_steps.get(pos, 0) != 1):\n outputs[pos] = tensor.unbroadcast(\n tensor.shape_padleft(inner_out), 0)\n if len(outputs) == 1:\n outputs = outputs[0]\n\n return (outputs, updates)\n\n ##\n # Step 4. 
Compile the dummy function\n ##\n\n # We can now compile a dummy function just to see what shared variable\n # we have and what are their update rules (note that the user has\n # the option not to pass the shared variable to scan, so we need to\n # pick them manually and add them to scan)\n # make the compilation as fast as possible by not applying any\n # optimization or conversion to C [ note this region is not important\n # for performance so we can do stuff as unoptimal as we wish ]\n\n # extract still missing inputs (there still might be so) and add them\n # as non sequences at the end of our args\n fake_nonseqs = [x.type() for x in non_seqs]\n fake_outputs = scan_utils.clone(outputs,\n replace=OrderedDict(izip(non_seqs,\n fake_nonseqs)))\n all_inputs = ifilter(\n lambda x: (isinstance(x, gof.Variable) and\n not isinstance(x, SharedVariable) and\n not isinstance(x, gof.Constant)),\n gof.graph.inputs(fake_outputs))\n extra_inputs = [x for x in all_inputs if x not in args + fake_nonseqs]\n non_seqs += extra_inputs\n # Note we do not use all_inputs directly since the order of variables\n # in args is quite important\n dummy_args += extra_inputs\n\n dummy_outs = outputs\n if condition is not None:\n dummy_outs.append(condition)\n dummy_f = function(dummy_args,\n dummy_outs,\n updates=updates,\n mode=compile.mode.Mode(linker='py',\n optimizer=None),\n on_unused_input='ignore',\n profile=False)\n\n ##\n # Step 5. Re-arange inputs of scan into a more strict order\n ##\n\n # Step 5.0 Check the outputs of the dummy function to see if they\n # match with user provided data\n\n # if the number of outputs to the function does not match the number of\n # assumed outputs until now (provided by the user) there can be\n # only one explanation: No information is provided for any of the\n # outputs (i.e. we are dealing with a map)\n tmp_dummy_f_outs = len(dummy_f.maker.outputs)\n if as_while:\n tmp_dummy_f_outs -= 1\n if not (tmp_dummy_f_outs == n_outs or outs_info == []):\n raise ValueError('Please provide None as outputs_info for '\n 'any output that does not feed back into '\n 'scan (i.e. it behaves like a map) ')\n\n if outs_info == []:\n n_outs = len(dummy_f.maker.outputs)\n if as_while:\n n_outs = n_outs - 1\n outs_info = [OrderedDict() for x in xrange(n_outs)]\n\n # Step 5.1 Outputs with taps different then -1\n\n for i, out in enumerate(outs_info):\n if 'taps' in out and out['taps'] != [-1]:\n mit_sot_inner_outputs.append(outputs[i])\n\n # Step 5.2 Outputs with tap equal to -1\n for i, out in enumerate(outs_info):\n if 'taps' in out and out['taps'] == [-1]:\n sit_sot_inner_outputs.append(outputs[i])\n\n # Step 5.3 Outputs that correspond to update rules of shared variables\n givens = OrderedDict()\n n_shared_outs = 0\n shared_scan_inputs = []\n shared_inner_inputs = []\n shared_inner_outputs = []\n sit_sot_shared = []\n for input in dummy_f.maker.expanded_inputs:\n if isinstance(input.variable, SharedVariable) and input.update:\n new_var = safe_new(input.variable)\n if getattr(input.variable, 'name', None) is not None:\n new_var.name = input.variable.name + '_copy'\n if isinstance(new_var.type, ops.expandable_types):\n sit_sot_inner_inputs.append(new_var)\n sit_sot_scan_inputs.append(\n scan_utils.expand(\n tensor.unbroadcast(\n tensor.shape_padleft(input.variable), 0),\n actual_n_steps))\n tensor_update = tensor.as_tensor_variable(input.update)\n sit_sot_inner_outputs.append(tensor_update)\n # Not that pos is not a negative index. 
The sign of pos is used\n # as a flag to indicate if this output should be part of the\n # update rules or part of the standard outputs of scan.\n # If `pos` is positive than it corresponds to the standard\n # outputs of scan and it refers to output of index `pos`. If `pos`\n # is negative that it corresponds to update rules of scan and it\n # refers to update rule of index -1 - `pos`.\n sit_sot_rightOrder.append(-1 - len(sit_sot_shared))\n sit_sot_shared.append(input.variable)\n givens[input.variable] = new_var\n\n else:\n shared_inner_inputs.append(new_var)\n shared_scan_inputs.append(input.variable)\n shared_inner_outputs.append(input.update)\n givens[input.variable] = new_var\n n_shared_outs += 1\n n_sit_sot = len(sit_sot_inner_inputs)\n # Step 5.4 Outputs with no taps used in the input\n n_nit_sot = 0\n nit_sot_inner_outputs = []\n nit_sot_return_steps = OrderedDict()\n nit_sot_rightOrder = []\n for i, out in enumerate(outs_info):\n if not 'taps' in out:\n nit_sot_inner_outputs.append(outputs[i])\n if i in return_steps:\n nit_sot_return_steps[n_nit_sot] = return_steps[i]\n nit_sot_rightOrder.append(i)\n n_nit_sot += 1\n\n # Step 5.5 all other arguments including extra inputs\n other_scan_args = []\n other_inner_args = []\n\n other_scan_args += [arg for arg in non_seqs\n if (not isinstance(arg, SharedVariable) and\n not isinstance(arg, tensor.Constant))]\n\n # Step 5.6 all shared variables with no update rules\n other_inner_args += [safe_new(arg, '_copy') for arg in non_seqs\n if (not isinstance(arg, SharedVariable) and\n not isinstance(arg, tensor.Constant))]\n\n givens.update(OrderedDict(izip(other_scan_args, other_inner_args)))\n\n if strict:\n non_seqs_set = set(non_sequences if non_sequences != None else [])\n\n other_shared_scan_args = [arg.variable for arg\n in dummy_f.maker.expanded_inputs\n if (isinstance(arg.variable, SharedVariable) and\n not arg.update and\n arg.variable in non_seqs_set)]\n other_shared_inner_args = [safe_new(arg.variable, '_copy') for arg\n in dummy_f.maker.expanded_inputs\n if (isinstance(arg.variable, SharedVariable) and\n not arg.update and\n arg.variable in non_seqs_set)]\n else:\n other_shared_scan_args = [arg.variable for arg\n in dummy_f.maker.expanded_inputs\n if (isinstance(arg.variable, SharedVariable) and\n not arg.update)]\n other_shared_inner_args = [safe_new(arg.variable, '_copy') for arg\n in dummy_f.maker.expanded_inputs\n if (isinstance(arg.variable, SharedVariable) and\n not arg.update)]\n givens.update(OrderedDict(izip(other_shared_scan_args,\n other_shared_inner_args)))\n\n ##\n # Step 6. Re-order the outputs and clone them replacing things\n # using the givens\n ##\n inner_inputs = (inner_seqs +\n mit_mot_inner_inputs +\n mit_sot_inner_inputs +\n sit_sot_inner_inputs +\n shared_inner_inputs +\n other_shared_inner_args +\n other_inner_args)\n\n inner_outs = (mit_mot_inner_outputs +\n mit_sot_inner_outputs +\n sit_sot_inner_outputs +\n nit_sot_inner_outputs +\n shared_inner_outputs)\n if condition is not None:\n inner_outs.append(condition)\n # Cuda and Gpuarray are imported here, instead of being imported on top of\n # the file because that would force on the user some dependencies that we\n # might do not want to. Currently we are working on removing the\n # dependencies on sandbox code completeley.\n from theano.sandbox import cuda, gpuarray\n if cuda.cuda_available or gpuarray.pygpu_activated:\n # very often we end up in this situation when we want to\n # replace w with w_copy, where w is a GPU variable\n # and w_copy is TensorType. 
This is caused because shared\n # variables are put on GPU right aways >:| ,\n new_givens = OrderedDict()\n\n for w, w_copy in iteritems(givens):\n if ((isinstance(w.type, cuda.CudaNdarrayType) or\n isinstance(w.type, gpuarray.GpuArrayType)) and\n isinstance(w_copy.type, tensor.TensorType)):\n for o in inner_outs:\n new_givens = traverse(o, w, w_copy, new_givens)\n else:\n new_givens[w] = w_copy\n else:\n new_givens = givens\n\n new_outs = scan_utils.clone(inner_outs, replace=new_givens)\n\n ##\n # Step 7. Create the Scan Op\n ##\n\n tap_array = mit_sot_tap_array + [[-1] for x in xrange(n_sit_sot)]\n if allow_gc is None:\n allow_gc = config.scan.allow_gc\n info = OrderedDict()\n\n info['tap_array'] = tap_array\n info['n_seqs'] = n_seqs\n info['n_mit_mot'] = n_mit_mot\n info['n_mit_mot_outs'] = n_mit_mot_outs\n info['mit_mot_out_slices'] = mit_mot_out_slices\n info['n_mit_sot'] = n_mit_sot\n info['n_sit_sot'] = n_sit_sot\n info['n_shared_outs'] = n_shared_outs\n info['n_nit_sot'] = n_nit_sot\n info['truncate_gradient'] = truncate_gradient\n info['name'] = name\n info['mode'] = mode\n info['destroy_map'] = OrderedDict()\n info['gpu'] = False\n info['as_while'] = as_while\n info['profile'] = profile\n info['allow_gc'] = allow_gc\n info['strict'] = strict\n if strict:\n warnings.warn('In the strict mode, all neccessary shared variables '\n 'must be passed as a part of non_sequences', Warning)\n\n local_op = scan_op.Scan(inner_inputs, new_outs, info)\n\n ##\n # Step 8. Compute the outputs using the scan op\n ##\n _scan_inputs = (scan_seqs +\n mit_mot_scan_inputs +\n mit_sot_scan_inputs +\n sit_sot_scan_inputs +\n shared_scan_inputs +\n [actual_n_steps for x in xrange(n_nit_sot)] +\n other_shared_scan_args +\n other_scan_args)\n\n scan_inputs = []\n for arg in [actual_n_steps] + _scan_inputs:\n try:\n arg = tensor.as_tensor_variable(arg)\n except TypeError:\n # This happens for Random States for e.g. but it is a good way\n # to make sure no input is a cuda ndarrays\n pass\n scan_inputs += [arg]\n scan_outs = local_op(*scan_inputs)\n if type(scan_outs) not in (list, tuple):\n scan_outs = [scan_outs]\n ##\n # Step 9. Figure out which outs are update rules for shared variables\n # and so on ...\n ##\n\n update_map = OrderedUpdates()\n\n def remove_dimensions(outs, steps_return, offsets=None):\n out_ls = []\n for idx, out in enumerate(outs):\n if idx in steps_return:\n if steps_return[idx] > 1:\n out_ls.append(out[-steps_return[idx]:])\n else:\n out_ls.append(out[-1])\n else:\n if offsets is None:\n out_ls.append(out)\n else:\n out_ls.append(out[offsets[idx]:])\n return out_ls\n\n offset = n_mit_mot\n offsets = [abs(numpy.min(x)) for x in mit_sot_tap_array]\n mit_sot_outs = remove_dimensions(\n scan_outs[offset:offset + n_mit_sot],\n mit_sot_return_steps,\n offsets)\n\n offset += n_mit_sot\n offsets = [1 for x in xrange(n_sit_sot)]\n sit_sot_outs = remove_dimensions(\n scan_outs[offset:offset + n_sit_sot],\n sit_sot_return_steps,\n offsets)\n\n offset += n_sit_sot\n nit_sot_outs = remove_dimensions(\n scan_outs[offset:offset + n_nit_sot],\n nit_sot_return_steps)\n\n offset += n_nit_sot\n for idx, update_rule in enumerate(\n scan_outs[offset:offset + n_shared_outs]):\n update_map[shared_scan_inputs[idx]] = update_rule\n\n _scan_out_list = (mit_sot_outs +\n sit_sot_outs +\n nit_sot_outs)\n # Step 10. 
I need to reorder the outputs to be in the order expected by\n # the user\n rightOrder = (mit_sot_rightOrder +\n sit_sot_rightOrder +\n nit_sot_rightOrder)\n scan_out_list = [None] * len(rightOrder)\n for idx, pos in enumerate(rightOrder):\n if pos >= 0:\n scan_out_list[pos] = _scan_out_list[idx]\n else:\n # Not that pos is not a negative index. The sign of pos is used\n # as a flag to indicate if this output should be part of the\n # update rules or part of the standard outputs of scan.\n # If `pos` is positive than it corresponds to the standard\n # outputs of scan and it refers to output of index `pos`. If `pos`\n # is negative that it corresponds to update rules of scan and it\n # refers to update rule of index -1 - `pos`.\n update_map[sit_sot_shared[abs(pos) - 1]] = _scan_out_list[idx][-1]\n scan_out_list = [x for x in scan_out_list if x is not None]\n if len(scan_out_list) == 1:\n scan_out_list = scan_out_list[0]\n elif len(scan_out_list) == 0:\n scan_out_list = None\n return (scan_out_list, update_map)\n",
"from unittest import TestCase\nfrom nose.plugins.skip import SkipTest\n\nimport numpy\n\nimport theano\nfrom theano import tensor\nfrom theano.tests import unittest_tools as utt\nfrom theano.tensor.blas import gemv_inplace, gemm_inplace, _dot22\nfrom theano.tensor.tests.test_blas import TestGer, BaseGemv\n\nfrom .. import gpuarray_shared_constructor\nfrom .test_basic_ops import (makeTester, rand,\n mode_with_gpu)\n\nfrom ..blas import (gpugemv_inplace, gpugemv_no_inplace,\n gpugemm_inplace,\n gpuger_inplace, gpuger_no_inplace,\n GpuGer, gpu_dot22, GpuGemm)\n\n\nGpuGemvTester = makeTester(\n 'GpuGemvTester',\n op=gemv_inplace, gpu_op=gpugemv_inplace,\n cases=dict(dot_vv=[rand(1), 1, rand(1, 2), rand(2), 0],\n dot_vm=[rand(3), 1, rand(3, 2), rand(2), 0],\n # test_02=[rand(0), 1, rand(0, 2), rand(2), 0],\n # test_30=[rand(3), 1, rand(3, 0), rand(0), 0],\n # test_00=[rand(0), 1, rand(0, 0), rand(0), 0],\n test_stride=[rand(3)[::-1], 1, rand(3, 2)[::-1], rand(2)[::-1], 0],\n )\n )\n\n\nclass TestGpuSgemv(TestCase, BaseGemv, utt.TestOptimizationMixin):\n mode = mode_with_gpu\n dtype = 'float32'\n\n gemv = gpugemv_no_inplace\n gemv_inplace = gpugemv_inplace\n\n @staticmethod\n def shared(val):\n try:\n return gpuarray_shared_constructor(val)\n except TypeError:\n return theano.shared(val)\n\n\nGpuGemmTester = makeTester(\n 'GpuGemmTester',\n op=gemm_inplace, gpu_op=gpugemm_inplace,\n cases=dict(test1=[rand(3, 4), 1.0, rand(3, 5), rand(5, 4), 0.0],\n test2=[rand(3, 4), 1.0, rand(3, 5), rand(5, 4), 1.0],\n test3=[rand(3, 4), 1.0, rand(3, 5), rand(5, 4), -1.0],\n test4=[rand(3, 4), 0.0, rand(3, 5), rand(5, 4), 0.0],\n test5=[rand(3, 4), 0.0, rand(3, 5), rand(5, 4), 0.6],\n test6=[rand(3, 4), 0.0, rand(3, 5), rand(5, 4), -1.0],\n test7=[rand(3, 4), -1.0, rand(3, 5), rand(5, 4), 0.0],\n test8=[rand(3, 4), -1.0, rand(3, 5), rand(5, 4), 1.1],\n test9=[rand(3, 4), -1.0, rand(3, 5), rand(5, 4), -1.1],\n # test10=[rand(0, 4), -1.0, rand(0, 5), rand(5, 4), 0.0],\n # test11=[rand(3, 0), -1.0, rand(3, 5), rand(5, 0), 1.1],\n # test12=[rand(3, 4), -1.0, rand(3, 0), rand(0, 4), -1.1],\n # test13=[rand(0, 0), -1.0, rand(0, 0), rand(0, 0), -1.1],\n )\n )\n\n\nclass TestGpuSger(TestGer):\n def setUp(self):\n self.mode = mode_with_gpu\n dtype = self.dtype = 'float32' # optimization isn't dtype-dependent\n self.A = tensor.tensor(dtype=dtype, broadcastable=(False, False))\n self.a = tensor.tensor(dtype=dtype, broadcastable=())\n self.x = tensor.tensor(dtype=dtype, broadcastable=(False,))\n self.y = tensor.tensor(dtype=dtype, broadcastable=(False,))\n self.ger_destructive = gpuger_inplace\n\n # data on the gpu make the op always inplace\n self.ger = gpuger_inplace\n self.gemm = gpugemm_inplace\n\n def test_f32_0_0(self):\n raise SkipTest('0-sized objects not supported')\n\n def test_f32_1_0(self):\n raise SkipTest('0-sized objects not supported')\n\n def test_f32_0_1(self):\n raise SkipTest('0-sized objects not supported')\n\n\nclass TestGpuSgerNoTransfer(TestGpuSger):\n shared = staticmethod(gpuarray_shared_constructor)\n\n\nclass TestGpuGer_OpContract(TestCase, utt.T_OpContractMixin):\n def setUp(self):\n self.ops = [gpuger_no_inplace, gpuger_inplace]\n\n def clone(self, op):\n return GpuGer(destructive=op.destructive)\n\n\nGpuDot22Tester = makeTester(\n 'GpuDot22Tester',\n op=_dot22, gpu_op=gpu_dot22,\n cases=dict(\n test1=[rand(3, 4), rand(4, 5)],\n test2=[rand(1, 4), rand(4, 5)],\n test3=[rand(3, 1), rand(1, 5)],\n test4=[rand(3, 4), rand(4, 1)],\n # test5=[rand(0, 4), rand(4, 5)],\n # test6=[rand(3, 0), rand(0, 
5)],\n # test7=[rand(3, 4), rand(4, 0)],\n # test8=[rand(0, 4), rand(4, 0)],\n # test9=[rand(0, 0), rand(0, 0)],\n )\n)\n\n\ndef test_hgemm_swap():\n from theano.sandbox.cuda import nvcc_compiler\n if nvcc_compiler.nvcc_version < '7.5':\n raise SkipTest(\"SgemmEx is only avaialble on cuda 7.5+\")\n\n v = tensor.vector(dtype='float16')\n m = tensor.matrix(dtype='float16')\n m2 = tensor.matrix(dtype='float16')\n m32 = tensor.matrix(dtype='float32')\n\n # test that we don't try to replace anything but matrix x matrix in float16\n f = theano.function([v, m], tensor.dot(v, m), mode=mode_with_gpu)\n assert len([node for node in f.maker.fgraph.apply_nodes\n if isinstance(node.op, GpuGemm)]) == 0\n\n f = theano.function([m32, m], tensor.dot(m32, m), mode=mode_with_gpu)\n assert len([node for node in f.maker.fgraph.apply_nodes\n if isinstance(node.op, GpuGemm)]) == 0\n\n f = theano.function([m, m2], tensor.dot(m, m2), mode=mode_with_gpu)\n assert len([node for node in f.maker.fgraph.apply_nodes\n if isinstance(node.op, GpuGemm)]) == 1\n\n v1 = numpy.random.random((3, 4)).astype('float16')\n v2 = numpy.random.random((4, 2)).astype('float16')\n\n of = f(v1, v2)\n on = numpy.dot(v1, v2)\n\n utt.assert_allclose(of, on)\n\n\ndef test_hgemm_alpha_output_merge():\n from theano.sandbox.cuda import nvcc_compiler\n if nvcc_compiler.nvcc_version < '7.5':\n raise SkipTest(\"SgemmEx is only avaialble on cuda 7.5+\")\n\n m1 = tensor.matrix(dtype='float16')\n m2 = tensor.matrix(dtype='float16')\n\n b = tensor.matrix(dtype='float16')\n\n hgemm = numpy.asarray(0.05, dtype='float16') * (tensor.dot(m1, m2) + b)\n\n f = theano.function([m1, m2, b], hgemm, mode=mode_with_gpu)\n # there should be 3 gpu_from_host, 1 hgemm and 1 host_from_gpu\n assert len(f.maker.fgraph.apply_nodes) == 5\n",
"from nose.plugins.skip import SkipTest\n\nfrom theano.d3viz import has_requirements\nif not has_requirements:\n raise SkipTest('Missing requirements')\n\nimport numpy as np\nimport unittest\n\nimport theano as th\nfrom theano.d3viz.formatting import PyDotFormatter\nfrom theano.d3viz.tests import models\n\n\nclass TestPyDotFormatter(unittest.TestCase):\n\n def setUp(self):\n self.rng = np.random.RandomState(0)\n\n def node_counts(self, graph):\n node_types = [node.get_attributes()['node_type']\n for node in graph.get_nodes()]\n a, b = np.unique(node_types, return_counts=True)\n nc = dict(zip(a, b))\n return nc\n\n def test_mlp(self):\n m = models.Mlp()\n f = th.function(m.inputs, m.outputs)\n pdf = PyDotFormatter()\n graph = pdf(f)\n expected = 11\n if th.config.mode == \"FAST_COMPILE\":\n expected = 12\n self.assertEqual(len(graph.get_nodes()), 12)\n nc = self.node_counts(graph)\n\n if th.config.mode == \"FAST_COMPILE\":\n assert nc['apply'] == 6\n else:\n assert nc['apply'] == 5\n assert nc['output'] == 1\n\n def test_ofg(self):\n m = models.Ofg()\n f = th.function(m.inputs, m.outputs)\n pdf = PyDotFormatter()\n graph = pdf(f)\n assert len(graph.get_nodes()) == 10\n sub_graphs = graph.get_subgraph_list()\n assert len(sub_graphs) == 2\n ofg1, ofg2 = sub_graphs\n if th.config.mode == \"FAST_COMPILE\":\n assert len(ofg1.get_nodes()) == 9\n else:\n assert len(ofg1.get_nodes()) == 5\n assert len(ofg1.get_nodes()) == len(ofg2.get_nodes())\n\n def test_ofg_nested(self):\n m = models.OfgNested()\n f = th.function(m.inputs, m.outputs)\n pdf = PyDotFormatter()\n graph = pdf(f)\n assert len(graph.get_nodes()) == 7\n assert len(graph.get_subgraph_list()) == 1\n ofg1 = graph.get_subgraph_list()[0]\n assert len(ofg1.get_nodes()) == 6\n assert len(ofg1.get_subgraph_list()) == 1\n ofg2 = ofg1.get_subgraph_list()[0]\n assert len(ofg2.get_nodes()) == 4\n"
] |
[
[
"numpy.asarray",
"numpy.random.randn"
],
[
"numpy.random.randn",
"numpy.zeros_like",
"numpy.allclose"
],
[
"numpy.asarray",
"numpy.hstack",
"numpy.size",
"scipy.sparse.lil_matrix"
],
[
"numpy.testing.noseclasses.KnownFailure",
"numpy.testing.noseclasses.NumpyTestProgram"
],
[
"numpy.max",
"numpy.min"
],
[
"numpy.asarray",
"numpy.dot",
"numpy.random.random"
],
[
"numpy.random.RandomState",
"numpy.unique"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
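The scan() source in the record above trims each input sequence by its tap range (start = k - mintap, end = -(maxtap - k)). A minimal pure-Python sketch of that tap-slicing convention (toy data and names, not taken from the dataset row):

seq = list(range(10))
taps = [-2, 0, 1]
mintap, maxtap = min(taps), max(taps)
# step t sees seq[t + k] for each tap k, so the usable range of t is
# trimmed by -min(taps) at the front and by max(taps) at the back
steps = [[seq[t + k] for k in taps]
         for t in range(-mintap, len(seq) - maxtap)]
print(steps[0])   # [0, 2, 3]
print(steps[-1])  # [6, 8, 9]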
Mokuichi147/ML-VRPose
|
[
"900bd12adbef64c8553b63e193d49703b54a134b"
] |
[
"src/main.py"
] |
[
"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom utils.camera import WebCam\nfrom utils.ovr import VRTracker\nfrom utils.pose import HumanPose\n\n\nCAMERA_DEVICE = 0\nMAX_POINTS = 100 * 2\n\n# 3D graph\nfig = plt.figure()\nax = Axes3D(fig)\nax.view_init(5, -85)\n\n\nobject_points = []\nimage_points = []\n\n\nwith WebCam(CAMERA_DEVICE) as camera, HumanPose() as pose:\n # Camera Calibration\n camera.StartCalibration(10, 1.85, 7, 7, save_dir='calibration')\n # VR Tracker\n tracker = VRTracker()\n\n counter = 0\n\n while camera.IsOpened():\n if not camera.Read():\n print('Frame acquisition failed', end='\\r', flush=True)\n continue\n elif not tracker.Read():\n print('Failed to get VR tracker', end='\\r', flush=True)\n continue\n elif not pose.Read(camera):\n camera.Show('human pose')\n print('could not find anyone', end='\\r', flush=True)\n continue\n else:\n counter = (counter + 1) % 1\n\n x = np.array([pose.landmarks.landmark[i].x for i in range(pose.landmark_count)])\n y = np.array([pose.landmarks.landmark[i].y for i in range(pose.landmark_count)]) * -1\n z = np.array([pose.landmarks.landmark[i].z for i in range(pose.landmark_count)])\n\n ax.cla()\n ax.scatter(x, z, y)\n plt.pause(0.001)\n\n pose.Draw()\n camera.Show('human pose')\n\n tracking_text = '[HMD] X:{0.px:.3f}, Y:{0.py:.3f}, Z:{0.pz:.3f}\\t'.format(tracker.hmd)\n tracking_text += '[L_CON] X:{0.px:.3f}, Y:{0.py:.3f}, Z:{0.pz:.3f}\\t'.format(tracker.lcon)\n tracking_text += '[R_CON] X:{0.px:.3f}, Y:{0.py:.3f}, Z:{0.pz:.3f}'.format(tracker.rcon)\n print(tracking_text, end='\\r', flush=True)\n\n\n if len(object_points) < MAX_POINTS and counter == 0 and pose.IsVisible(0.8, False):\n print('add')\n object_points.append(tracker.rcon.position)\n object_points.append(tracker.lcon.position)\n image_points.append(pose.rhand)\n image_points.append(pose.lhand)\n if len(object_points) == MAX_POINTS:\n success, rvec, tvec, inliers = cv2.solvePnPRansac(\n np.array(object_points, dtype=np.float32),\n np.array(image_points, dtype=np.float32),\n camera.new_cam_mtx,\n camera.calibration.dist_coeffs)\n print('rvec', rvec)\n print('tvec', tvec)\n print('inliers', inliers)\n \n imgpts, jac = cv2.projectPoints(np.array(object_points, dtype=np.float32), rvec, tvec, camera.new_cam_mtx, camera.calibration.dist_coeffs)\n \n sa = 0\n for i in range(len(object_points)):\n for j in range(2):\n sa += abs(imgpts[i][0][j] - image_points[i][j])\n print(i, object_points[i], imgpts[i][0], image_points[i])\n sa /= MAX_POINTS\n print(sa)\n if sa > 50:\n object_points = []\n image_points = []\n else:\n a = -np.matrix(cv2.Rodrigues(rvec)[0]).T * np.matrix(tvec)\n print('Position', a)\n break\n\n if camera.Wait():\n break"
] |
[
[
"numpy.matrix",
"numpy.array",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
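The main.py cell above recovers the camera position from the PnP pose via -R.T * tvec (using np.matrix). A standalone sketch of that step with plain arrays (the rvec/tvec values are hypothetical, assuming only OpenCV and NumPy):

import cv2
import numpy as np

rvec = np.array([[0.1], [0.2], [0.05]])  # made-up rotation vector
tvec = np.array([[0.3], [-0.1], [1.5]])  # made-up translation vector

# cv2.Rodrigues converts the rotation vector into a 3x3 rotation matrix R.
R, _ = cv2.Rodrigues(rvec)

# A solvePnP pose maps world -> camera (x_cam = R @ x_world + t), so the
# camera centre in world coordinates is -R.T @ t, the quantity main.py
# prints as 'Position'.
camera_position = -R.T @ tvec
print(camera_position.ravel())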
metabolize/trimesh
|
[
"3c9a76c9c7714ffb0eb6e68152e6a189469a4005"
] |
[
"trimesh/convex.py"
] |
[
"\"\"\"\nconvex.py\n\nDeal with creating and checking convex objects in 2, 3 and N dimensions.\n\nConvex is defined as:\n1) \"Convex, meaning \"curving out\" or \"extending outward\" (compare to concave)\n2) having an outline or surface curved like the exterior of a circle or sphere.\n3) (of a polygon) having only interior angles measuring less than 180\n\"\"\"\n\nimport numpy as np\n\nfrom .constants import tol\n\nfrom . import util\nfrom . import triangles\n\n\ntry:\n from scipy import spatial\nexcept ImportError as E:\n from .exceptions import ExceptionModule\n spatial = ExceptionModule(E)\n\n\ndef convex_hull(obj, qhull_options='QbB Pp QJn'):\n \"\"\"\n Get a new Trimesh object representing the convex hull of the\n current mesh, with proper normals and watertight.\n Requires scipy >.12.\n\n Arguments\n --------\n obj : Trimesh, or (n,3) float\n Mesh or cartesian points\n\n Returns\n --------\n convex : Trimesh\n Mesh of convex hull\n \"\"\"\n from .base import Trimesh\n\n if isinstance(obj, Trimesh):\n points = obj.vertices.view(np.ndarray)\n else:\n # will remove subclassing\n points = np.asarray(obj, dtype=np.float64)\n if not util.is_shape(points, (-1, 3)):\n raise ValueError('Object must be Trimesh or (n,3) points!')\n\n hull = spatial.ConvexHull(points,\n qhull_options=qhull_options)\n\n # hull object doesn't remove unreferenced vertices\n # create a mask to re- index faces for only referenced vertices\n vid = np.sort(hull.vertices)\n mask = np.zeros(len(hull.points), dtype=np.int64)\n mask[vid] = np.arange(len(vid))\n # remove unreferenced vertices here\n faces = mask[hull.simplices].copy()\n\n # rescale vertices back to original size\n vertices = hull.points[vid].copy()\n\n # qhull returns faces with random winding\n # calculate the returned normal of each face\n crosses = triangles.cross(vertices[faces])\n\n # qhull returns zero magnitude faces like an asshole\n normals, valid = util.unitize(crosses, check_valid=True)\n\n # remove zero magnitude faces\n faces = faces[valid]\n crosses = crosses[valid]\n\n # each triangle area and mean center\n triangles_area = triangles.area(crosses=crosses, sum=False)\n triangles_center = vertices[faces].mean(axis=1)\n\n # since the convex hull is (hopefully) convex, the vector from\n # the centroid to the center of each face\n # should have a positive dot product with the normal of that face\n # if it doesn't it is probably backwards\n # note that this sometimes gets screwed up by precision issues\n centroid = np.average(triangles_center,\n weights=triangles_area,\n axis=0)\n # a vector from the centroid to a point on each face\n test_vector = triangles_center - centroid\n # check the projection against face normals\n backwards = util.diagonal_dot(normals,\n test_vector) < 0.0\n\n # flip the winding outward facing\n faces[backwards] = np.fliplr(faces[backwards])\n # flip the normal\n normals[backwards] *= -1.0\n\n # save the work we did to the cache so it doesn't have to be recomputed\n initial_cache = {'triangles_cross': crosses,\n 'triangles_center': triangles_center,\n 'area_faces': triangles_area,\n 'centroid': centroid}\n\n # create the Trimesh object for the convex hull\n convex = Trimesh(vertices=vertices,\n faces=faces,\n face_normals=normals,\n initial_cache=initial_cache,\n process=True,\n validate=False)\n\n # we did the gross case above, but sometimes precision issues\n # leave some faces backwards anyway\n # this call will exit early if the winding is consistent\n # and if not will fix it by traversing the adjacency graph\n 
convex.fix_normals(multibody=False)\n\n # sometimes the QbB option will cause precision issues\n # so try the hull again without it and\n # check for qhull_options is None to avoid infinite recursion\n if (qhull_options is not None and\n not convex.is_winding_consistent):\n return convex_hull(convex, qhull_options=None)\n\n return convex\n\n\ndef adjacency_projections(mesh):\n \"\"\"\n Test if a mesh is convex by projecting the vertices of\n a triangle onto the normal of its adjacent face.\n\n Parameters\n ----------\n mesh : Trimesh\n Input geometry\n\n Returns\n ----------\n projection : (len(mesh.face_adjacency),) float\n Distance of projection of adjacent vertex onto plane\n \"\"\"\n # normals and origins from the first column of face adjacency\n normals = mesh.face_normals[mesh.face_adjacency[:, 0]]\n # one of the vertices on the shared edge\n origins = mesh.vertices[mesh.face_adjacency_edges[:, 0]]\n\n # faces from the second column of face adjacency\n vid_other = mesh.face_adjacency_unshared[:, 1]\n vector_other = mesh.vertices[vid_other] - origins\n\n # get the projection with a dot product\n dots = util.diagonal_dot(vector_other, normals)\n\n return dots\n\n\ndef is_convex(mesh):\n \"\"\"\n Check if a mesh is convex.\n\n Parameters\n -----------\n mesh : Trimesh\n Input geometry\n\n Returns\n -----------\n convex : bool\n Was passed mesh convex or not\n \"\"\"\n # don't consider zero- area faces\n nonzero = mesh.area_faces > tol.merge\n\n # adjacencies with two nonzero faces\n adj_ok = nonzero[mesh.face_adjacency].all(axis=1)\n\n # make threshold of convexity scale- relative\n threshold = tol.planar * mesh.scale\n # if projections of vertex onto plane of adjacent\n # face is negative, it means the face pair is locally\n # convex, and if that is true for all faces the mesh is convex\n convex = bool(mesh.face_adjacency_projections[adj_ok].max() < threshold)\n\n return convex\n\n\ndef hull_points(obj, qhull_options='QbB Pp'):\n \"\"\"\n Try to extract a convex set of points from multiple input formats.\n\n Parameters\n ---------\n obj: Trimesh object\n (n,d) points\n (m,) Trimesh objects\n\n Returns\n --------\n points: (o,d) convex set of points\n \"\"\"\n if hasattr(obj, 'convex_hull'):\n return obj.convex_hull.vertices\n\n initial = np.asanyarray(obj, dtype=np.float64)\n if len(initial.shape) != 2:\n raise ValueError('points must be (n, dimension)!')\n\n hull = spatial.ConvexHull(initial, qhull_options=qhull_options)\n points = hull.points[hull.vertices]\n\n return points\n"
] |
[
[
"numpy.fliplr",
"numpy.asarray",
"numpy.sort",
"numpy.asanyarray",
"numpy.average"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
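The convexity test in convex.py above reduces to one dot product per adjacent face pair; a tiny NumPy sketch of that check (toy coordinates, mirroring adjacency_projections() and is_convex()):

import numpy as np

normal = np.array([0.0, 0.0, 1.0])   # normal of the first face
origin = np.array([0.0, 0.0, 0.0])   # a vertex on the shared edge
other = np.array([1.0, 0.0, -0.2])   # unshared vertex of the adjacent face

# A negative projection means the adjacent vertex lies below the first
# face's plane, i.e. the pair folds outward and is locally convex.
projection = np.dot(other - origin, normal)
print(projection)      # -0.2
print(projection < 0)  # True -> locally convex at this edge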
svitlanavyetrenko/abides
|
[
"11ae144f4c81de0baaa031ca2c4afb6dc9519e4d"
] |
[
"util/OrderBook.py"
] |
[
"# Basic class for an order book for one symbol, in the style of the major US Stock Exchanges.\n# List of bid prices (index zero is best bid), each with a list of LimitOrders.\n# List of ask prices (index zero is best ask), each with a list of LimitOrders.\nimport sys\n\nfrom message.Message import Message\nfrom util.util import log_print, be_silent\n\nfrom copy import deepcopy\nimport pandas as pd\nfrom pandas.io.json import json_normalize\nfrom pprint import pprint\nfrom functools import reduce\n\nclass OrderBook:\n\n # An OrderBook requires an owning agent object, which it will use to send messages\n # outbound via the simulator Kernel (notifications of order creation, rejection,\n # cancellation, execution, etc).\n def __init__(self, owner, symbol):\n self.owner = owner\n self.symbol = symbol\n self.bids = []\n self.asks = []\n self.last_trade = None\n\n # Create an empty list of dictionaries to log the full order book depth (price and volume) each time it changes.\n self.book_log = []\n self.quotes_seen = set()\n\n # Create an order history for the exchange to report to certain agent types.\n self.history = [{}]\n\n # Last timestamp the orderbook for that symbol was updated\n self.last_update_ts = None\n\n def handleLimitOrder(self, order):\n # Matches a limit order or adds it to the order book. Handles partial matches piecewise,\n # consuming all possible shares at the best price before moving on, without regard to\n # order size \"fit\" or minimizing number of transactions. Sends one notification per\n # match.\n if order.symbol != self.symbol:\n log_print(\"{} order discarded. Does not match OrderBook symbol: {}\", order.symbol, self.symbol)\n return\n\n if (order.quantity <= 0) or (int(order.quantity) != order.quantity):\n log_print(\"{} order discarded. Quantity ({}) must be a positive integer.\", order.symbol, order.quantity)\n return\n\n # Add the order under index 0 of history: orders since the most recent trade.\n self.history[0][order.order_id] = {'entry_time': self.owner.currentTime,\n 'quantity': order.quantity, 'is_buy_order': order.is_buy_order,\n 'limit_price': order.limit_price, 'transactions': [],\n 'modifications': [],\n 'cancellations': []}\n\n matching = True\n\n self.prettyPrint()\n\n executed = []\n\n while matching:\n matched_order = deepcopy(self.executeOrder(order))\n\n if matched_order:\n # Decrement quantity on new order and notify traders of execution.\n filled_order = deepcopy(order)\n filled_order.quantity = matched_order.quantity\n filled_order.fill_price = matched_order.fill_price\n\n order.quantity -= filled_order.quantity\n\n log_print(\"MATCHED: new order {} vs old order {}\", filled_order, matched_order)\n log_print(\"SENT: notifications of order execution to agents {} and {} for orders {} and {}\",\n filled_order.agent_id, matched_order.agent_id, filled_order.order_id, matched_order.order_id)\n\n self.owner.sendMessage(order.agent_id, Message({\"msg\": \"ORDER_EXECUTED\", \"order\": filled_order}))\n self.owner.sendMessage(matched_order.agent_id,\n Message({\"msg\": \"ORDER_EXECUTED\", \"order\": matched_order}))\n\n # Accumulate the volume and average share price of the currently executing inbound trade.\n executed.append((filled_order.quantity, filled_order.fill_price))\n\n if order.quantity <= 0:\n matching = False\n\n else:\n # No matching order was found, so the new order enters the order book. 
Notify the agent.\n self.enterOrder(deepcopy(order))\n\n log_print(\"ACCEPTED: new order {}\", order)\n log_print(\"SENT: notifications of order acceptance to agent {} for order {}\",\n order.agent_id, order.order_id)\n\n self.owner.sendMessage(order.agent_id, Message({\"msg\": \"ORDER_ACCEPTED\", \"order\": order}))\n\n matching = False\n\n if not matching:\n # Now that we are done executing or accepting this order, log the new best bid and ask.\n if self.bids:\n self.owner.logEvent('BEST_BID', \"{},{},{}\".format(self.symbol,\n self.bids[0][0].limit_price,\n sum([o.quantity for o in self.bids[0]])))\n\n if self.asks:\n self.owner.logEvent('BEST_ASK', \"{},{},{}\".format(self.symbol,\n self.asks[0][0].limit_price,\n sum([o.quantity for o in self.asks[0]])))\n\n # Also log the last trade (total share quantity, average share price).\n if executed:\n trade_qty = 0\n trade_price = 0\n for q, p in executed:\n log_print(\"Executed: {} @ {}\", q, p)\n trade_qty += q\n trade_price += (p * q)\n\n avg_price = int(round(trade_price / trade_qty))\n log_print(\"Avg: {} @ ${:0.4f}\", trade_qty, avg_price)\n self.owner.logEvent('LAST_TRADE', \"{},${:0.4f}\".format(trade_qty, avg_price))\n\n self.last_trade = avg_price\n\n # Transaction occurred, so advance indices.\n self.history.insert(0, {})\n\n # Truncate history to required length.\n self.history = self.history[:self.owner.stream_history + 1]\n\n # Finally, log the full depth of the order book, ONLY if we have been requested to store the order book\n # for later visualization. (This is slow.)\n if self.owner.book_freq is not None:\n row = {'QuoteTime': self.owner.currentTime}\n for quote in self.quotes_seen:\n row[quote] = 0\n for quote, volume in self.getInsideBids():\n row[quote] = -volume\n self.quotes_seen.add(quote)\n for quote, volume in self.getInsideAsks():\n if quote in row:\n if row[quote] != 0:\n print(\n \"WARNING: THIS IS A REAL PROBLEM: an order book contains bids and asks at the same quote price!\")\n row[quote] = volume\n self.quotes_seen.add(quote)\n self.book_log.append(row)\n self.last_update_ts = self.owner.currentTime\n self.prettyPrint()\n\n def executeOrder(self, order):\n # Finds a single best match for this order, without regard for quantity.\n # Returns the matched order or None if no match found. DOES remove,\n # or decrement quantity from, the matched order from the order book\n # (i.e. executes at least a partial trade, if possible).\n\n # Track which (if any) existing order was matched with the current order.\n if order.is_buy_order:\n book = self.asks\n else:\n book = self.bids\n\n # TODO: Simplify? It is ever possible to actually select an execution match\n # other than the best bid or best ask? We may not need these execute loops.\n\n # First, examine the correct side of the order book for a match.\n if not book:\n # No orders on this side.\n return None\n elif not self.isMatch(order, book[0][0]):\n # There were orders on the right side, but the prices do not overlap.\n # Or: bid could not match with best ask, or vice versa.\n # Or: bid offer is below the lowest asking price, or vice versa.\n return None\n else:\n # There are orders on the right side, and the new order's price does fall\n # somewhere within them. We can/will only match against the oldest order\n # among those with the best price. (i.e. best price, then FIFO)\n\n # Note that book[i] is a LIST of all orders (oldest at index book[i][0]) at the same price.\n\n # The matched order might be only partially filled. (i.e. 
new order is smaller)\n if order.quantity >= book[0][0].quantity:\n # Consumed entire matched order.\n matched_order = book[0].pop(0)\n\n # If the matched price now has no orders, remove it completely.\n if not book[0]:\n del book[0]\n\n else:\n # Consumed only part of matched order.\n matched_order = deepcopy(book[0][0])\n matched_order.quantity = order.quantity\n\n book[0][0].quantity -= matched_order.quantity\n\n # When two limit orders are matched, they execute at the price that\n # was being \"advertised\" in the order book.\n matched_order.fill_price = matched_order.limit_price\n\n # Record the transaction in the order history and push the indices\n # out one, possibly truncating to the maximum history length.\n\n # The incoming order is guaranteed to exist under index 0.\n self.history[0][order.order_id]['transactions'].append((self.owner.currentTime, order.quantity))\n\n # The pre-existing order may or may not still be in the recent history.\n for idx, orders in enumerate(self.history):\n if matched_order.order_id not in orders: continue\n\n # Found the matched order in history. Update it with this transaction.\n self.history[idx][matched_order.order_id]['transactions'].append(\n (self.owner.currentTime, matched_order.quantity))\n\n # Return (only the executed portion of) the matched order.\n return matched_order\n\n def isMatch(self, order, o):\n # Returns True if order 'o' can be matched against input 'order'.\n if order.is_buy_order == o.is_buy_order:\n print(\"WARNING: isMatch() called on orders of same type: {} vs {}\".format(order, o))\n return False\n\n if order.is_buy_order and (order.limit_price >= o.limit_price):\n return True\n\n if not order.is_buy_order and (order.limit_price <= o.limit_price):\n return True\n\n return False\n\n def enterOrder(self, order):\n # Enters a limit order into the OrderBook in the appropriate location.\n # This does not test for matching/executing orders -- this function\n # should only be called after a failed match/execution attempt.\n\n if order.is_buy_order:\n book = self.bids\n else:\n book = self.asks\n\n if not book:\n # There were no orders on this side of the book.\n book.append([order])\n elif not self.isBetterPrice(order, book[-1][0]) and not self.isEqualPrice(order, book[-1][0]):\n # There were orders on this side, but this order is worse than all of them.\n # (New lowest bid or highest ask.)\n book.append([order])\n else:\n # There are orders on this side. Insert this order in the correct position in the list.\n # Note that o is a LIST of all orders (oldest at index 0) at this same price.\n for i, o in enumerate(book):\n if self.isBetterPrice(order, o[0]):\n book.insert(i, [order])\n break\n elif self.isEqualPrice(order, o[0]):\n book[i].append(order)\n break\n\n def cancelOrder(self, order):\n # Attempts to cancel (the remaining, unexecuted portion of) a trade in the order book.\n # By definition, this pretty much has to be a limit order. If the order cannot be found\n # in the order book (probably because it was already fully executed), presently there is\n # no message back to the agent. This should possibly change to some kind of failed\n # cancellation message. (?) 
Otherwise, the agent receives ORDER_CANCELLED with the\n # order as the message body, with the cancelled quantity correctly represented as the\n # number of shares that had not already been executed.\n\n if order.is_buy_order:\n book = self.bids\n else:\n book = self.asks\n\n # If there are no orders on this side of the book, there is nothing to do.\n if not book: return\n\n # There are orders on this side. Find the price level of the order to cancel,\n # then find the exact order and cancel it.\n # Note that o is a LIST of all orders (oldest at index 0) at this same price.\n for i, o in enumerate(book):\n if self.isEqualPrice(order, o[0]):\n # This is the correct price level.\n for ci, co in enumerate(book[i]):\n if order.order_id == co.order_id:\n # Cancel this order.\n cancelled_order = book[i].pop(ci)\n\n # Record cancellation of the order if it is still present in the recent history structure.\n for idx, orders in enumerate(self.history):\n if cancelled_order.order_id not in orders: continue\n\n # Found the cancelled order in history. Update it with the cancelation.\n self.history[idx][cancelled_order.order_id]['cancellations'].append(\n (self.owner.currentTime, cancelled_order.quantity))\n\n # If the cancelled price now has no orders, remove it completely.\n if not book[i]:\n del book[i]\n\n log_print(\"CANCELLED: order {}\", order)\n log_print(\"SENT: notifications of order cancellation to agent {} for order {}\",\n cancelled_order.agent_id, cancelled_order.order_id)\n\n self.owner.sendMessage(order.agent_id,\n Message({\"msg\": \"ORDER_CANCELLED\", \"order\": cancelled_order}))\n # We found the order and cancelled it, so stop looking.\n self.last_update_ts = self.owner.currentTime\n return\n\n def modifyOrder(self, order, new_order):\n # Modifies the quantity of an existing limit order in the order book\n if not self.isSameOrder(order, new_order): return\n book = self.bids if order.is_buy_order else self.asks\n if not book: return\n for i, o in enumerate(book):\n if self.isEqualPrice(order, o[0]):\n for mi, mo in enumerate(book[i]):\n if order.order_id == mo.order_id:\n book[i][0] = new_order\n for idx, orders in enumerate(self.history):\n if new_order.order_id not in orders: continue\n self.history[idx][new_order.order_id]['modifications'].append(\n (self.owner.currentTime, new_order.quantity))\n log_print(\"MODIFIED: order {}\", order)\n log_print(\"SENT: notifications of order modification to agent {} for order {}\",\n new_order.agent_id, new_order.order_id)\n self.owner.sendMessage(order.agent_id,\n Message({\"msg\": \"ORDER_MODIFIED\", \"new_order\": new_order}))\n if order.is_buy_order:\n self.bids = book\n else:\n self.asks = book\n self.last_update_ts = self.owner.currentTime\n\n # Get the inside bid price(s) and share volume available at each price, to a limit\n # of \"depth\". (i.e. 
inside price, inside 2 prices) Returns a list of tuples:\n # list index is best bids (0 is best); each tuple is (price, total shares).\n def getInsideBids(self, depth=sys.maxsize):\n book = []\n for i in range(min(depth, len(self.bids))):\n qty = 0\n price = self.bids[i][0].limit_price\n for o in self.bids[i]:\n qty += o.quantity\n book.append((price, qty))\n\n return book\n\n # As above, except for ask price(s).\n def getInsideAsks(self, depth=sys.maxsize):\n book = []\n for i in range(min(depth, len(self.asks))):\n qty = 0\n price = self.asks[i][0].limit_price\n for o in self.asks[i]:\n qty += o.quantity\n book.append((price, qty))\n\n return book\n\n def get_transacted_volume(self, lookback_period='10min'):\n \"\"\" Method retrieves the total transacted volume for a symbol over a lookback period finishing at the current\n simulation time. \"\"\"\n\n unrolled_history = []\n for elem in self.history:\n for _, val in elem.items():\n unrolled_history.append(val)\n\n unrolled_history_df = json_normalize(unrolled_history)\n\n if unrolled_history_df.empty:\n return 0\n\n executed_transactions = unrolled_history_df[\n unrolled_history_df['transactions'].map(lambda d: len(d)) > 0] # remove cells that are an empty list\n\n # Reshape into DataFrame with columns ['execution_time', 'quantity']\n unrolled_transactions = executed_transactions['transactions'].apply(pd.Series)\n unrolled_transactions = reduce(lambda col1, col2: pd.concat([col1, col2], axis=0),\n [unrolled_transactions[col] for col in unrolled_transactions.columns])\n unrolled_transactions = unrolled_transactions.dropna()\n unrolled_transactions = unrolled_transactions.apply(pd.Series)\n unrolled_transactions = unrolled_transactions.rename(columns={\n 0: 'execution_time',\n 1: 'quantity'\n })\n unrolled_transactions = unrolled_transactions.sort_values(by=['execution_time'])\n unrolled_transactions = unrolled_transactions.drop_duplicates(keep='last')\n\n # Get transacted volume in time window\n lookback_pd = pd.to_timedelta(lookback_period)\n window_start = self.owner.currentTime - lookback_pd\n executed_within_lookback_period = unrolled_transactions[unrolled_transactions['execution_time'] >= window_start]\n transacted_volume = executed_within_lookback_period['quantity'].sum()\n\n return transacted_volume\n\n # These could be moved to the LimitOrder class. We could even operator overload them\n # into >, <, ==, etc.\n def isBetterPrice(self, order, o):\n # Returns True if order has a 'better' price than o. (That is, a higher bid\n # or a lower ask.) Must be same order type.\n if order.is_buy_order != o.is_buy_order:\n print(\"WARNING: isBetterPrice() called on orders of different type: {} vs {}\".format(order, o))\n return False\n\n if order.is_buy_order and (order.limit_price > o.limit_price):\n return True\n\n if not order.is_buy_order and (order.limit_price < o.limit_price):\n return True\n\n return False\n\n def isEqualPrice(self, order, o):\n return order.limit_price == o.limit_price\n\n def isSameOrder(self, order, new_order):\n return order.order_id == new_order.order_id\n\n # Print a nicely-formatted view of the current order book.\n def prettyPrint(self, silent=False):\n # Start at the highest ask price and move down. Then switch to the highest bid price and move down.\n # Show the total volume at each price. 
If silent is True, return the accumulated string and print nothing.\n\n # If the global silent flag is set, skip prettyPrinting entirely, as it takes a LOT of time.\n if be_silent: return ''\n\n book = \"{} order book as of {}\\n\".format(self.symbol, self.owner.currentTime)\n book += \"Last trades: simulated {:d}, historical {:d}\\n\".format(self.last_trade,\n self.owner.oracle.observePrice(self.symbol,\n self.owner.currentTime,\n sigma_n=0,\n random_state=self.owner.random_state))\n\n book += \"{:10s}{:10s}{:10s}\\n\".format('BID', 'PRICE', 'ASK')\n book += \"{:10s}{:10s}{:10s}\\n\".format('---', '-----', '---')\n\n for quote, volume in self.getInsideAsks()[-1::-1]:\n book += \"{:10s}{:10s}{:10s}\\n\".format(\"\", \"{:d}\".format(quote), \"{:d}\".format(volume))\n\n for quote, volume in self.getInsideBids():\n book += \"{:10s}{:10s}{:10s}\\n\".format(\"{:d}\".format(volume), \"{:d}\".format(quote), \"\")\n\n if silent: return book\n\n log_print(book)"
] |
[
[
"pandas.to_timedelta",
"pandas.io.json.json_normalize",
"pandas.concat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20",
"0.25"
],
"scipy": [],
"tensorflow": []
}
] |
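One plausible reading of the pandas version range above (0.19-0.25): OrderBook.py imports json_normalize from pandas.io.json, and pandas 1.0 moved that function to the top-level namespace. A small compatibility sketch (the sample record is made up):

try:
    from pandas import json_normalize  # pandas >= 1.0
except ImportError:
    from pandas.io.json import json_normalize  # pandas < 1.0

records = [{"quantity": 5, "transactions": [("09:30", 5)]}]
print(json_normalize(records))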
AutoLV/GANmut
|
[
"a98d00dbe63d18ba2c55b948158bfe0c81e2189e"
] |
[
"solver.py"
] |
[
"import os\nimport time\nimport datetime\nimport sys\n\nfrom torchvision.utils import save_image\nimport torch\nimport torch.nn.functional as F\nimport numpy as np\n\nimport models.model_linear_2d\nimport models.model_gaussian_2d\nimport models.model_linear_3d\nimport models.model_gaussian_3d\n\n\nclass Solver(object):\n \"\"\"Solver for training and testing StarGAN.\"\"\"\n\n def __init__(self, loader, config):\n \"\"\"Initialize configurations.\"\"\"\n\n # Data loader.\n self.loader = loader\n\n # Model configurations.\n self.c_dim = config.c_dim\n self.image_size = config.image_size\n self.g_conv_dim = config.g_conv_dim\n self.d_conv_dim = config.d_conv_dim\n self.g_repeat_num = config.g_repeat_num\n self.d_repeat_num = config.d_repeat_num\n self.lambda_cls = config.lambda_cls\n self.lambda_rec = config.lambda_rec\n self.lambda_regularization = config.lambda_regularization\n self.regularization_type = config.regularization_type\n self.lambda_d_strength = config.lambda_d_strength\n self.lambda_g_strength = config.lambda_g_strength\n self.lambda_g_info = config.lambda_g_info\n self.lambda_d_info = config.lambda_d_info\n self.tridimensional = config.tridimensional\n self.parametrization = config.parametrization\n self.lambda_expr = config.lambda_expr\n self.lambda_prediction = config.lambda_prediction\n self.architecture_v2 = config.architecture_v2\n\n # Training configurations.\n self.batch_size = config.batch_size\n self.num_iters = config.num_iters\n self.num_iters_decay = config.num_iters_decay\n self.g_lr = config.g_lr\n self.d_lr = config.d_lr\n self.n_critic = config.n_critic\n self.beta1 = config.beta1\n self.beta2 = config.beta2\n self.resume_iter = config.resume_iter\n self.n_r_l = config.n_r_l\n self.n_r_g = config.n_r_g\n self.cycle_loss = config.cycle_loss\n\n # Test configurations.\n self.test_iters = config.test_iters\n\n # Miscellaneous.\n self.use_tensorboard = config.use_tensorboard\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n print(\"Used device: \", self.device)\n\n # Directories.\n self.log_dir = config.log_dir\n self.sample_dir = config.sample_dir\n self.model_save_dir = config.model_save_dir\n self.result_dir = config.result_dir\n\n # Step size.\n self.log_step = config.log_step\n self.sample_step = config.sample_step\n self.model_save_step = config.model_save_step\n self.lr_update_step = config.lr_update_step\n\n # losses\n self.KLDivLoss = torch.nn.KLDivLoss(reduction=\"batchmean\")\n self.LogSoftmax = torch.nn.LogSoftmax(dim=1)\n self.Softmax = torch.nn.Softmax(dim=1)\n\n # Build the model and tensorboard.\n self.build_model()\n if self.use_tensorboard:\n self.build_tensorboard()\n\n def build_model(self):\n \"\"\"Create a generator and a discriminator.\"\"\"\n\n if self.parametrization == \"linear\":\n \n if self.tridimensional:\n\n self.G = models.model_linear_3d.Generator(\n self.device,\n self.g_conv_dim,\n self.c_dim,\n self.g_repeat_num,\n n_r=self.n_r_l,\n )\n self.D = models.model_linear_3d.Discriminator(\n self.image_size, self.d_conv_dim, self.c_dim, self.d_repeat_num\n )\n \n else:\n \n self.G = models.model_linear_2d.Generator(\n self.device,\n self.g_conv_dim,\n self.c_dim,\n self.g_repeat_num,\n n_r=self.n_r_l,\n )\n self.D = models.model_linear_2d.Discriminator(\n self.image_size, self.d_conv_dim, self.c_dim, self.d_repeat_num\n )\n \n \n elif self.parametrization == \"gaussian\":\n \n if self.tridimensional:\n\n self.G = models.model_gaussian_3d.Generator(\n self.device, self.g_conv_dim, self.c_dim, 
self.g_repeat_num\n )\n self.D = models.model_gaussian_3d.Discriminator(\n self.image_size, self.d_conv_dim, self.c_dim, self.d_repeat_num\n )\n \n else:\n \n self.G = models.model_gaussian_3d.Generator(\n self.device, self.g_conv_dim, self.c_dim, self.g_repeat_num\n )\n self.D = models.model_gaussian_3d.Discriminator(\n self.image_size, self.d_conv_dim, self.c_dim, self.d_repeat_num\n )\n\n self.g_optimizer = torch.optim.Adam(\n self.G.parameters(), self.g_lr, [self.beta1, self.beta2]\n )\n self.d_optimizer = torch.optim.Adam(\n self.D.parameters(), self.d_lr, [self.beta1, self.beta2]\n )\n self.print_network(self.G, \"G\")\n self.print_network(self.D, \"D\")\n\n self.G.to(self.device)\n self.D.to(self.device)\n\n def print_network(self, model, name):\n \"\"\"Print out the network information.\"\"\"\n num_params = 0\n for p in model.parameters():\n num_params += p.numel()\n print(model)\n print(name)\n print(\"The number of parameters: {}\".format(num_params))\n\n def restore_model(self, resume_iter):\n \"\"\"Restore the trained generator and discriminator.\"\"\"\n print(\"Loading the trained models from step {}...\".format(resume_iter))\n G_path = os.path.join(self.model_save_dir, \"{}-G.ckpt\".format(resume_iter))\n D_path = os.path.join(self.model_save_dir, \"{}-D.ckpt\".format(resume_iter))\n self.G.load_state_dict(\n torch.load(G_path, map_location=lambda storage, loc: storage)\n )\n self.D.load_state_dict(\n torch.load(D_path, map_location=lambda storage, loc: storage)\n )\n\n def build_tensorboard(self):\n \"\"\"Build a tensorboard logger.\"\"\"\n from utils.logger import Logger\n\n self.logger = Logger(self.log_dir)\n\n def update_lr(self, g_lr, d_lr):\n \"\"\"Decay learning rates of the generator and discriminator.\"\"\"\n for param_group in self.g_optimizer.param_groups:\n param_group[\"lr\"] = g_lr\n for param_group in self.d_optimizer.param_groups:\n param_group[\"lr\"] = d_lr\n\n def reset_grad(self):\n \"\"\"Reset the gradient buffers.\"\"\"\n self.g_optimizer.zero_grad()\n self.d_optimizer.zero_grad()\n\n def denorm(self, x):\n \"\"\"Convert the range from [-1, 1] to [0, 1].\"\"\"\n out = (x + 1) / 2\n return out.clamp_(0, 1)\n\n def gradient_penalty(self, y, x):\n \"\"\"Compute gradient penalty: (L2_norm(dy/dx) - 1)**2.\"\"\"\n weight = torch.ones(y.size()).to(self.device)\n dydx = torch.autograd.grad(\n outputs=y,\n inputs=x,\n grad_outputs=weight,\n retain_graph=True,\n create_graph=True,\n only_inputs=True,\n )[0]\n\n dydx = dydx.view(dydx.size(0), -1)\n dydx_l2norm = torch.sqrt(torch.sum(dydx ** 2, dim=1))\n return torch.mean((dydx_l2norm - 1) ** 2)\n\n # Copied from StarGAN v2 code\n def r1_reg(self, d_out, x_in):\n # zero-centered gradient penalty for real images\n batch_size = x_in.size(0)\n grad_dout = torch.autograd.grad(\n outputs=d_out.sum(),\n inputs=x_in,\n create_graph=True,\n retain_graph=True,\n only_inputs=True,\n )[0]\n grad_dout2 = grad_dout.pow(2)\n assert grad_dout2.size() == x_in.size()\n reg = 0.5 * grad_dout2.view(batch_size, -1).sum(1).mean(0)\n return reg\n\n def label2onehot(self, labels, dim):\n \"\"\"Convert label indices to one-hot vectors.\"\"\"\n batch_size = labels.size(0)\n out = torch.zeros(batch_size, dim)\n out[np.arange(batch_size), labels.long()] = 1\n return out\n\n def create_labels(self, c_org, c_dim=5):\n \"\"\"Generate target domain labels for debugging and testing.\"\"\"\n\n c_trg_list = []\n for i in range(c_dim):\n\n c_trg = self.label2onehot(torch.ones(c_org.size(0)) * i, c_dim)\n c_trg_list.append(c_trg.to(self.device))\n 
return c_trg_list\n\n def classification_loss(self, logit, target):\n \"\"\"Compute cross entropy loss.\"\"\"\n\n return F.cross_entropy(logit, target)\n\n def train(self):\n \"\"\"Train GANmut.\"\"\"\n # Set data loader.\n data_loader = self.loader\n\n # Fetch fixed inputs for debugging.\n\n data_iter = iter(data_loader)\n\n x_fixed, c_org, _ = next(data_iter)\n\n x_fixed = x_fixed.to(self.device)\n c_fixed_list = self.create_labels(c_org, self.c_dim)\n label_emotions = torch.tensor(\n [em for em in range(self.c_dim)], device=self.device, dtype=torch.long\n )\n\n # Learning rate cache for decaying.\n g_lr = self.g_lr\n d_lr = self.d_lr\n\n # Start training from scratch or resume training.\n start_iter = 0\n if self.resume_iter:\n start_iter = self.resume_iter\n self.restore_model(self.resume_iter)\n\n # Start training.\n print(\"Start training...\")\n start_time = time.time()\n for i in range(start_iter, self.num_iters):\n\n # =================================================================================== #\n # 1. Preprocess input data #\n # =================================================================================== #\n\n # Fetch real images and labels.\n\n try:\n x_real, label_org, _ = next(data_iter)\n except:\n data_iter = iter(data_loader)\n x_real, label_org, _ = next(data_iter)\n\n # Generate target domain labels randomly.\n rand_idx = torch.randperm(label_org.size(0))\n label_trg = label_org[rand_idx]\n\n c_org = self.label2onehot(label_org, self.c_dim)\n c_trg = self.label2onehot(label_trg, self.c_dim)\n\n x_real = x_real.to(self.device) # Input images.\n c_org = c_org.to(self.device) # Original domain labels.\n c_trg = c_trg.to(self.device) # Target domain labels.\n label_org = label_org.to(\n self.device\n ) # Labels for computing classification loss.\n label_trg = label_trg.to(\n self.device\n ) # Labels for computing classification loss.\n\n if self.parametrization == \"linear\":\n # expression strength of all expressions except neutral set into the interval [0.2,1]\n expression_strength = (\n torch.rand(x_real.size(0), device=self.device) * 0.8 + 0.2\n )\n\n # neutral expression strength set to 0\n expression_strength[label_trg.eq(0)] = (\n 0.2 * (expression_strength[label_trg.eq(0)] - 0.2) / 0.8\n )\n neutral_mask = (expression_strength > 0.2).to(torch.float)\n\n # =================================================================================== #\n # 2. 
Train the discriminator #\n # =================================================================================== #\n # Compute loss with real images.\n x_real.requires_grad_()\n out_src, out_cls, _ = self.D(x_real)\n\n d_loss_real = -torch.mean(out_src)\n d_loss_cls = self.classification_loss(out_cls, label_org)\n\n # Compute loss with fake images.\n x_fake, cord = self.G(x_real, c_trg, expression_strength)\n out_src_fake, out_cls, cord_hat = self.D(x_fake.detach())\n d_loss_fake = torch.mean(out_src_fake)\n d_loss_info = F.mse_loss(cord_hat, cord)\n\n # Compute regularization loss\n if self.regularization_type == \"gp\":\n alpha = torch.rand(x_real.size(0), 1, 1, 1).to(self.device)\n x_hat = (\n alpha * x_real.data + (1 - alpha) * x_fake.data\n ).requires_grad_(True)\n out_src, _, _ = self.D(x_hat)\n d_loss_regularization = self.gradient_penalty(out_src, x_hat)\n elif self.regularization_type == \"R1\":\n d_loss_regularization = self.r1_reg(out_src, x_real)\n else:\n sys.exit(\"Regularization not supported\")\n\n # Backward and optimize.\n d_loss = (\n d_loss_real\n + d_loss_fake\n + self.lambda_cls * d_loss_cls\n + self.lambda_regularization * d_loss_regularization\n + self.lambda_d_info * d_loss_info\n )\n self.reset_grad()\n d_loss.backward()\n self.d_optimizer.step()\n\n # Logging.\n loss = {}\n loss[\"D/loss_real\"] = d_loss_real.item()\n loss[\"D/loss_fake\"] = d_loss_fake.item()\n loss[\"D/loss_cls\"] = d_loss_cls.item()\n loss[\"D/loss_regularization\"] = d_loss_regularization.item()\n loss[\"D/loss_info\"] = d_loss_info.item()\n\n # =================================================================================== #\n # 3. Train the generator #\n # =================================================================================== #\n\n if (i + 1) % self.n_critic == 0:\n # Original-to-target domain.\n with torch.no_grad():\n _, cls_real, cord_hat_real = self.D(x_real)\n\n x_fake, cord = self.G(x_real, c_trg, expression_strength)\n out_src, out_cls, cord_hat = self.D(x_fake)\n g_loss_fake = -torch.mean(out_src)\n g_loss_cls = self.classification_loss(out_cls[5:], label_trg[5:])\n expr_strength_hat = (F.softmax(out_cls, dim=1)).max(1)[0]\n\n g_loss_expression_strength = (\n F.mse_loss(\n expr_strength_hat * neutral_mask,\n expression_strength * neutral_mask,\n reduction=\"sum\",\n )\n / torch.sum(neutral_mask)\n )\n g_loss_info = torch.nn.functional.mse_loss(cord_hat, cord)\n\n # Target-to-original domain..shape\n\n if self.cycle_loss == \"approximate\":\n # if True:\n expr_strength_real = (F.softmax(cls_real, dim=1)).max(1)[0]\n x_reconst, _ = self.G(x_fake, c_org, expr_strength_real)\n elif self.cycle_loss == \"original\":\n x_reconst, _ = self.G(\n x_fake,\n c_org,\n None,\n mode=\"manual_selection\",\n manual_expr=cord_hat_real,\n )\n else:\n sys.exit(\n \"cycle loss can be either 'approximate' either 'original' \"\n )\n\n g_loss_rec = torch.mean(torch.abs(x_real - x_reconst))\n\n # Backward and optimize.\n g_loss = (\n g_loss_fake\n + self.lambda_rec * g_loss_rec\n + self.lambda_cls * g_loss_cls\n + self.lambda_g_strength * g_loss_expression_strength\n + self.lambda_g_info * g_loss_info\n )\n self.reset_grad()\n g_loss.backward()\n self.g_optimizer.step()\n\n # Logging.\n loss[\"G/loss_fake\"] = g_loss_fake.item()\n loss[\"G/loss_rec\"] = g_loss_rec.item()\n loss[\"G/loss_cls\"] = g_loss_cls.item()\n loss[\"G/loss_expr_strength\"] = g_loss_expression_strength.item()\n loss[\"G/loss_info\"] = g_loss_info.item()\n\n if self.parametrization == \"gaussian\":\n\n # 
=================================================================================== #\n # 2. Train the discriminator #\n # =================================================================================== #\n\n # Compute loss with real images.\n x_real.requires_grad_()\n out_src, out_cls = self.D(x_real)\n\n d_loss_real = -torch.mean(out_src)\n d_loss_cls = self.classification_loss(\n out_cls[:, 0 : self.c_dim], label_org\n )\n\n # Compute loss with fake images.\n x_fake, label_trg, expr = self.G(x_real)\n out_src_fake, out_cls = self.D(x_fake.detach())\n d_loss_fake = torch.mean(out_src_fake)\n d_loss_expr = F.mse_loss(out_cls[:, self.c_dim :], expr)\n\n if self.regularization_type == \"gp\":\n alpha = torch.rand(x_real.size(0), 1, 1, 1).to(self.device)\n x_hat = (\n alpha * x_real.data + (1 - alpha) * x_fake.data\n ).requires_grad_(True)\n out_src, _ = self.D(x_hat)\n d_loss_regularization = self.gradient_penalty(out_src, x_hat)\n elif self.regularization_type == \"R1\":\n d_loss_regularization = self.r1_reg(out_src, x_real)\n else:\n sys.exit(\"Regularization not supported\")\n\n # Backward and optimize.\n d_loss = (\n d_loss_real\n + d_loss_fake\n + self.lambda_cls * d_loss_cls\n + self.lambda_regularization * d_loss_regularization\n + self.lambda_expr * d_loss_expr\n )\n\n self.reset_grad()\n d_loss.backward()\n self.d_optimizer.step()\n\n # Logging.\n loss = {}\n loss[\"D/loss_real\"] = d_loss_real.item()\n loss[\"D/loss_fake\"] = d_loss_fake.item()\n loss[\"D/loss_cls\"] = d_loss_cls.item()\n loss[\"D/loss_reguarization\"] = d_loss_regularization.item()\n\n # =================================================================================== #\n # 3. Train the generator #\n # =================================================================================== #\n\n if (i + 1) % self.n_critic == 0:\n # Original-to-target domain.\n with torch.no_grad():\n _, expr_org = self.D(x_real)\n\n x_fake, mahalanobis_distances, _ = self.G(x_real)\n out_src, out_cls = self.D(x_fake)\n g_loss_fake = -torch.mean(out_src)\n\n probabilities_trg = self.Softmax(-mahalanobis_distances)\n log_probabilities_trg = self.LogSoftmax(-mahalanobis_distances)\n\n g_loss_cls = self.KLDivLoss(\n self.LogSoftmax(out_cls[:, 0 : self.c_dim]), probabilities_trg\n ) + self.KLDivLoss(\n log_probabilities_trg, self.Softmax(out_cls[:, 0 : self.c_dim])\n )\n\n g_loss_prediction = self.classification_loss(\n out_cls[0 : self.c_dim, 0 : self.c_dim], label_emotions\n )\n # Target-to-original domain..shape\n\n x_reconst, _, _ = self.G(x_fake, expr_org[:, self.c_dim:])\n g_loss_rec = torch.mean(torch.abs(x_real - x_reconst))\n\n g_loss = (\n g_loss_fake\n + self.lambda_rec * g_loss_rec\n + self.lambda_cls * g_loss_cls\n + self.lambda_prediction * g_loss_prediction\n )\n self.reset_grad()\n g_loss.backward()\n self.g_optimizer.step()\n\n # Logging.\n loss[\"G/loss_fake\"] = g_loss_fake.item()\n loss[\"G/loss_rec\"] = g_loss_rec.item()\n loss[\"G/loss_cls\"] = g_loss_cls.item()\n\n # =================================================================================== #\n # 4. 
Miscellaneous #\n # =================================================================================== #\n\n # Print out training information.\n if (i + 1) % self.log_step == 0:\n\n elapsed_time = time.time() - start_time\n elapsed_time = str(datetime.timedelta(seconds=elapsed_time))[:-7]\n log = \"Elapsed [{}], Iteration [{}/{}]\".format(\n elapsed_time, i + 1, self.num_iters\n )\n for tag, value in loss.items():\n log += \", {}: {:.4f}\".format(tag, value)\n print(log)\n\n if self.use_tensorboard:\n for tag, value in loss.items():\n self.logger.scalar_summary(tag, value, i + 1)\n\n if (i + 1) % self.sample_step == 0:\n\n with torch.no_grad():\n if self.parametrization == \"linear\":\n\n print(\"AXES:\")\n print(self.G.print_axes())\n\n expression_str = 0.9 * torch.ones(\n x_fixed.size(0), dtype=torch.float, device=self.device\n )\n x_fake_list = [x_fixed]\n for c_fixed in c_fixed_list:\n\n x_fake_list.append(\n self.G(\n x_fixed,\n c_fixed[: x_fixed.size(0)],\n expression_str,\n \"test\",\n )[0][:, [2, 1, 0], :, :]\n )\n\n x_concat = torch.cat(x_fake_list, dim=3)\n sample_path = os.path.join(\n self.sample_dir, \"{}-images.jpg\".format(i + 1)\n )\n save_image(\n self.denorm(x_concat.data.cpu()),\n sample_path,\n nrow=1,\n padding=0,\n )\n print(\n \"Saved real and fake images into {}...\".format(sample_path)\n )\n\n for emotion in range(1, self.c_dim):\n\n c_emotion = c_fixed_list[emotion]\n x_fake_list = [x_fixed]\n\n for strength in range(0, 11):\n\n x_fake_list.append(\n self.G(\n x_fixed,\n c_emotion[: x_fixed.size(0)],\n strength * 0.1,\n \"test\",\n )[0][:, [2, 1, 0], :, :]\n )\n\n x_concat = torch.cat(x_fake_list, dim=3)\n sample_path = os.path.join(\n self.sample_dir, f\"{i+1}-images_emotion_{emotion}.jpg\"\n )\n save_image(\n self.denorm(x_concat.data.cpu()),\n sample_path,\n nrow=1,\n padding=0,\n )\n print(\n \"Saved real and fake images into {}...\".format(\n sample_path\n )\n )\n\n if self.parametrization == \"gaussian\":\n\n print(\"MODES:\")\n print(self.G.print_expr())\n\n x_fake_list = [x_fixed]\n for expression in range(self.c_dim):\n\n x_fake_list.append(\n self.G(\n x_fixed,\n self.G.mu.weight[expression]\n .unsqueeze(0)\n .repeat(x_fixed.size(0), 1),\n )[0][:, [2, 1, 0], :, :]\n )\n\n x_concat = torch.cat(x_fake_list, dim=3)\n sample_path = os.path.join(\n self.sample_dir, \"{}-images.jpg\".format(i + 1)\n )\n save_image(\n self.denorm(x_concat.data.cpu()),\n sample_path,\n nrow=1,\n padding=0,\n )\n print(\n \"Saved real and fake images into {}...\".format(sample_path)\n )\n\n # Save model checkpoints.\n if (i + 1) % self.model_save_step == 0:\n G_path = os.path.join(self.model_save_dir, \"{}-G.ckpt\".format(i + 1))\n D_path = os.path.join(self.model_save_dir, \"{}-D.ckpt\".format(i + 1))\n torch.save(self.G.state_dict(), G_path)\n torch.save(self.D.state_dict(), D_path)\n print(\"Saved model checkpoints into {}...\".format(self.model_save_dir))\n\n # Decay learning rates.\n if (i + 1) % self.lr_update_step == 0 and (i + 1) > (\n self.num_iters - self.num_iters_decay\n ):\n g_lr -= self.g_lr / float(self.num_iters_decay)\n d_lr -= self.d_lr / float(self.num_iters_decay)\n self.update_lr(g_lr, d_lr)\n print(\"Decayed learning rates, g_lr: {}, d_lr: {}.\".format(g_lr, d_lr))\n"
] |
[
[
"torch.nn.Softmax",
"torch.mean",
"torch.nn.KLDivLoss",
"torch.nn.LogSoftmax",
"torch.abs",
"torch.nn.functional.softmax",
"torch.zeros",
"torch.load",
"numpy.arange",
"torch.cat",
"torch.nn.functional.cross_entropy",
"torch.sum",
"torch.nn.functional.mse_loss",
"torch.no_grad",
"torch.cuda.is_available",
"torch.autograd.grad"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
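Note: the discriminator loss in `solver.py` above mixes a Wasserstein critic term with either a WGAN-GP gradient penalty or an R1 penalty. A minimal self-contained sketch of the two regularizers, mirroring the code in the row (the callable `D` is a hypothetical stand-in for the discriminator):

```python
# Sketch of the two discriminator regularizers used in solver.py above.
# `D` is a hypothetical critic returning one scalar score per image.
import torch

def gradient_penalty(D, x_real, x_fake):
    # WGAN-GP: interpolate real/fake, then penalize deviations of the
    # critic's input-gradient norm from 1.
    alpha = torch.rand(x_real.size(0), 1, 1, 1, device=x_real.device)
    x_hat = (alpha * x_real + (1 - alpha) * x_fake).requires_grad_(True)
    out = D(x_hat)
    grad = torch.autograd.grad(outputs=out.sum(), inputs=x_hat,
                               create_graph=True, retain_graph=True,
                               only_inputs=True)[0]
    grad_norm = grad.view(grad.size(0), -1).norm(2, dim=1)
    return ((grad_norm - 1) ** 2).mean()

def r1_penalty(d_out, x_real):
    # R1: zero-centered gradient penalty on real images only; x_real must
    # have requires_grad_() set before d_out = D(x_real) is computed.
    grad = torch.autograd.grad(outputs=d_out.sum(), inputs=x_real,
                               create_graph=True, retain_graph=True,
                               only_inputs=True)[0]
    return 0.5 * grad.pow(2).view(grad.size(0), -1).sum(1).mean(0)
```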
tobiasw225/python-pso
|
[
"38cfb7775b296cdf3cdca0caf760737155e150f1"
] |
[
"pso/particle.py"
] |
[
"import sys\nimport numpy as np\n\n\nclass Particle:\n\n __slots__ = ['x', 'v', 'dims', 'best_point', 'best_solution']\n\n def __init__(self, n: int, dims: int):\n \"\"\"\n\n ensure particle starts with random velocity\n at random position.\n\n :param n:\n :param dims:\n \"\"\"\n self.x = 2 * n * np.random.ranf(dims) - n\n self.v = np.random.random(dims)*n + 1\n self.best_solution = sys.maxsize\n self.best_point = np.zeros(dims)\n self.dims = dims\n"
] |
[
[
"numpy.random.ranf",
"numpy.random.random",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
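Note: `Particle` above only initializes state (position uniform in [-n, n), random velocity, best-so-far bookkeeping). A hedged sketch of a conventional global-best PSO update that would consume these fields; the coefficients `w`, `c1`, `c2` are textbook defaults, not taken from this repo:

```python
# Hypothetical global-best PSO velocity/position step consuming the
# Particle fields above; w, c1, c2 are conventional values, not from the repo.
import numpy as np

def step(p, swarm_best_point, w=0.7, c1=1.5, c2=1.5):
    r1, r2 = np.random.ranf(p.dims), np.random.ranf(p.dims)
    p.v = (w * p.v
           + c1 * r1 * (p.best_point - p.x)       # cognitive pull
           + c2 * r2 * (swarm_best_point - p.x))  # social pull
    p.x = p.x + p.v
```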
dseuss/kinkycrystals
|
[
"4d314ea52802c637acd243f5e986434b97a38e91",
"4d314ea52802c637acd243f5e986434b97a38e91"
] |
[
"kcrecog/imgproc.py",
"kcrecog/atrous.py"
] |
[
"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"Routines for extracting data from the images.\"\"\"\nfrom __future__ import division, print_function\n\nimport cv2 as cv\nimport numpy as np\nfrom itertools import izip\nfrom skimage.measure import label, regionprops\n\nimport conf\nimport dataio as io\n\ntry:\n from tools.helpers import Progress\nexcept ImportError:\n def Progress(iter):\n return iter\n\n\n###############################################################################\n# Loading sequences & frame finding #\n###############################################################################\n\ndef read_seq(fnames):\n \"\"\"Reads a sequence of images from the files given. In this sequence the\n crystal is expected to be in the same region for all pictures. This is\n used to extract to the bounding box.\n\n :param list fnames: List of filenames to load\n :returns: imgs, props\n imgs: Dict of 2D arrays with same shape containing the images with\n the key given by the label of the file (the part of the filename\n between the last '_' and the file extension)\n props: Dict with additional properties of the sequence\n - x0: upper left corner of the relevant region in the original\n image.\n - max: Maximum brightness value over the whole sequence\n\n \"\"\"\n assert(len(fnames) > 0)\n imgs = [_load_img(fname) for fname in Progress(fnames)]\n\n # Compute \"mean\" image and scale to fit datatype\n sumimg = sum(img.astype(np.float) for img in imgs)\n sumimg -= sumimg.min()\n sumimg = (255 / sumimg.max() * sumimg).astype(np.uint8)\n\n x0, x1 = find_frame(sumimg)\n\n imgs = dict(izip((io.extract_label(fname) for fname in fnames),\n (img[x0[1]:x1[1], x0[0]:x1[0]] for img in imgs)))\n props = {'x0': x0, 'max': max(np.max(img) for img in imgs.itervalues())}\n return imgs, props\n\n\ndef find_frame(data):\n \"\"\"Finds the interesting frame of an image.\n\n :param data: Image as 2D array of type uint8\n :returns: (x0, x1), where x0 is the upper left corner and x1 the lower\n right corner of the bounding rectangle\n\n \"\"\"\n assert data.dtype == np.uint8, \"Image has wrong dtype.\"\n\n # TODO Separate preprocessing routine for less noisy images\n buf = _extract_interesting_region(data)\n bbox = regionprops(buf)[0].bbox\n x0, x1 = _postprocess_bb(x0=(bbox[1], bbox[0]),\n x1=(bbox[3], bbox[2]),\n imgsize=buf.shape)\n return x0, x1\n\n\ndef load_valid_img(fname):\n \"\"\"Loads the image from the given b16 file and checks whether the image\n contains some information (i.e. is not unicolor).\n\n :param fname: Path to file to read from\n :returns: Image as 2D array in uint8 format\n\n \"\"\"\n data = io.read_b16(fname)\n max_val, min_val = data.max(), data.min()\n if (max_val > 255) or (min_val < 0):\n raise InvalidImage(\"Image data range too large for uint8 format.\")\n if (max_val == min_val):\n raise InvalidImage(\"Image is blank.\")\n return np.array(data, dtype=np.uint8)\n\n\ndef _load_img(fname):\n \"\"\"Reads the data from the file and converts it to the dataformat used.\n\n :param fname: Path to file to read from\n :returns: Image as 2D array in uint8 format\n\n \"\"\"\n data = io.read_b16(fname)\n if (data.max() > 255) or (data.min() < 0):\n raise InvalidImage(\"Image data range too large for uint8 format.\")\n return np.array(data, dtype=np.uint8)\n\n\ndef _extract_interesting_region(data):\n \"\"\"Preprocesses image for frame finding. 
Steps taken are\n * Otsu thresholding\n * small-kernel-area opening to get rid of single/isolated bright pixels\n which are assumend to be noise\n * large-kernel-area opening to inverse the effect of the prior opening;\n this also serves the purpose to connect close bright areas (for the\n next step). Due to the distinct elongation of the kernel in the x-\n direction this especially favors horizontal structures.\n * finding largest connected region\n\n :param np.ndarray data: Image as 2D array of type uint8\n :returns: Mask as 2D array labeling the \"interesting area\" in that picture\n\n \"\"\"\n assert data.dtype == np.uint8, \"Image has wrong dtype.\"\n _, buf = cv.threshold(data, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)\n\n # Opening to get rid of noise\n kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))\n buf = cv.morphologyEx(buf, cv.MORPH_OPEN, kernel, iterations=3)\n\n # Closing to get connected area where signal should be\n kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (31, 7))\n buf = cv.morphologyEx(buf, cv.MORPH_CLOSE, kernel, iterations=5)\n\n # Find the largest connected component\n cc = label(buf, neighbors=4, background=0) + 1\n largest_component = np.argmax(np.bincount(cc.ravel())[1:]) + 1\n return cc == largest_component\n\n\ndef _postprocess_bb(x0, x1, imgsize):\n \"\"\"Performs checking and scaling of the bounding box.\n\n :param x0: Upper left corner of the bounding box\n :param x1: Lower right corner of the bounding box\n :param imgisze: Maximum values for the position of the lower right corner\n :returns: New values for x0 and x1\n\n \"\"\"\n xlen, ylen = x1[0] - x0[0], x1[1] - x0[1]\n # Check whether the bbox is to small\n if (xlen < conf.BBOX_MIN_X) or (ylen < conf.BBOX_MIN_Y):\n msg = \"Bounding box too small: {} should be at least {}\" \\\n .format((xlen, ylen),\n (conf.BBOX_MIN_X, conf.BBOX_MIN_Y))\n raise InvalidImage(msg, debuginfo=(x0, x1))\n\n # Scale the rectangle\n x0 = (max(x0[0] - xlen * (conf.BBOX_SCALE_X - 1) / 2, 0),\n max(x0[1] - ylen * (conf.BBOX_SCALE_Y - 1) / 2, 0))\n x1 = (min(x1[0] + xlen * (conf.BBOX_SCALE_X - 1) / 2, imgsize[1]),\n min(x1[1] + ylen * (conf.BBOX_SCALE_Y - 1) / 2, imgsize[0]))\n\n return np.floor(x0).astype(int), np.ceil(x1).astype(int)\n\n\n###############################################################################\n# Ion recognition #\n###############################################################################\n\n###############################################################################\n# Exceptions #\n###############################################################################\n\nclass InvalidImage(Exception):\n \"\"\"Exception class to be raised when image file fails preprocessing\"\"\"\n\n def __init__(self, value, debuginfo=None):\n self._value = value\n self._debuginfo = debuginfo\n\n def __str__(self):\n return repr(self._value)\n",
"#!/usr/bin/env python\n# encoding: utf-8\n\nfrom __future__ import division, print_function\n\nimport matplotlib.pyplot as pl\nimport numpy as np\nfrom scipy.ndimage.filters import convolve\nfrom skimage.filter import threshold_otsu\n\nfrom .dataio import read_b16\nfrom tools.helpers import Progress\n\n# 3-spline mother kernel\nMOTHER_WAVELET = [1/16, 1/4, 3/8, 1/4, 1/16]\n\n\ndef atrous_wavelet(source, level, mother_wavelet=MOTHER_WAVELET):\n \"\"\"@todo: Docstring for atrous_wavelet.\n\n :param img: @todo\n :param level: @todo\n :param mother_wavelet: @todo\n :returns: [A_level, W_level, ..., W_1]\n\n \"\"\"\n img = source.copy()\n kernel = np.array(mother_wavelet)\n wavelets = []\n\n for i in Progress(range(level)):\n img_smooth = _smoothen(img, kernel)\n kernel = _interleave_zeros(kernel)\n wavelet = img_smooth - img\n _threshold_to_zero(wavelet, _estimate_threshold(wavelet))\n wavelets.append(wavelet)\n img = img_smooth\n\n return [img] + wavelets\n\n\ndef _smoothen(img, kernel=MOTHER_WAVELET):\n \"\"\"Convolves the image in each dimension with the 1D convolution kernel.\n\n :param img: 2D array containing grayscale image\n :param kernel: 1D convolution Kernel of shape (2N + 1)\n :returns: 2D array containing the convolved image in grayscale\n\n \"\"\"\n kernel_arr = np.asarray(kernel)\n kernel_full = kernel_arr[None, :] * kernel_arr[:, None]\n return convolve(img, kernel_full, mode='reflect')\n\n\ndef _interleave_zeros(arr):\n \"\"\"Interleaves zeros between the values of arr, i.e.\n\n (x_0,...,x_n) -> (x_0, 0, x_1, ..., 0, x_n)\n\n :param arr: Array to be interleaved with zeros\n :returns: Array interleaved with zeros\n\n >>> list(interleave_zeros([1, 2, 3]))\n [1, 0, 2, 0, 3]\n \"\"\"\n newarr = [0 * arr[0]] * (2 * len(arr) - 1)\n newarr[::2] = arr\n return np.array(newarr)\n\n\ndef _threshold_to_zero(img, threshold):\n \"\"\"Replaces values in img below threshold by 0 in place.\n\n :param img: ndarray to be thresholded\n :param threshold: Threshold\n\n >>> a = np.array([1, 3, 2, 4]); _threshold_to_zero(a, 2.5); list(a)\n [0, 3, 0, 4]\n \"\"\"\n img[img < threshold] = 0\n\n\ndef _estimate_threshold(img, coeff=3):\n \"\"\"Estimates the threshold used for noise supression using the MAD est.\n\n t = coeff * sigma / 0.67 ,\n\n where sigma is the media absolute deviation of the wavelet coefficients\n from their median and the coeff is customary taken to be 3.\n\n :param img: Image to be thresholded\n :param coeff: Coefficient used in the thresholding formula (default: 3)\n :returns: Thresholding value t\n\n \"\"\"\n sigma = np.median(np.abs(img - np.median(img)))\n return coeff * sigma / 0.67\n\n\nif __name__ == '__main__':\n from tools.plot import imsshow\n pl.figure(0, figsize=(8, 16))\n\n # img = read_b16('tests/data/2015_02_13_17_14_0033.b16')\n img = read_b16('tests/data/2015_02_13_17_14_0048.b16')\n # img = read_b16('/Volumes/TOSHIBA EXT/PCO/2014_12_10_10ms_0182.b16')\n\n LEVELS_MIN = 4\n LEVELS_MAX = 7\n wavelets = atrous_wavelet(img, LEVELS_MAX)\n recov = [np.log(1 + np.prod(wavelets[1:i], axis=0))\n for i in range(LEVELS_MIN, LEVELS_MAX + 1)]\n\n axs = imsshow([img] + recov, layout='v', show=False)\n labels = ['Original'] + ['Recov. for J={}'.format(i)\n for i in range(LEVELS_MIN, LEVELS_MAX + 1)]\n # labels += ['W_{}'.format(i) for i in range(1, len(wavelets))]\n for ax, label in zip(axs, labels):\n ax.set_title(label)\n\n pl.tight_layout()\n pl.show()\n"
] |
[
[
"numpy.max",
"numpy.array",
"numpy.floor",
"numpy.ceil"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.asarray",
"numpy.median",
"numpy.prod",
"scipy.ndimage.filters.convolve",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
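Note: `atrous.py` above builds each wavelet plane as the difference between successive smoothings, dilating the B3-spline taps by interleaving zeros at every level. A small sketch, with thresholding disabled, checking the reconstruction identity `img == A_J - sum_j W_j` implied by that sign convention (uses the modern `scipy.ndimage.convolve` import path rather than the deprecated `scipy.ndimage.filters`):

```python
# Check the a trous identity for the decomposition in atrous.py above,
# with the noise thresholding step removed.
import numpy as np
from scipy.ndimage import convolve

MOTHER_WAVELET = [1/16, 1/4, 3/8, 1/4, 1/16]  # B3-spline taps, sum to 1

def atrous_no_threshold(img, level):
    kernel = np.array(MOTHER_WAVELET)
    planes, cur = [], img.astype(float)
    for _ in range(level):
        k2 = kernel[None, :] * kernel[:, None]   # separable 2D kernel
        smooth = convolve(cur, k2, mode='reflect')
        planes.append(smooth - cur)              # wavelet plane W_j (repo's sign)
        z = np.zeros(2 * len(kernel) - 1)
        z[::2] = kernel                          # dilate taps with zeros
        kernel, cur = z, smooth
    return cur, planes                           # A_J and [W_1..W_J]

img = np.random.rand(64, 64)
a_j, planes = atrous_no_threshold(img, 4)
assert np.allclose(a_j - sum(planes), img)       # exact reconstruction
```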
bsmind/dpc
|
[
"e07fef88ee5899aa340b415104048456d1811ac4"
] |
[
"core/HXN_databroker.py"
] |
[
"#from hxntools.handlers import register\n#import filestore\nfrom metadatastore.mds import MDS\nfrom databroker import Broker\nfrom filestore.fs import FileStore\n\n# database #1\n_mds_config = {'host': 'xf03id-ca1',\n 'port': 27017,\n 'database': 'datastore-new',\n 'timezone': 'US/Eastern'}\nmds = MDS(_mds_config)\n_fs_config = {'host': 'xf03id-ca1',\n 'port': 27017,\n 'database': 'filestore-new'}\ndb1 = Broker(mds, FileStore(_fs_config))\n\n# database #2\n_mds_config = {'host': 'xf03id-ca1',\n 'port': 27017,\n 'database': 'datastore-1',\n 'timezone': 'US/Eastern'}\nmds = MDS(_mds_config)\n_fs_config = {'host': 'xf03id-ca1',\n 'port': 27017,\n 'database': 'filestore-1'}\ndb2 = Broker(mds, FileStore(_fs_config))\n\n# database old\n_mds_config_old = {'host': 'xf03id-ca1',\n 'port': 27017,\n 'database': 'datastore',\n 'timezone': 'US/Eastern'}\nmds_old = MDS(_mds_config_old)\n\n_fs_config_old = {'host': 'xf03id-ca1',\n 'port': 27017,\n 'database': 'filestore'}\ndb_old = Broker(mds_old, FileStore(_fs_config_old))\n\nfrom hxntools.handlers.timepix import TimepixHDF5Handler\nfrom hxntools.handlers.xspress3 import Xspress3HDF5Handler\ndb1.reg.register_handler(TimepixHDF5Handler._handler_name, TimepixHDF5Handler, overwrite=True)\ndb2.reg.register_handler(TimepixHDF5Handler._handler_name, TimepixHDF5Handler, overwrite=True)\ndb_old.reg.register_handler(Xspress3HDF5Handler.HANDLER_NAME, Xspress3HDF5Handler)\ndb_old.reg.register_handler(TimepixHDF5Handler._handler_name, TimepixHDF5Handler, overwrite=True)\n\nimport numpy as np\nimport sys, os\nimport h5py\ntry:\n from core.widgets.imgTools import rm_outlier_pixels\nexcept ModuleNotFoundError:\n # for test purpose\n from widgets.imgTools import rm_outlier_pixels\n#######################################\n\n\ndef load_metadata(db, scan_num:int, det_name:str):\n '''\n Get all metadata for the given scan number and detector name\n\n Parameters:\n - db: \n a Broker instance. 
For HXN experiments they are db1, db2, and db_old\n - scan_num: int\n the scan number\n - det_name: str\n the detector name\n\n Return:\n A dictionary that holds the metadata (except for those directly related to the image)\n '''\n sid = scan_num\n metadata = dict()\n header = db[sid]\n\n plan_args = header.start['plan_args']\n scan_type = header.start['plan_name']\n scan_motors = header.start['motors']\n items = [det_name, 'sclr1_ch3', 'sclr1_ch4'] + scan_motors\n bl = db.get_table(header, stream_name='baseline')\n df = db.get_table(header, fields=items, fill=False)\n #images = db_old.get_images(db_old[sid], name=det_name)\n\n # get energy_kev\n dcm_th = bl.dcm_th[1]\n energy_kev = 12.39842 / (2.*3.1355893 * np.sin(dcm_th * np.pi / 180.))\n metadata['xray_energy_kev'] = energy_kev\n\n # get scan_type, x_range, y_range, dr_x, dr_y\n if scan_type == 'FlyPlan2D':\n x_range = plan_args['scan_end1']-plan_args['scan_start1']\n y_range = plan_args['scan_end2']-plan_args['scan_start2']\n x_num = plan_args['num1']\n y_num = plan_args['num2']\n dr_x = 1.*x_range/x_num\n dr_y = 1.*y_range/y_num\n x_range = x_range - dr_x\n y_range = y_range - dr_y\n elif scan_type == 'rel_spiral_fermat':\n x_range = plan_args['x_range']\n y_range = plan_args['y_range']\n dr_x = plan_args['dr']\n dr_y = 0\n else:\n x_range = plan_args['args'][2]-plan_args['args'][1]\n y_range = plan_args['args'][6]-plan_args['args'][5]\n x_num = plan_args['args'][3]\n y_num = plan_args['args'][7]\n dr_x = 1.*x_range/x_num\n dr_y = 1.*y_range/y_num\n x_range = x_range - dr_x\n y_range = y_range - dr_y\n metadata['scan_type'] = scan_type\n metadata['dr_x'] = dr_x\n metadata['dr_y'] = dr_y\n metadata['x_range'] = x_range\n metadata['y_range'] = y_range\n\n # get points\n num_frame, count = np.shape(df)\n points = np.zeros((2, num_frame))\n points[0] = np.array(df[scan_motors[0]])\n points[1] = np.array(df[scan_motors[1]])\n metadata['points'] = points\n\n # get angle, ic\n if scan_motors[1] == 'dssy':\n angle = bl.dsth[1]\n ic = np.asfarray(df['sclr1_ch4'])\n elif scan_motors[1] == 'zpssy':\n angle = bl.zpsth[1]\n ic = np.asfarray(df['sclr1_ch3'])\n metadata['angle'] = angle\n metadata['ic'] = ic\n \n # get ccd_pixel_um\n ccd_pixel_um = 55.\n metadata['ccd_pixel_um'] = ccd_pixel_um\n\n # get diffamp dimensions (uncropped!)\n nz, = df[det_name].shape\n mds_table = df[det_name]\n metadata['nz'] = nz\n metadata['mds_table'] = mds_table\n\n return metadata\n\n\ndef save_data(db, param, scan_num:int, n:int, nn:int, cx=110, cy=160, threshold=1., bad_pixels=None, zero_out=None):\n '''\n Save metadata and diffamp for the given scan number to a HDF5 file.\n\n Parameters:\n - db: \n a Broker instance. For HXN experiments they are db1, db2, and db_old\n - param: Param\n a Param instance containing the metadata and other information from the GUI\n - scan_num: int\n the scan number\n - n: int\n the x dimension of the ROI window (nx_prb)\n - nn: int\n the y dimension of the ROI window (nx_prb)\n - cx: int\n x index of the center of mass\n - cy: int\n y index of the center of mass\n - threshold: float, optional\n the threshold of raw image, below which the data is removed\n - bad_pixels: list of two lists, optional\n the data structure is [[x1, x2, ...], [y1, y2, ...]]. If given, they will be removed from the images.\n - zero_out: list of tuples, optional\n zero out the given rois [(x0, y0, w0, h0), (x1, y1, w1, h1), ...]\n\n Notes:\n 1. 
the detector distance is assumed existent as param.z_m\n '''\n det_distance_m = param.z_m\n det_pixel_um = param.ccd_pixel_um\n num_frame = param.nz\n angle = param.angle\n lambda_nm = param.lambda_nm\n ic = param.ic\n #energy_kev = param.energy_kev\n\n #print('energy:', energy_kev)\n #print('angle: ', angle)\n #lambda_nm = 1.2398/energy_kev\n x_pixel_m = lambda_nm * 1.e-9 * det_distance_m / (n * det_pixel_um * 1e-6)\n y_pixel_m = lambda_nm * 1.e-9 * det_distance_m / (nn * det_pixel_um * 1e-6)\n x_depth_of_field_m = lambda_nm * 1.e-9 / (n/2 * det_pixel_um*1.e-6 / det_distance_m)**2\n y_depth_of_field_m = lambda_nm * 1.e-9 / (nn/2 * det_pixel_um*1.e-6 / det_distance_m)**2\n #print('pixel size: ', x_pixel_m, y_pixel_m)\n #print('depth of field: ', x_depth_of_field_m, y_depth_of_field_m)\n \n # get data array\n data = np.zeros((num_frame, n, nn)) # nz*nx*ny\n for i in range(num_frame):\n #print(param.mds_table.iat[i], file=sys.stderr)\n img = db.reg.retrieve(param.mds_table.iat[i])[0]\n #img = np.rot90(img, axes=(1,0)) #equivalent to tt = np.flipud(tt).T\n ny, nx = np.shape(img)\n\n img = img * ic[0] / ic[i]\n\n if bad_pixels is not None:\n img = rm_outlier_pixels(img, bad_pixels[0], bad_pixels[1])\n\n if zero_out is not None:\n for blue_roi in zero_out:\n x0 = blue_roi[0]\n y0 = blue_roi[1]\n w = blue_roi[2]\n h = blue_roi[3]\n img[y0:y0+h, x0:x0+w] = 0.\n\n if n < nx:\n # assuming n=nn???\n #print(nx, ny, file=sys.stderr)\n #print(cx-n//2, cx+n//2, cy-nn//2, cy+nn//2, file=sys.stderr)\n #tmptmp = img[cx-n//2:cx+n//2, cy-nn//2:cy+nn//2]\n tmptmp = img[cy-nn//2:cy+nn//2, cx-n//2:cx+n//2]\n #print(tmptmp.shape, file=sys.stderr)\n else: \n raise Exception(\"zero padding not completed yet\")\n # # is this part necessary???\n # #tmptmp = t\n # tmptmp = np.zeros((n, n))\n # #tmptmp[3:-3,:] = t[:,cy-n//2:cy+n//2]\n # tmptmp[4:-8, :] = img[:, cy-n//2:cy+n//2]\n\n #if i == 0:\n # import matplotlib.pyplot as plt\n # plt.imshow(tmptmp, vmin=np.min(img), vmax=np.max(img))\n # plt.savefig(\"ttttt.png\")\n # return\n \n tmptmp = np.rot90(tmptmp, axes=(1,0)) #equivalent to np.flipud(tmptmp).T\n data[i] = np.fft.fftshift(tmptmp)\n\n data[data < threshold] = 0.\n data = np.sqrt(data)\n # data array got\n print('array size:', np.shape(data))\n \n # create a folder\n try:\n os.mkdir(param.working_directory + '/h5_data/')\n except FileExistsError:\n pass \n\n file_path = param.working_directory + '/h5_data/scan_' + str(scan_num) + '.h5'\n with h5py.File(file_path, 'w') as hf:\n dset = hf.create_dataset('diffamp', data=data)\n dset = hf.create_dataset('points', data=param.points)\n dset = hf.create_dataset('x_range', data=param.x_range)\n dset = hf.create_dataset('y_range', data=param.y_range)\n dset = hf.create_dataset('dr_x', data=param.dr_x)\n dset = hf.create_dataset('dr_y', data=param.dr_y)\n dset = hf.create_dataset('z_m', data=det_distance_m)\n dset = hf.create_dataset('lambda_nm', data=lambda_nm)\n dset = hf.create_dataset('ccd_pixel_um', data=det_pixel_um)\n dset = hf.create_dataset('angle', data=angle)\n dset = hf.create_dataset('ic', data=ic)\n dset = hf.create_dataset('x_pixel_m', data=x_pixel_m)\n dset = hf.create_dataset('y_pixel_m', data=y_pixel_m)\n dset = hf.create_dataset('x_depth_field_m', data=x_depth_of_field_m)\n dset = hf.create_dataset('y_depth_field_m', data=y_depth_of_field_m)\n\n # symlink so ptycho can find it\n try:\n symlink_path = param.working_directory + '/scan_' + str(scan_num) + '.h5'\n os.symlink(file_path, symlink_path)\n except FileExistsError:\n os.remove(symlink_path)\n 
os.symlink(file_path, symlink_path)\n"
] |
[
[
"numpy.rot90",
"numpy.sqrt",
"numpy.fft.fftshift",
"numpy.sin",
"numpy.asfarray",
"numpy.shape",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
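Note: `load_metadata` above converts the monochromator Bragg angle to photon energy via E[keV] = 12.39842 / (2 · 3.1355893 · sin θ), i.e. Bragg's law with the Si(111) d-spacing in Å and hc = 12.39842 keV·Å. A worked check:

```python
# Worked check of the monochromator-energy formula in load_metadata() above:
# Bragg's law with the Si(111) d-spacing (3.1355893 A) and hc = 12.39842 keV*A.
import numpy as np

def energy_kev(dcm_th_deg):
    return 12.39842 / (2. * 3.1355893 * np.sin(np.deg2rad(dcm_th_deg)))

print(energy_kev(11.31))  # ~10.08 keV for an ~11.31 deg Bragg angle
```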
lukasHoel/AdversarialTexture
|
[
"d56e4817e725f45c911f66574f4f164f4e7b371f"
] |
[
"src/preprocessing/CudaRender/example.py"
] |
[
"import numpy as np\nimport sys\nimport skimage.io as sio\nimport os\n#from gen_poses import GetPoses,WavePose\nimport shutil\nfrom objloader import LoadTextureOBJ\nimport render\nimport objloader\n\n\ninput_obj = sys.argv[1]\nV, F, VT, FT, VN, FN, face_mat, kdmap = objloader.LoadTextureOBJ(input_obj)\n\n# set up camera information\ninfo = {'Height':960, 'Width':1280, 'fx':575*2, 'fy':575*2, 'cx':640, 'cy':480}\nrender.setup(info)\n\ncontext = render.SetMesh(V, F)\n\ncam2world = np.array([[ 0.85408425, 0.31617427, -0.375678 , 0.56351697 * 2],\n [ 0. , -0.72227067, -0.60786998, 0.91180497 * 2],\n [-0.52013469, 0.51917219, -0.61688 , 0.92532003 * 2],\n [ 0. , 0. , 0. , 1. ]], dtype=np.float32)\n\nworld2cam = np.linalg.inv(cam2world).astype('float32')\nrender.render(context, world2cam)\ndepth = render.getDepth(info)\nvindices, vweights, findices = render.getVMap(context, info)\n\nsio.imsave('depth.png', depth / np.max(depth))\nsio.imsave('vweights.png', vweights)"
] |
[
[
"numpy.linalg.inv",
"numpy.max",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
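Note: `example.py` above inverts `cam2world` because the renderer consumes the world-to-camera extrinsic. A purely illustrative sketch of the implied pinhole projection with the intrinsics from `info` (the +z-forward camera convention here is an assumption, not confirmed by the repo):

```python
# Sketch of the camera convention implied by example.py above: map points
# from world to camera with inv(cam2world), then project with the pinhole
# intrinsics (fx, fy, cx, cy). Assumes the camera looks along +z.
import numpy as np

info = {'fx': 575 * 2, 'fy': 575 * 2, 'cx': 640, 'cy': 480}

def project(points_world, cam2world):
    world2cam = np.linalg.inv(cam2world).astype(np.float32)
    pts_h = np.concatenate([points_world, np.ones((len(points_world), 1))], axis=1)
    pts_cam = (world2cam @ pts_h.T).T[:, :3]              # camera-frame XYZ
    u = info['fx'] * pts_cam[:, 0] / pts_cam[:, 2] + info['cx']
    v = info['fy'] * pts_cam[:, 1] / pts_cam[:, 2] + info['cy']
    return np.stack([u, v], axis=1)                       # pixel coordinates
```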
davidby332/shap
|
[
"e6e5e0287a5f0503d0c6127d29c10ea2d8dbfafa"
] |
[
"shap/plots/_scatter.py"
] |
[
"from __future__ import division\n\nimport numpy as np\nimport warnings\ntry:\n import matplotlib.pyplot as pl\n import matplotlib\nexcept ImportError:\n warnings.warn(\"matplotlib could not be loaded!\")\n pass\nfrom ._labels import labels\nfrom . import colors\nfrom ..utils import convert_name, approximate_interactions\nfrom ..utils._general import encode_array_if_needed\nfrom .._explanation import Explanation\n\n\n# TODO: Make the color bar a one-sided beeswarm plot so we can see the density along the color axis\ndef scatter(shap_values, color=\"#1E88E5\", hist=True, axis_color=\"#333333\", cmap=colors.red_blue,\n dot_size=16, x_jitter=\"auto\", alpha=1, title=None, xmin=None, xmax=None, ymin=None, ymax=None,\n overlay=None, ax=None, ylabel=\"SHAP value\", show=True):\n \"\"\" Create a SHAP dependence scatter plot, colored by an interaction feature.\n\n Plots the value of the feature on the x-axis and the SHAP value of the same feature\n on the y-axis. This shows how the model depends on the given feature, and is like a\n richer extenstion of classical parital dependence plots. Vertical dispersion of the\n data points represents interaction effects. Grey ticks along the y-axis are data\n points where the feature's value was NaN.\n\n Note that if you want to change the data being displayed you can update the\n shap_values.display_features attribute and it will then be used for plotting instead of\n shap_values.data.\n\n\n Parameters\n ----------\n shap_values : shap.Explanation\n A single column of a SHAP Explanation object (i.e. shap_values[:,\"Feature A\"]).\n\n color : string or shap.Explanation\n How to color the scatter plot points. This can be a fixed color string, or a Explanation object.\n If it is an explanation object then the scatter plot points are colored by the feature that\n seems to have the strongest interaction effect with the feature given by the shap_values argument.\n This is calculated using shap.utils.approximate_interactions.\n If only a single column of an Explanation object is passed then that feature column will be used\n to color the data points.\n\n hist : bool\n Whether to show a light histogram along the x-axis to show the density of the data. Note that the\n histogram is normalized that that if all the point were in a single bin then that bin would span\n the full height of the plot.\n\n x_jitter : 'auto' or float (0 - 1)\n Adds random jitter to feature values. May increase plot readability when a feature\n is discrete. By default x_jitter is chosen base on auto-detection of categorical features\n\n alpha : float\n The transparency of the data points (between 0 and 1). This can be useful to the\n show density of the data points when using a large dataset.\n\n xmin : float or string\n Represents the lower bound of the plot's x-axis. It can be a string of the format\n \"percentile(float)\" to denote that percentile of the feature's value used on the x-axis.\n\n xmax : float or string\n Represents the upper bound of the plot's x-axis. 
It can be a string of the format\n \"percentile(float)\" to denote that percentile of the feature's value used on the x-axis.\n\n ax : matplotlib Axes object\n Optionally specify an existing matplotlib Axes object, into which the plot will be placed.\n In this case we do not create a Figure, otherwise we do.\n\n \"\"\"\n\n\n assert str(type(shap_values)).endswith(\"Explanation'>\"), \"The shap_values paramemter must be a shap.Explanation object!\"\n\n # see if we are plotting multiple columns\n if not isinstance(shap_values.feature_names, str) and len(shap_values.feature_names) > 0:\n inds = np.argsort(np.abs(shap_values.values).mean(0))\n nan_min = np.nanmin(shap_values.values)\n nan_max = np.nanmax(shap_values.values)\n if ymin is None:\n ymin = nan_min - (nan_max - nan_min)/20\n if ymax is None:\n ymax = nan_max + (nan_max - nan_min)/20\n f = pl.subplots(1, len(inds), figsize=(min(6 * len(inds), 15), 5))\n for i in inds:\n ax = pl.subplot(1,len(inds),i+1)\n scatter(shap_values[:,i], show=False, ax=ax, ymin=ymin, ymax=ymax)\n if overlay is not None:\n line_styles = [\"solid\", \"dotted\", \"dashed\"]\n for j, name in enumerate(overlay):\n vals = overlay[name]\n if isinstance(vals[i][0][0], (float, int)):\n pl.plot(vals[i][0], vals[i][1], color=\"#000000\", linestyle=line_styles[j], label=name)\n if i == 0:\n ax.set_ylabel(ylabel)\n else:\n ax.set_ylabel(\"\")\n ax.set_yticks([])\n ax.spines['left'].set_visible(False)\n if overlay is not None:\n pl.legend()\n if show:\n pl.show()\n return\n\n if len(shap_values.shape) != 1:\n raise Exception(\"The passed Explanation object has multiple columns, please pass a single feature column to \" + \\\n \"shap.plots.dependence like: shap_values[:,column]\")\n\n # this unpacks the explanation object for the code that was written earlier\n feature_names = [shap_values.feature_names]\n ind = 0\n shap_values_arr = shap_values.values.reshape(-1, 1)\n features = shap_values.data.reshape(-1, 1)\n if shap_values.display_data is None:\n display_features = features\n else:\n display_features = shap_values.display_data.reshape(-1, 1)\n interaction_index = None\n\n # unwrap explanation objects used for bounds\n if issubclass(type(xmin), Explanation):\n xmin = xmin.data\n if issubclass(type(xmax), Explanation):\n xmax = xmax.data\n if issubclass(type(ymin), Explanation):\n ymin = ymin.values\n if issubclass(type(ymax), Explanation):\n ymax = ymax.values\n\n # wrap np.arrays as Explanations\n if isinstance(color, np.ndarray):\n color = Explanation(values=color, base_values=None, data=color)\n \n # TODO: This stacking could be avoided if we use the new shap.utils.potential_interactions function\n if str(type(color)).endswith(\"Explanation'>\"):\n shap_values2 = color\n if issubclass(type(shap_values2.feature_names), (str, int)):\n feature_names.append(shap_values2.feature_names)\n shap_values_arr = np.hstack([shap_values_arr, shap_values2.values.reshape(-1, len(feature_names)-1)])\n features = np.hstack([features, shap_values2.data.reshape(-1, len(feature_names)-1)])\n if shap_values2.display_data is None:\n display_features = np.hstack([display_features, shap_values2.data.reshape(-1, len(feature_names)-1)])\n else:\n display_features = np.hstack([display_features, shap_values2.display_data.reshape(-1, len(feature_names)-1)])\n else:\n feature_names2 = np.array(shap_values2.feature_names)\n mask = ~(feature_names[0] == feature_names2)\n feature_names.extend(feature_names2[mask])\n shap_values_arr = np.hstack([shap_values_arr, shap_values2.values[:,mask]])\n 
features = np.hstack([features, shap_values2.data[:,mask]])\n if shap_values2.display_data is None:\n display_features = np.hstack([display_features, shap_values2.data[:,mask]])\n else:\n display_features = np.hstack([display_features, shap_values2.display_data[:,mask]])\n color = None\n interaction_index = \"auto\"\n\n\n if type(shap_values_arr) is list:\n raise TypeError(\"The passed shap_values_arr are a list not an array! If you have a list of explanations try \" \\\n \"passing shap_values_arr[0] instead to explain the first output class of a multi-output model.\")\n\n # convert from DataFrames if we got any\n if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"):\n if feature_names is None:\n feature_names = features.columns\n features = features.values\n\n if feature_names is None:\n feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values_arr.shape[1])]\n\n # allow vectors to be passed\n if len(shap_values_arr.shape) == 1:\n shap_values_arr = np.reshape(shap_values_arr, len(shap_values_arr), 1)\n if len(features.shape) == 1:\n features = np.reshape(features, len(features), 1)\n\n ind = convert_name(ind, shap_values_arr, feature_names)\n\n # pick jitter for categorical features\n vals = np.sort(np.unique(features[:,ind]))\n min_dist = np.inf\n for i in range(1,len(vals)):\n d = vals[i] - vals[i-1]\n if d > 1e-8 and d < min_dist:\n min_dist = d\n num_points_per_value = len(features[:,ind]) / len(vals)\n if num_points_per_value < 10:\n #categorical = False\n if x_jitter == \"auto\":\n x_jitter = 0\n elif num_points_per_value < 100:\n #categorical = True\n if x_jitter == \"auto\":\n x_jitter = min_dist * 0.1\n else:\n #categorical = True\n if x_jitter == \"auto\":\n x_jitter = min_dist * 0.2\n\n # guess what other feature as the stongest interaction with the plotted feature\n if not hasattr(ind, \"__len__\"):\n if interaction_index == \"auto\":\n interaction_index = approximate_interactions(ind, shap_values_arr, features)[0]\n interaction_index = convert_name(interaction_index, shap_values_arr, feature_names)\n categorical_interaction = False\n\n # create a matplotlib figure, if `ax` hasn't been specified.\n if not ax:\n figsize = (7.5, 5) if interaction_index != ind and interaction_index is not None else (6, 5)\n fig = pl.figure(figsize=figsize)\n ax = fig.gca()\n else:\n fig = ax.get_figure()\n\n # plotting SHAP interaction values\n if len(shap_values_arr.shape) == 3 and hasattr(ind, \"__len__\") and len(ind) == 2:\n ind1 = convert_name(ind[0], shap_values_arr, feature_names)\n ind2 = convert_name(ind[1], shap_values_arr, feature_names)\n if ind1 == ind2:\n proj_shap_values_arr = shap_values_arr[:, ind2, :]\n else:\n proj_shap_values_arr = shap_values_arr[:, ind2, :] * 2 # off-diag values are split in half\n\n # there is no interaction coloring for the main effect\n if ind1 == ind2:\n fig.set_size_inches(6, 5, forward=True)\n\n # TODO: remove recursion; generally the functions should be shorter for more maintainable code\n dependence_legacy(\n ind1, proj_shap_values_arr, features, feature_names=feature_names,\n interaction_index=(None if ind1 == ind2 else ind2), display_features=display_features, ax=ax, show=False,\n xmin=xmin, xmax=xmax, x_jitter=x_jitter, alpha=alpha\n )\n if ind1 == ind2:\n ax.set_ylabel(labels['MAIN_EFFECT'] % feature_names[ind1])\n else:\n ax.set_ylabel(labels['INTERACTION_EFFECT'] % (feature_names[ind1], feature_names[ind2]))\n\n if show:\n pl.show()\n return\n\n assert shap_values_arr.shape[0] == features.shape[0], \\\n 
\"'shap_values_arr' and 'features' values must have the same number of rows!\"\n assert shap_values_arr.shape[1] == features.shape[1], \\\n \"'shap_values_arr' must have the same number of columns as 'features'!\"\n\n # get both the raw and display feature values\n oinds = np.arange(shap_values_arr.shape[0]) # we randomize the ordering so plotting overlaps are not related to data ordering\n np.random.shuffle(oinds)\n xv = encode_array_if_needed(features[oinds, ind])\n xd = display_features[oinds, ind]\n \n s = shap_values_arr[oinds, ind]\n if type(xd[0]) == str:\n name_map = {}\n for i in range(len(xv)):\n name_map[xd[i]] = xv[i]\n xnames = list(name_map.keys())\n \n # allow a single feature name to be passed alone\n if type(feature_names) == str:\n feature_names = [feature_names]\n name = feature_names[ind]\n\n # get both the raw and display color values\n color_norm = None\n if interaction_index is not None:\n interaction_feature_values = encode_array_if_needed(features[:, interaction_index])\n cv = interaction_feature_values\n cd = display_features[:, interaction_index]\n clow = np.nanpercentile(cv.astype(np.float), 5)\n chigh = np.nanpercentile(cv.astype(np.float), 95)\n if clow == chigh:\n clow = np.nanmin(cv.astype(np.float))\n chigh = np.nanmax(cv.astype(np.float))\n if type(cd[0]) == str:\n cname_map = {}\n for i in range(len(cv)):\n cname_map[cd[i]] = cv[i]\n cnames = list(cname_map.keys())\n categorical_interaction = True\n elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:\n categorical_interaction = True\n\n # discritize colors for categorical features\n if categorical_interaction and clow != chigh:\n clow = np.nanmin(cv.astype(np.float))\n chigh = np.nanmax(cv.astype(np.float))\n bounds = np.linspace(clow, chigh, min(int(chigh - clow + 2), cmap.N-1))\n color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N-1)\n\n # optionally add jitter to feature values\n xv_no_jitter = xv.copy()\n if x_jitter > 0:\n if x_jitter > 1: x_jitter = 1\n xvals = xv.copy()\n if isinstance(xvals[0], float):\n xvals = xvals.astype(np.float)\n xvals = xvals[~np.isnan(xvals)]\n xvals = np.unique(xvals) # returns a sorted array\n if len(xvals) >= 2:\n smallest_diff = np.min(np.diff(xvals))\n jitter_amount = x_jitter * smallest_diff\n xv += (np.random.random_sample(size = len(xv))*jitter_amount) - (jitter_amount/2)\n\n \n # the actual scatter plot, TODO: adapt the dot_size to the number of data points?\n xv_nan = np.isnan(xv)\n xv_notnan = np.invert(xv_nan)\n if interaction_index is not None:\n\n # plot the nan values in the interaction feature as grey\n cvals = features[oinds, interaction_index].astype(np.float64)\n cvals_imp = cvals.copy()\n cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0\n cvals[cvals_imp > chigh] = chigh\n cvals[cvals_imp < clow] = clow\n if color_norm is None:\n vmin = clow\n vmax = chigh\n else:\n vmin = vmax = None\n ax.axhline(0, color=\"#888888\", lw=0.5, dashes=(1, 5), zorder=-1)\n p = ax.scatter(\n xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0, c=cvals[xv_notnan],\n cmap=cmap, alpha=alpha, vmin=vmin, vmax=vmax,\n norm=color_norm, rasterized=len(xv) > 500\n )\n p.set_array(cvals[xv_notnan])\n else:\n p = ax.scatter(xv, s, s=dot_size, linewidth=0, color=color,\n alpha=alpha, rasterized=len(xv) > 500)\n\n if interaction_index != ind and interaction_index is not None:\n # draw the color bar\n if type(cd[0]) == str:\n tick_positions = np.array([cname_map[n] for n in cnames])\n tick_positions *= 1 - 1 / len(cnames)\n tick_positions += 0.5 * (chigh - clow) / 
(chigh - clow + 1)\n cb = pl.colorbar(p, ticks=tick_positions, ax=ax)\n cb.set_ticklabels(cnames)\n else:\n cb = pl.colorbar(p, ax=ax)\n\n cb.set_label(feature_names[interaction_index], size=13)\n cb.ax.tick_params(labelsize=11)\n if categorical_interaction:\n cb.ax.tick_params(length=0)\n cb.set_alpha(1)\n cb.outline.set_visible(False)\n bbox = cb.ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n # cb.ax.set_aspect((bbox.height - 0.7) * 20)\n\n # handles any setting of xmax and xmin\n # note that we handle None,float, or \"percentile(float)\" formats\n if xmin is not None or xmax is not None:\n if type(xmin) == str and xmin.startswith(\"percentile\"):\n xmin = np.nanpercentile(xv, float(xmin[11:-1]))\n if type(xmax) == str and xmax.startswith(\"percentile\"):\n xmax = np.nanpercentile(xv, float(xmax[11:-1]))\n\n if xmin is None or xmin == np.nanmin(xv):\n xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20\n if xmax is None or xmax == np.nanmax(xv):\n xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20\n\n ax.set_xlim(xmin, xmax)\n\n if ymin is not None or ymax is not None:\n # if type(ymin) == str and ymin.startswith(\"percentile\"):\n # ymin = np.nanpercentile(xv, float(ymin[11:-1]))\n # if type(ymax) == str and ymax.startswith(\"percentile\"):\n # ymax = np.nanpercentile(xv, float(ymax[11:-1]))\n\n if ymin is None or ymin == np.nanmin(xv):\n ymin = np.nanmin(xv) - (ymax - np.nanmin(xv))/20\n if ymax is None or ymax == np.nanmax(xv):\n ymax = np.nanmax(xv) + (np.nanmax(xv) - ymin)/20\n\n ax.set_ylim(ymin, ymax)\n\n # plot any nan feature values as tick marks along the y-axis\n xlim = ax.get_xlim()\n if interaction_index is not None:\n p = ax.scatter(\n xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,\n linewidth=2, c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha,\n vmin=clow, vmax=chigh\n )\n p.set_array(cvals[xv_nan])\n else:\n ax.scatter(\n xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,\n linewidth=2, color=color, alpha=alpha\n )\n ax.set_xlim(xlim)\n\n # the histogram of the data\n if hist:\n ax2 = ax.twinx()\n #n, bins, patches = \n xlim = ax.get_xlim()\n xvals = np.unique(xv_no_jitter)\n\n if len(xvals) / len(xv_no_jitter) < 0.2 and len(xvals) < 75 and np.max(xvals) < 75 and np.min(xvals) >= 0:\n np.sort(xvals)\n bin_edges = []\n for i in range(int(np.max(xvals)+1)):\n bin_edges.append(i-0.5)\n\n #bin_edges.append((xvals[i] + xvals[i+1])/2)\n bin_edges.append(int(np.max(xvals))+0.5)\n\n lim = np.floor(np.min(xvals) - 0.5) + 0.5, np.ceil(np.max(xvals) + 0.5) - 0.5\n ax.set_xlim(lim)\n else:\n if len(xv_no_jitter) >= 500:\n bin_edges = 50\n elif len(xv_no_jitter) >= 200:\n bin_edges = 20\n elif len(xv_no_jitter) >= 100:\n bin_edges = 10\n else:\n bin_edges = 5\n \n ax2.hist(xv[~np.isnan(xv)], bin_edges, density=False, facecolor='#000000', alpha=0.1, range=(xlim[0], xlim[1]), zorder=-1)\n ax2.set_ylim(0,len(xv))\n\n ax2.xaxis.set_ticks_position('bottom')\n ax2.yaxis.set_ticks_position('left')\n ax2.yaxis.set_ticks([])\n ax2.spines['right'].set_visible(False)\n ax2.spines['top'].set_visible(False)\n ax2.spines['left'].set_visible(False)\n ax2.spines['bottom'].set_visible(False)\n\n pl.sca(ax)\n\n # make the plot more readable\n ax.set_xlabel(name, color=axis_color, fontsize=13)\n ax.set_ylabel(labels['VALUE_FOR'] % name, color=axis_color, fontsize=13)\n if title is not None:\n ax.set_title(title, color=axis_color, fontsize=13)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.spines['right'].set_visible(False)\n 
ax.spines['top'].set_visible(False)\n ax.tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)\n for spine in ax.spines.values():\n spine.set_edgecolor(axis_color)\n if type(xd[0]) == str:\n ax.set_xticks([name_map[n] for n in xnames])\n ax.set_xticklabels(xnames, dict(rotation='vertical', fontsize=11))\n if show:\n with warnings.catch_warnings(): # ignore expected matplotlib warnings\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n pl.show()\n\n\n\n\ndef dependence_legacy(ind, shap_values=None, features=None, feature_names=None, display_features=None,\n interaction_index=\"auto\",\n color=\"#1E88E5\", axis_color=\"#333333\", cmap=None,\n dot_size=16, x_jitter=0, alpha=1, title=None, xmin=None, xmax=None, ax=None, show=True,\n feature_map = dict()):\n \"\"\" Create a SHAP dependence plot, colored by an interaction feature.\n\n Plots the value of the feature on the x-axis and the SHAP value of the same feature\n on the y-axis. This shows how the model depends on the given feature, and is like a\n richer extenstion of the classical parital dependence plots. Vertical dispersion of the\n data points represents interaction effects. Grey ticks along the y-axis are data\n points where the feature's value was NaN.\n\n\n Parameters\n ----------\n ind : int or string\n If this is an int it is the index of the feature to plot. If this is a string it is\n either the name of the feature to plot, or it can have the form \"rank(int)\" to specify\n the feature with that rank (ordered by mean absolute SHAP value over all the samples).\n\n shap_values : numpy.array\n Matrix of SHAP values (# samples x # features).\n\n features : numpy.array or pandas.DataFrame\n Matrix of feature values (# samples x # features).\n\n feature_names : list\n Names of the features (length # features).\n\n display_features : numpy.array or pandas.DataFrame\n Matrix of feature values for visual display (such as strings instead of coded values).\n\n interaction_index : \"auto\", None, int, or string\n The index of the feature used to color the plot. The name of a feature can also be passed\n as a string. If \"auto\" then shap.common.approximate_interactions is used to pick what\n seems to be the strongest interaction (note that to find to true stongest interaction you\n need to compute the SHAP interaction values).\n\n x_jitter : float (0 - 1)\n Adds random jitter to feature values. May increase plot readability when feature\n is discrete.\n\n alpha : float\n The transparency of the data points (between 0 and 1). This can be useful to the\n show density of the data points when using a large dataset.\n\n xmin : float or string\n Represents the lower bound of the plot's x-axis. It can be a string of the format\n \"percentile(float)\" to denote that percentile of the feature's value used on the x-axis.\n\n xmax : float or string\n Represents the upper bound of the plot's x-axis. It can be a string of the format\n \"percentile(float)\" to denote that percentile of the feature's value used on the x-axis.\n\n ax : matplotlib Axes object\n Optionally specify an existing matplotlib Axes object, into which the plot will be placed.\n In this case we do not create a Figure, otherwise we do.\n\n \"\"\"\n\n if cmap is None:\n cmap = colors.red_blue\n\n if type(shap_values) is list:\n raise TypeError(\"The passed shap_values are a list not an array! 
If you have a list of explanations try \" \\\n \"passing shap_values[0] instead to explain the first output class of a multi-output model.\")\n\n # convert from DataFrames if we got any\n if str(type(features)).endswith(\"'pandas.core.frame.DataFrame'>\"):\n if feature_names is None:\n feature_names = features.columns\n features = features.values\n if str(type(display_features)).endswith(\"'pandas.core.frame.DataFrame'>\"):\n if feature_names is None:\n feature_names = display_features.columns\n display_features = display_features.values\n elif display_features is None:\n display_features = features\n\n if feature_names is None:\n feature_names = [labels['FEATURE'] % str(i) for i in range(shap_values.shape[1])]\n\n # allow vectors to be passed\n if len(shap_values.shape) == 1:\n shap_values = np.reshape(shap_values, len(shap_values), 1)\n if len(features.shape) == 1:\n features = np.reshape(features, len(features), 1)\n\n ind = convert_name(ind, shap_values, feature_names)\n\n # guess what other feature as the stongest interaction with the plotted feature\n if not hasattr(ind, \"__len__\"):\n if interaction_index == \"auto\":\n interaction_index = approximate_interactions(ind, shap_values, features)[0]\n interaction_index = convert_name(interaction_index, shap_values, feature_names)\n categorical_interaction = False\n\n # create a matplotlib figure, if `ax` hasn't been specified.\n if not ax:\n figsize = (7.5, 5) if interaction_index != ind and interaction_index is not None else (6, 5)\n fig = pl.figure(figsize=figsize)\n ax = fig.gca()\n else:\n fig = ax.get_figure()\n\n # plotting SHAP interaction values\n if len(shap_values.shape) == 3 and hasattr(ind, \"__len__\") and len(ind) == 2:\n ind1 = convert_name(ind[0], shap_values, feature_names)\n ind2 = convert_name(ind[1], shap_values, feature_names)\n if ind1 == ind2:\n proj_shap_values = shap_values[:, ind2, :]\n else:\n proj_shap_values = shap_values[:, ind2, :] * 2 # off-diag values are split in half\n\n # there is no interaction coloring for the main effect\n if ind1 == ind2:\n fig.set_size_inches(6, 5, forward=True)\n\n # TODO: remove recursion; generally the functions should be shorter for more maintainable code\n dependence_legacy(\n ind1, proj_shap_values, features, feature_names=feature_names,\n interaction_index=(None if ind1 == ind2 else ind2), display_features=display_features, ax=ax, show=False,\n xmin=xmin, xmax=xmax, x_jitter=x_jitter, alpha=alpha\n )\n if ind1 == ind2:\n ax.set_ylabel(labels['MAIN_EFFECT'] % feature_names[ind1])\n else:\n ax.set_ylabel(labels['INTERACTION_EFFECT'] % (feature_names[ind1], feature_names[ind2]))\n\n if show:\n pl.show()\n return\n\n assert shap_values.shape[0] == features.shape[0], \\\n \"'shap_values' and 'features' values must have the same number of rows!\"\n assert shap_values.shape[1] == features.shape[1], \\\n \"'shap_values' must have the same number of columns as 'features'!\"\n\n # get both the raw and display feature values\n oinds = np.arange(shap_values.shape[0]) # we randomize the ordering so plotting overlaps are not related to data ordering\n np.random.shuffle(oinds)\n \n xv = encode_array_if_needed(features[oinds, ind])\n\n xd = display_features[oinds, ind]\n s = shap_values[oinds, ind]\n if type(xd[0]) == str:\n name_map = {}\n for i in range(len(xv)):\n name_map[xd[i]] = xv[i]\n xnames = list(name_map.keys())\n\n # allow a single feature name to be passed alone\n if type(feature_names) == str:\n feature_names = [feature_names]\n\n name = feature_names[ind]\n\n if 
bool(feature_map) == True:\n name = feature_map[feature_names[ind]]\n\n # get both the raw and display color values\n color_norm = None\n if interaction_index is not None:\n interaction_feature_values = encode_array_if_needed(features[:, interaction_index])\n cv = interaction_feature_values\n cd = display_features[:, interaction_index]\n clow = np.nanpercentile(cv.astype(np.float), 5)\n chigh = np.nanpercentile(cv.astype(np.float), 95)\n if clow == chigh:\n clow = np.nanmin(cv.astype(np.float))\n chigh = np.nanmax(cv.astype(np.float))\n if type(cd[0]) == str:\n cname_map = {}\n for i in range(len(cv)):\n cname_map[cd[i]] = cv[i]\n cnames = list(cname_map.keys())\n categorical_interaction = True\n elif clow % 1 == 0 and chigh % 1 == 0 and chigh - clow < 10:\n categorical_interaction = True\n\n # discritize colors for categorical features\n if categorical_interaction and clow != chigh:\n clow = np.nanmin(cv.astype(np.float))\n chigh = np.nanmax(cv.astype(np.float))\n bounds = np.linspace(clow, chigh, min(int(chigh - clow + 2), cmap.N-1))\n color_norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N-1)\n\n # optionally add jitter to feature values\n if x_jitter > 0:\n if x_jitter > 1: x_jitter = 1\n xvals = xv.copy()\n if isinstance(xvals[0], float):\n xvals = xvals.astype(np.float)\n xvals = xvals[~np.isnan(xvals)]\n xvals = np.unique(xvals) # returns a sorted array\n if len(xvals) >= 2:\n smallest_diff = np.min(np.diff(xvals))\n jitter_amount = x_jitter * smallest_diff\n xv += (np.random.random_sample(size = len(xv))*jitter_amount) - (jitter_amount/2)\n\n # the actual scatter plot, TODO: adapt the dot_size to the number of data points?\n xv_nan = np.isnan(xv)\n xv_notnan = np.invert(xv_nan)\n if interaction_index is not None:\n\n # plot the nan values in the interaction feature as grey\n cvals = interaction_feature_values[oinds].astype(np.float64)\n cvals_imp = cvals.copy()\n cvals_imp[np.isnan(cvals)] = (clow + chigh) / 2.0\n cvals[cvals_imp > chigh] = chigh\n cvals[cvals_imp < clow] = clow\n p = ax.scatter(\n xv[xv_notnan], s[xv_notnan], s=dot_size, linewidth=0, c=cvals[xv_notnan],\n cmap=cmap, alpha=alpha, vmin=clow, vmax=chigh,\n norm=color_norm, rasterized=len(xv) > 500\n )\n p.set_array(cvals[xv_notnan])\n else:\n p = ax.scatter(xv, s, s=dot_size, linewidth=0, color=color,\n alpha=alpha, rasterized=len(xv) > 500)\n\n if interaction_index != ind and interaction_index is not None:\n # draw the color bar\n if type(cd[0]) == str:\n tick_positions = [cname_map[n] for n in cnames]\n if len(tick_positions) == 2:\n tick_positions[0] -= 0.25\n tick_positions[1] += 0.25\n cb = pl.colorbar(p, ticks=tick_positions, ax=ax)\n cb.set_ticklabels(cnames)\n else:\n cb = pl.colorbar(p, ax=ax)\n\n if bool(feature_map) == True:\n cb.set_label(feature_map[feature_names[interaction_index]], size=13)\n else:\n cb.set_label(feature_names[interaction_index], size=13)\n\n cb.ax.tick_params(labelsize=11)\n if categorical_interaction:\n cb.ax.tick_params(length=0)\n cb.set_alpha(1)\n cb.outline.set_visible(False)\n bbox = cb.ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())\n # cb.ax.set_aspect((bbox.height - 0.7) * 20)\n\n # handles any setting of xmax and xmin\n # note that we handle None,float, or \"percentile(float)\" formats\n if xmin is not None or xmax is not None:\n if type(xmin) == str and xmin.startswith(\"percentile\"):\n xmin = np.nanpercentile(xv, float(xmin[11:-1]))\n if type(xmax) == str and xmax.startswith(\"percentile\"):\n xmax = np.nanpercentile(xv, float(xmax[11:-1]))\n\n if 
xmin is None or xmin == np.nanmin(xv):\n xmin = np.nanmin(xv) - (xmax - np.nanmin(xv))/20\n if xmax is None or xmax == np.nanmax(xv):\n xmax = np.nanmax(xv) + (np.nanmax(xv) - xmin)/20\n\n ax.set_xlim(xmin, xmax)\n\n # plot any nan feature values as tick marks along the y-axis\n xlim = ax.get_xlim()\n if interaction_index is not None:\n p = ax.scatter(\n xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,\n linewidth=2, c=cvals_imp[xv_nan], cmap=cmap, alpha=alpha,\n vmin=clow, vmax=chigh\n )\n p.set_array(cvals[xv_nan])\n else:\n ax.scatter(\n xlim[0] * np.ones(xv_nan.sum()), s[xv_nan], marker=1,\n linewidth=2, color=color, alpha=alpha\n )\n ax.set_xlim(xlim)\n\n # make the plot more readable\n ax.set_xlabel(name, color=axis_color, fontsize=13)\n ax.set_ylabel(labels['VALUE_FOR'] % name, color=axis_color, fontsize=13)\n if title is not None:\n ax.set_title(title, color=axis_color, fontsize=13)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\n ax.tick_params(color=axis_color, labelcolor=axis_color, labelsize=11)\n for spine in ax.spines.values():\n spine.set_edgecolor(axis_color)\n if type(xd[0]) == str:\n ax.set_xticks([name_map[n] for n in xnames])\n ax.set_xticklabels(xnames, dict(rotation='vertical', fontsize=11))\n if show:\n with warnings.catch_warnings(): # ignore expected matplotlib warnings\n warnings.simplefilter(\"ignore\", RuntimeWarning)\n pl.show()\n"
] |
[
[
"numpy.nanmax",
"matplotlib.pyplot.legend",
"matplotlib.colors.BoundaryNorm",
"numpy.nanmin",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.hstack",
"numpy.unique",
"numpy.arange",
"numpy.diff",
"matplotlib.pyplot.figure",
"numpy.invert",
"numpy.min",
"numpy.isnan",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.abs",
"matplotlib.pyplot.sca",
"numpy.random.shuffle",
"numpy.sort",
"matplotlib.pyplot.colorbar"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ngthianhphuong/disaster-response
|
[
"7585dae9187edf3b364fdb16570f52516ac56bc4"
] |
[
"data/process_data.py"
] |
[
"import sys\nimport pandas as pd \nfrom sqlalchemy import create_engine\n\ndef load_data(messages_filepath, categories_filepath):\n '''Function to load and merge data from 2 input datasets that contain messages and categories.\n Args:\n messages_filepath: path to dataset that contains messages\n categories_filepath: path to dataset that contains categories\n Returns:\n df: merged dataset from messages and categories datasets\n '''\n # load data from 2 provided datasets\n messages = pd.read_csv(messages_filepath)\n categories = pd.read_csv(categories_filepath)\n \n # drop duplicated rows\n messages = messages.drop_duplicates()\n categories = categories.drop_duplicates()\n \n # same IDs may correspond to different messages or categories\n # drop them to obtain unique IDs in each dataset before merging\n messages.drop_duplicates(subset='id', keep = False, inplace = True)\n categories.drop_duplicates(subset='id', keep = False, inplace = True)\n \n # merge on IDs\n df = pd.merge(messages, categories, on='id', how='inner')\n return df\n\ndef clean_data(df):\n '''Function to clean data, essentially to split categories column so that each category becomes a separate column.\n Args: \n df: dataframe obtained from load_data() function\n Returns:\n df_new: dataframe after cleaning\n '''\n # create a new df named cats_df that contains 36 categories columns\n cats_df = df.categories.str.split(';', expand=True)\n \n # use first row of cats_df dataframe to extract a list of new column names for cats_df\n # rename the columns of cats_df\n cats_df.columns = cats_df.iloc[0,:].apply(lambda x: x[:-2])\n \n # convert category values to just numbers 0 or 1\n for column in cats_df:\n # set each value to be the last character of the string\n cats_df[column] = cats_df[column].apply(lambda x: x[-1])\n # convert column from string to numeric\n cats_df[column] = pd.to_numeric(cats_df[column])\n \n # drop the original categories column from `df`\n df.drop(columns= 'categories', axis = 1, inplace = True)\n # concatenate the original df with the new cats_df dataframe\n df_new = pd.concat([df, cats_df], axis = 1)\n return df_new\n\ndef save_data(df, database_filename):\n '''Function to save cleaned dataframe to database.\n Args:\n df: dataframe after cleaning, obtained from clean_data() function\n database_filename: database file name. Example: MyDatabase.db\n Returns:\n None\n '''\n # save the clean dataset into an sqlite database\n engine = create_engine('sqlite:///{}'.format(database_filename))\n df.to_sql(database_filename, engine, index=False, if_exists='replace') \n \ndef main():\n if len(sys.argv) == 4:\n\n messages_filepath, categories_filepath, database_filepath = sys.argv[1:]\n\n print('Loading data...\\n MESSAGES: {}\\n CATEGORIES: {}'\n .format(messages_filepath, categories_filepath))\n df = load_data(messages_filepath, categories_filepath)\n\n print('Cleaning data...')\n df = clean_data(df)\n \n print('Saving data...\\n DATABASE: {}'.format(database_filepath))\n save_data(df, database_filepath)\n \n print('Cleaned data saved to database!')\n \n else:\n print('Please provide the filepaths of the messages and categories '\\\n 'datasets as the first and second argument respectively, as '\\\n 'well as the filepath of the database to save the cleaned data '\\\n 'to as the third argument. \\n\\nExample: python process_data.py '\\\n 'disaster_messages.csv disaster_categories.csv '\\\n 'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n main()"
] |
[
[
"pandas.merge",
"pandas.read_csv",
"pandas.to_numeric",
"pandas.concat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
cizhenshi/mmdetection
|
[
"b0fe89677020ebe9e6a736b98d3e791ca0e6536d"
] |
[
"mmdet/models/losses/cross_entropy_loss.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..registry import LOSSES\nfrom .utils import weight_reduce_loss\nfrom icecream import ic\n\ndef cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):\n # element-wise losses\n loss = F.cross_entropy(pred, label, reduction='none')\n\n # apply weights and do the reduction\n if weight is not None:\n weight = weight.float()\n loss = weight_reduce_loss(\n loss, weight=weight, reduction=reduction, avg_factor=avg_factor)\n\n return loss\n\n\ndef _expand_binary_labels(labels, label_weights, label_channels):\n bin_labels = labels.new_full((labels.size(0), label_channels), 0)\n inds = torch.nonzero(labels >= 1).squeeze()\n if inds.numel() > 0:\n bin_labels[inds, labels[inds] - 1] = 1\n if label_weights is None:\n bin_label_weights = None\n else:\n bin_label_weights = label_weights.view(-1, 1).expand(\n label_weights.size(0), label_channels)\n return bin_labels, bin_label_weights\n\n\ndef binary_cross_entropy(pred,\n label,\n weight=None,\n reduction='mean',\n avg_factor=None):\n if pred.dim() != label.dim():\n label, weight = _expand_binary_labels(label, weight, pred.size(-1))\n\n # weighted element-wise losses\n if weight is not None:\n weight = weight.float()\n loss = F.binary_cross_entropy_with_logits(\n pred, label.float(), weight, reduction='none')\n # do the reduction for the weighted loss\n loss = weight_reduce_loss(loss, reduction=reduction, avg_factor=avg_factor)\n\n return loss\n\n\ndef mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):\n # TODO: handle these two reserved arguments\n assert reduction == 'mean' and avg_factor is None\n num_rois = pred.size()[0]\n inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)\n pred_slice = pred[inds, label].squeeze(1)\n return F.binary_cross_entropy_with_logits(\n pred_slice, target, reduction='mean')[None]\n\n\[email protected]_module\nclass CrossEntropyLoss(nn.Module):\n\n def __init__(self,\n use_sigmoid=False,\n use_mask=False,\n reduction='mean',\n loss_weight=1.0):\n super(CrossEntropyLoss, self).__init__()\n assert (use_sigmoid is False) or (use_mask is False)\n self.use_sigmoid = use_sigmoid\n self.use_mask = use_mask\n self.reduction = reduction\n self.loss_weight = loss_weight\n\n if self.use_sigmoid:\n self.cls_criterion = binary_cross_entropy\n elif self.use_mask:\n self.cls_criterion = mask_cross_entropy\n else:\n self.cls_criterion = cross_entropy\n\n def forward(self,\n cls_score,\n label,\n weight=None,\n avg_factor=None,\n reduction_override=None,\n **kwargs):\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = (\n reduction_override if reduction_override else self.reduction)\n loss_cls = self.loss_weight * self.cls_criterion(\n cls_score,\n label,\n weight,\n reduction=reduction,\n avg_factor=avg_factor,\n **kwargs)\n return loss_cls\n"
] |
[
[
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.functional.cross_entropy",
"torch.nonzero",
"torch.arange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
keiv-fly/wot_ai
|
[
"8f073968ae7c4eb88351ebf99fb2428a9862ab75"
] |
[
"find_service/gray_conv_cy/setup.py"
] |
[
"from distutils.core import setup\nfrom distutils.extension import Extension\nfrom Cython.Build import cythonize\nimport numpy as np\n\next_modules=[\n Extension(\"gray_conv_cy\",\n sources=[\"gray_conv_cy.pyx\"],\n include_dirs=[np.get_include()]\n )\n]\n\nsetup(\n name = \"find_close_cy\",\n ext_modules = cythonize(ext_modules,\n annotate=True)\n)"
] |
[
[
"numpy.get_include"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rohitsroch/recurrent_bert
|
[
"9b47acd081433280fd68849a07ca3ca9b924a1f6"
] |
[
"estimator/modeling_test.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport random\nimport re\n\nimport modeling\nimport six\nimport tensorflow as tf\n\n\nclass BertModelTest(tf.test.TestCase):\n\n class BertModelTester(object):\n\n def __init__(self,\n parent,\n batch_size=13,\n seq_length=7,\n is_training=True,\n use_input_mask=True,\n use_token_type_ids=True,\n vocab_size=99,\n hidden_size=32,\n num_hidden_layers=5,\n num_attention_heads=4,\n intermediate_size=37,\n hidden_act=\"gelu\",\n hidden_dropout_prob=0.1,\n attention_probs_dropout_prob=0.1,\n max_position_embeddings=512,\n type_vocab_size=16,\n initializer_range=0.02,\n scope=None):\n self.parent = parent\n self.batch_size = batch_size\n self.seq_length = seq_length\n self.is_training = is_training\n self.use_input_mask = use_input_mask\n self.use_token_type_ids = use_token_type_ids\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.hidden_dropout_prob = hidden_dropout_prob\n self.attention_probs_dropout_prob = attention_probs_dropout_prob\n self.max_position_embeddings = max_position_embeddings\n self.type_vocab_size = type_vocab_size\n self.initializer_range = initializer_range\n self.scope = scope\n\n def create_model(self):\n input_ids = BertModelTest.ids_tensor([self.batch_size,\n self.seq_length],\n self.vocab_size)\n\n input_mask = None\n if self.use_input_mask:\n input_mask = BertModelTest.ids_tensor(\n [self.batch_size, self.seq_length], vocab_size=2)\n\n token_type_ids = None\n if self.use_token_type_ids:\n token_type_ids = BertModelTest.ids_tensor(\n [self.batch_size, self.seq_length], self.type_vocab_size)\n\n config = modeling.BertConfig(\n vocab_size=self.vocab_size,\n hidden_size=self.hidden_size,\n num_hidden_layers=self.num_hidden_layers,\n num_attention_heads=self.num_attention_heads,\n intermediate_size=self.intermediate_size,\n hidden_act=self.hidden_act,\n hidden_dropout_prob=self.hidden_dropout_prob,\n attention_probs_dropout_prob=self.attention_probs_dropout_prob,\n max_position_embeddings=self.max_position_embeddings,\n type_vocab_size=self.type_vocab_size,\n initializer_range=self.initializer_range)\n\n model = modeling.BertModel(\n config=config,\n is_training=self.is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=token_type_ids,\n scope=self.scope)\n\n outputs = {\n \"embedding_output\": model.get_embedding_output(),\n \"sequence_output\": model.get_sequence_output(),\n \"pooled_output\": model.get_pooled_output(),\n \"all_encoder_layers\": model.get_all_encoder_layers(),\n }\n return outputs\n\n def check_output(self, result):\n self.parent.assertAllEqual(\n 
result[\"embedding_output\"].shape,\n [self.batch_size, self.seq_length, self.hidden_size])\n\n self.parent.assertAllEqual(\n result[\"sequence_output\"].shape,\n [self.batch_size, self.seq_length, self.hidden_size])\n\n self.parent.assertAllEqual(result[\"pooled_output\"].shape,\n [self.batch_size, self.hidden_size])\n\n def test_default(self):\n self.run_tester(BertModelTest.BertModelTester(self))\n\n def test_config_to_json_string(self):\n config = modeling.BertConfig(vocab_size=99, hidden_size=37)\n obj = json.loads(config.to_json_string())\n self.assertEqual(obj[\"vocab_size\"], 99)\n self.assertEqual(obj[\"hidden_size\"], 37)\n\n def run_tester(self, tester):\n with self.test_session() as sess:\n ops = tester.create_model()\n init_op = tf.group(tf.compat.v1.global_variables_initializer(),\n tf.compat.v1.local_variables_initializer())\n sess.run(init_op)\n output_result = sess.run(ops)\n tester.check_output(output_result)\n\n self.assert_all_tensors_reachable(sess, [init_op, ops])\n\n @classmethod\n def ids_tensor(cls, shape, vocab_size, rng=None, name=None):\n \"\"\"Creates a random int32 tensor of the shape within the vocab size.\"\"\"\n if rng is None:\n rng = random.Random()\n\n total_dims = 1\n for dim in shape:\n total_dims *= dim\n\n values = []\n for _ in range(total_dims):\n values.append(rng.randint(0, vocab_size - 1))\n\n return tf.constant(value=values, dtype=tf.int32,\n shape=shape, name=name)\n\n def assert_all_tensors_reachable(self, sess, outputs):\n \"\"\"Checks that all the tensors in graph are reachable from outputs.\"\"\"\n graph = sess.graph\n\n ignore_strings = [\n \"^.*/assert_less_equal/.*$\",\n \"^.*/dilation_rate$\",\n \"^.*/Tensordot/concat$\",\n \"^.*/Tensordot/concat/axis$\",\n \"^testing/.*$\",\n ]\n\n ignore_regexes = [re.compile(x) for x in ignore_strings]\n\n unreachable = self.get_unreachable_ops(graph, outputs)\n filtered_unreachable = []\n for x in unreachable:\n do_ignore = False\n for r in ignore_regexes:\n m = r.match(x.name)\n if m is not None:\n do_ignore = True\n if do_ignore:\n continue\n filtered_unreachable.append(x)\n unreachable = filtered_unreachable\n\n self.assertEqual(\n len(unreachable), 0, \"The following ops are unreachable: %s\" %\n (\" \".join([x.name for x in unreachable])))\n\n @classmethod\n def get_unreachable_ops(cls, graph, outputs):\n \"\"\"Finds all of the tensors in graph that \\\n are unreachable from outputs.\"\"\"\n outputs = cls.flatten_recursive(outputs)\n output_to_op = collections.defaultdict(list)\n op_to_all = collections.defaultdict(list)\n assign_out_to_in = collections.defaultdict(list)\n\n for op in graph.get_operations():\n for x in op.inputs:\n op_to_all[op.name].append(x.name)\n for y in op.outputs:\n output_to_op[y.name].append(op.name)\n op_to_all[op.name].append(y.name)\n if str(op.type) == \"Assign\":\n for y in op.outputs:\n for x in op.inputs:\n assign_out_to_in[y.name].append(x.name)\n\n assign_groups = collections.defaultdict(list)\n for out_name in assign_out_to_in.keys():\n name_group = assign_out_to_in[out_name]\n for n1 in name_group:\n assign_groups[n1].append(out_name)\n for n2 in name_group:\n if n1 != n2:\n assign_groups[n1].append(n2)\n\n seen_tensors = {}\n stack = [x.name for x in outputs]\n while stack:\n name = stack.pop()\n if name in seen_tensors:\n continue\n seen_tensors[name] = True\n\n if name in output_to_op:\n for op_name in output_to_op[name]:\n if op_name in op_to_all:\n for input_name in op_to_all[op_name]:\n if input_name not in stack:\n stack.append(input_name)\n\n 
expanded_names = []\n if name in assign_groups:\n for assign_name in assign_groups[name]:\n expanded_names.append(assign_name)\n\n for expanded_name in expanded_names:\n if expanded_name not in stack:\n stack.append(expanded_name)\n\n unreachable_ops = []\n for op in graph.get_operations():\n is_unreachable = False\n all_names = [x.name for x in op.inputs] + \\\n [x.name for x in op.outputs]\n for name in all_names:\n if name not in seen_tensors:\n is_unreachable = True\n if is_unreachable:\n unreachable_ops.append(op)\n return unreachable_ops\n\n @classmethod\n def flatten_recursive(cls, item):\n \"\"\"Flattens (potentially nested) a tuple/dictionary/list to a list.\"\"\"\n output = []\n if isinstance(item, list):\n output.extend(item)\n elif isinstance(item, tuple):\n output.extend(list(item))\n elif isinstance(item, dict):\n for (_, v) in six.iteritems(item):\n output.append(v)\n else:\n return [item]\n\n flat_output = []\n for x in output:\n flat_output.extend(cls.flatten_recursive(x))\n return flat_output\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] |
[
[
"tensorflow.compat.v1.local_variables_initializer",
"tensorflow.constant",
"tensorflow.test.main",
"tensorflow.compat.v1.global_variables_initializer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
MikhailRyazanov/PyAbel
|
[
"38728a6ef2321d1325ad96597f56a835de8423be",
"38728a6ef2321d1325ad96597f56a835de8423be",
"38728a6ef2321d1325ad96597f56a835de8423be"
] |
[
"abel/tools/polar.py",
"abel/tools/circularize.py",
"abel/tests/test_tools_center.py"
] |
[
"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport numpy as np\nfrom scipy.ndimage import map_coordinates\nfrom scipy.ndimage.interpolation import shift\nfrom scipy.optimize import curve_fit, minimize\n\n\ndef reproject_image_into_polar(data, origin=None, Jacobian=False,\n dr=1, dt=None):\n \"\"\"\n Reprojects a 2D numpy array (**data**) into a polar coordinate system,\n with the pole placed at **origin** and the angle measured clockwise from\n the upward direction. The resulting array has rows corresponding to the\n radial grid, and columns corresponding to the angular grid.\n\n Parameters\n ----------\n data : 2D np.array\n the image array\n origin : tuple or None\n (row, column) coordinates of the image origin. If ``None``, the\n geometric center of the image is used.\n Jacobian : bool\n Include `r` intensity scaling in the coordinate transform.\n This should be included to account for the changing pixel size that\n occurs during the transform.\n dr : float\n radial coordinate spacing for the grid interpolation.\n Tests show that there is not much point in going below 0.5.\n dt : float or None\n angular coordinate spacing (in radians).\n If ``None``, the number of angular grid points will be set to the\n largest dimension (the height or the width) of the image.\n\n Returns\n -------\n output : 2D np.array\n the polar image (r, theta)\n r_grid : 2D np.array\n meshgrid of radial coordinates\n theta_grid : 2D np.array\n meshgrid of angular coordinates\n\n Notes\n -----\n Adapted from:\n https://stackoverflow.com/questions/3798333/image-information-along-a-polar-coordinate-system\n\n \"\"\"\n ny, nx = data.shape[:2]\n if origin is None:\n origin = (ny // 2, nx // 2)\n else:\n origin = list(origin)\n # wrap negative coordinates\n if origin[0] < 0:\n origin[0] += ny\n if origin[1] < 0:\n origin[1] += nx\n\n # Determine what the min and max r and theta coords will be...\n x, y = index_coords(data, origin=origin) # (x,y) coordinates of each pixel\n r, theta = cart2polar(x, y) # convert (x,y) -> (r,θ), note θ=0 is vertical\n\n nr = int(np.ceil((r.max() - r.min()) / dr))\n\n if dt is None:\n nt = max(nx, ny)\n else:\n # dt in radians\n nt = int(np.ceil((theta.max() - theta.min()) / dt))\n\n # Make a regular (in polar space) grid based on the min and max r & theta\n r_i = np.linspace(r.min(), r.max(), nr, endpoint=False)\n theta_i = np.linspace(theta.min(), theta.max(), nt, endpoint=False)\n theta_grid, r_grid = np.meshgrid(theta_i, r_i)\n\n # Convert the r and theta grids to Cartesian coordinates\n X, Y = polar2cart(r_grid, theta_grid)\n # then to a 2×n array of row and column indices for np.map_coordinates()\n rowi = (origin[0] - Y).flatten()\n coli = (X + origin[1]).flatten()\n coords = np.vstack((rowi, coli))\n\n # Remap with interpolation\n # (making an array of floats even if the data has an integer type)\n zi = map_coordinates(data, coords, output=float)\n output = zi.reshape((nr, nt))\n\n if Jacobian:\n output *= r_i[:, np.newaxis]\n\n return output, r_grid, theta_grid\n\n\ndef index_coords(data, origin=None):\n \"\"\"\n Creates `x` and `y` coordinates for the indices in a numpy array, relative\n to the **origin**, with the `x` axis going to the right, and the `y` axis\n going `up`.\n\n Parameters\n ----------\n data : numpy array\n 2D data. Only the array shape is used.\n origin : tuple or None\n (row, column). 
Defaults to the geometric center of the image.\n\n Returns\n -------\n x, y : 2D numpy arrays\n \"\"\"\n ny, nx = data.shape[:2]\n if origin is None:\n origin_x, origin_y = nx // 2, ny // 2\n else:\n origin_y, origin_x = origin\n # wrap negative coordinates\n if origin_y < 0:\n origin_y += ny\n if origin_x < 0:\n origin_x += nx\n\n x, y = np.meshgrid(np.arange(float(nx)) - origin_x,\n origin_y - np.arange(float(ny)))\n return x, y\n\n\ndef cart2polar(x, y):\n \"\"\"\n Transform Cartesian coordinates to polar.\n\n Parameters\n ----------\n x, y : floats or arrays\n Cartesian coordinates\n\n Returns\n -------\n r, theta : floats or arrays\n Polar coordinates\n\n \"\"\"\n r = np.sqrt(x**2 + y**2)\n theta = np.arctan2(x, y) # θ referenced to vertical\n return r, theta\n\n\ndef polar2cart(r, theta):\n \"\"\"\n Transform polar coordinates to Cartesian.\n\n Parameters\n -------\n r, theta : floats or arrays\n Polar coordinates\n\n Returns\n ----------\n x, y : floats or arrays\n Cartesian coordinates\n \"\"\"\n y = r * np.cos(theta) # θ referenced to vertical\n x = r * np.sin(theta)\n return x, y\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport numpy as np\nfrom scipy.ndimage.interpolation import map_coordinates\nfrom scipy.interpolate import UnivariateSpline, splrep, splev\nfrom scipy.optimize import leastsq\n\nimport abel\n\nfrom abel import _deprecated, _deprecate\n\n#########################################################################\n# circularize.py\n#\n# Image circularization by following peak intensity vs angle\n# see https://github.com/PyAbel/PyAbel/issues/186 for discussion\n# and https://github.com/PyAbel/PyAbel/pull/195\n#\n# Steve Gibson and Dan Hickstein - ideas/code\n# Jason Gascooke - ideas\n#\n# February 2017\n#########################################################################\n\n\ndef circularize_image(IM, method=\"lsq\", origin=None, radial_range=None,\n dr=0.5, dt=0.5, smooth=_deprecated, ref_angle=None,\n inverse=False, return_correction=False, tol=0,\n center=_deprecated):\n r\"\"\"\n Corrects image distortion on the basis that the structure should be\n circular.\n\n This is a simplified radial scaling version of the algorithm described in\n J. R. Gascooke, S. T. Gibson, W. D. Lawrance,\n \"A 'circularisation' method to repair deformations and determine the centre\n of velocity map images\",\n `J. Chem. Phys. 147, 013924 (2017)\n <https://dx.doi.org/10.1063/1.4981024>`_.\n\n This function is especially useful for correcting the image obtained with\n a velocity-map-imaging spectrometer, in the case where there is distortion\n of the Newton sphere (ring) structure due to an imperfect electrostatic\n lens or stray electromagnetic fields. The correction allows the\n highest-resolution 1D photoelectron distribution to be extracted.\n\n The algorithm splits the image into \"slices\" at many different angles\n (set by **dt**) and compares the radial intensity profile of adjacent slices.\n A scaling factor is found which aligns each slice profile with the previous\n slice. The image is then corrected using a spline function that smoothly\n connects the discrete scaling factors as a continuous function of angle.\n\n This circularization algorithm should only be applied to a well-centered\n image, otherwise use the **origin** keyword (described below) to\n center it.\n\n\n Parameters\n ----------\n IM : numpy 2D array\n Image to be circularized.\n\n method : str\n Method used to determine the radial correction factor to align slice\n profiles:\n\n ``argmax``\n compare intensity-profile.argmax() of each radial slice.\n This method is quick and reliable, but it assumes that\n the radial intensity profile has an obvious maximum.\n The positioning is limited to the nearest pixel.\n\n ``lsq``\n minimize the difference between a slice intensity-profile\n with its adjacent slice.\n This method is slower and may fail to converge, but it\n may be applied to images with any (circular) structure.\n It aligns the slices with sub-pixel precision.\n\n origin : float tuple, str or None\n Pre-center image using :func:`abel.tools.center.center_image`.\n May be an explicit (row, column) tuple or a method name: ``'com'``,\n ``'convolution'``, ``'gaussian;``, ``'image_center'``, ``'slice'``.\n ``None`` (default) assumes that the image is already centered.\n\n radial_range : tuple or None\n Limit slice comparison to the radial range tuple (rmin, rmax), in\n pixels, from the image origin. Use to determine the distortion\n correction associated with particular peaks. 
It is recommended to\n select a region of your image where the signal-to-noise ratio is\n highest, with sharp persistent (in angle) features.\n\n dr : float\n Radial grid size for the polar coordinate image, default = 0.5 pixel.\n This is passed to :func:`abel.tools.polar.reproject_image_into_polar`.\n\n Small values may improve the distortion correction, which is often of\n sub-pixel dimensions, at the cost of reduced signal to noise for the\n slice intensity profile. As a general rule, `dr` should be\n significantly smaller than the radial \"feature size\" in the image.\n\n dt : float\n Angular grid size. This sets the number of radial slices, given by\n :math:`2\\pi/dt`. Default = 0.1, ~ 63 slices. More slices, using\n smaller `dt`, may provide a more detailed angular variation of the\n correction, at the cost of greater signal to noise in the correction\n function.\n\n Also passed to :func:`abel.tools.polar.reproject_image_into_polar`.\n\n smooth : float\n Deprecated, use **tol** instead. The relationship is\n **smooth** = `N`\\ :sub:`angles` × **tol**\\ :sup:`2`,\n where `N`\\ :sub:`angles` is the number of slices (see **dt**).\n\n ref_angle : None or float\n Reference angle for which radial coordinate is unchanged.\n Angle varies between :math:`-\\pi` and :math:`\\pi`, with zero angle\n vertical.\n\n ``None`` uses :func:`numpy.mean` of the radial correction function,\n which attempts to maintain the same average radial scaling. This\n approximation is likely valid, unless you know for certain that a\n specific angle of your image corresponds to an undistorted image.\n\n inverse : bool\n Apply an inverse Abel transform to the `polar`-coordinate image, to\n remove the background intensity. This may improve the signal-to-noise\n ratio, allowing the weaker intensity featured to be followed in angle.\n\n Note that this step is only for the purposes of allowing the algorithm\n to better follow peaks in the image. It does not affect the final\n image that is returned, except for (hopefully) slightly improving the\n precision of the distortion correction.\n\n return_correction : bool\n Additional outputs, as describe below.\n\n tol : float\n Root-mean-square (RMS) fitting tolerance for the spline function. At\n the default zero value, the spline interpolates between the discrete\n scaling factors. At larger values, a smoother spline is found such that\n its RMS deviation from the discrete scaling factors does not exceed\n this number. For example, ``tol=0.01`` means 1% RMS tolerance for the\n radial scaling correction. At very large tolerances, the spline\n degenerates to a constant, the average of the discrete scaling factors.\n\n Typically, **tol** may remain zero (use interpolation), but noisy data\n may require some smoothing, since the found discrete scaling factors\n can have noticeable errors. 
To examine the relative scaling factors and\n how well they are represented by the spline function, use the option\n ``return_correction=True``.\n\n Returns\n -------\n IMcirc : numpy 2D array\n Circularized version of the input image, same size as input.\n\n The following values are returned if ``return_correction=True``:\n\n angles : numpy 1D array\n Mid-point angle (radians) of each image slice.\n\n radial_correction : numpy 1D array\n Radial correction scale factor at each angular slice.\n\n radial_correction_function : function(numpy 1D array)\n Function that may be used to evaluate the radial correction at any\n angle.\n\n \"\"\"\n if center is not _deprecated:\n _deprecate('abel.tools.circularize.circularize_image() '\n 'argument \"center\" is deprecated, use \"origin\" instead.')\n origin = center\n\n if origin is not None:\n # convenience function for the case image is not centered\n IM = abel.tools.center.center_image(IM, method=origin)\n\n # map image into polar coordinates - much easier to slice\n # cartesian (Y, X) -> polar (Radius, Theta)\n polarIM, radial_coord, angle_coord = \\\n abel.tools.polar.reproject_image_into_polar(IM, dr=dr, dt=dt)\n\n if inverse:\n # pseudo inverse Abel transform of the polar image, removes background\n # to enhance transition peaks\n polarIM = abel.dasch.two_point_transform(polarIM.T).T\n\n # more convenient 1-D coordinate arrays\n angles = angle_coord[0] # angle coordinate\n radial = radial_coord[:, 0] # radial coordinate\n\n # limit radial range of polar image, if selected\n if radial_range is not None:\n subr = np.logical_and(radial > radial_range[0],\n radial < radial_range[1])\n polarIM = polarIM[subr]\n radial = radial[subr]\n\n # evaluate radial correction factor that aligns each angular slice\n radcorr = correction(polarIM.T, angles, radial, method=method)\n\n if smooth is not _deprecated:\n _deprecate('abel.tools.circularize.circularize_image() '\n 'argument \"smooth\" is deprecated, use \"tol\" instead.')\n else:\n smooth = len(angles) * tol**2\n\n # periodic spline radial correction vs angle\n spl = splrep(np.append(angles, angles[0] + 2 * np.pi),\n np.append(radcorr, radcorr[0]), s=smooth, per=True)\n\n def radial_correction_function(angle):\n return splev(angle, spl)\n\n # apply the correction\n IMcirc = circularize(IM, radial_correction_function, ref_angle=ref_angle)\n\n if return_correction:\n return IMcirc, angles, radcorr, radial_correction_function\n else:\n return IMcirc\n\n\ndef circularize(IM, radial_correction_function, ref_angle=None):\n \"\"\"\n Remap image from its distorted grid to the true cartesian grid.\n\n Parameters\n ----------\n IM : numpy 2D array\n Original image\n\n radial_correction_function : function(numpy 1D array)\n A function returning the radial correction for a given angle. 
It\n should accept a numpy 1D array of angles.\n\n \"\"\"\n # cartesian coordinate system\n Y, X = np.indices(IM.shape)\n\n row, col = IM.shape\n origin = (col // 2, row // 2) # odd image\n\n # coordinates relative to center\n X -= origin[0]\n Y = origin[1] - Y # negative values below the axis\n theta = np.arctan2(X, Y) # referenced to vertical direction\n\n # radial scale factor at angle = ref_angle\n if ref_angle is None:\n factor = np.mean(radial_correction_function(theta))\n else:\n factor = radial_correction_function(ref_angle)\n\n # radial correction\n Xactual = X * factor / radial_correction_function(theta)\n Yactual = Y * factor / radial_correction_function(theta)\n\n # @DanHickstein magic\n # https://github.com/PyAbel/PyAbel/issues/186#issuecomment-275471271\n IMcirc = map_coordinates(IM, (origin[1] - Yactual, Xactual + origin[0]))\n\n return IMcirc\n\n\ndef _residual(param, radial, profile, previous):\n \"\"\" `scipy.optimize.leastsq` residuals function.\n\n Evaluate the difference between a radial-scaled intensity profile\n and its adjacent \"previous\" angular slice.\n\n \"\"\"\n\n radial_scaling, amplitude = param[0], param[1]\n\n newradial = radial * radial_scaling\n spline_prof = UnivariateSpline(newradial, profile, s=0, ext=3)\n newprof = spline_prof(radial) * amplitude\n\n # residual cf adjacent slice profile\n return newprof - previous\n\n\ndef correction(polarIMTrans, angles, radial, method):\n r\"\"\"\n Determines a radial correction factors that align an angular slice\n radial intensity profile with its adjacent (previous) slice profile.\n\n\n Parameters\n ----------\n polarIMTrans : numpy 2D array\n Polar coordinate image, transposed :math:`(\\theta, r)` so that each\n row is a single angle.\n\n angles : numpy 1D array\n Angle coordinates for one row of `polarIMTrans`.\n\n radial : numpy 1D array\n Radial coordinates for one column of `polarIMTrans`.\n\n method : str\n ``argmax``\n radial correction factor from position of maximum intensity.\n\n ``lsq``\n least-squares determine a radial correction factor that will align\n a radial intensity profile with the previous, adjacent slice.\n \"\"\"\n\n if method == \"argmax\":\n # follow position of intensity maximum\n pkpos = []\n\n for ang, aslice in zip(angles, polarIMTrans):\n profile = aslice\n pkpos.append(profile.argmax()) # store index of peak position\n\n # radial correction factor relative to peak max in first angular slice\n radcorr = radial[pkpos[0]] / radial[pkpos]\n\n elif method == \"lsq\":\n # least-squares radially scale intensity profile matching previous slice\n\n # initial guess fit parameters: radial correction factor, and amplitude\n fitpar = np.array([1.0, 1.0])\n\n # storage for the radial correction factors\n radcorr = []\n radcorr.append(1) # first slice nothing to compare with\n previous = polarIMTrans[0]\n\n for ang, aslice in zip(angles[1:], polarIMTrans[1:]):\n profile = aslice\n\n result = leastsq(_residual, fitpar, args=(radial, profile,\n previous))\n\n radcorr.append(result[0][0]) # radial scale factor direct from lsq\n\n previous += _residual(result[0], radial, profile, previous)\n # This \"previous\" slice corresponds to the previous slice intensity\n # profile that has been re-scaled. 
Thus, if the next slice is\n # identical, it will be assigned a scale factor of 1.0\n\n # use the determined radial scale factor, and amplitude parameters\n # for the next slice\n fitpar = result[0]\n\n else:\n raise ValueError(\"method variable must be one of 'argmax' or 'lsq',\"\n \" not '{}'\".format(method))\n\n return radcorr\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os.path\n\nimport itertools\n\nimport numpy as np\nfrom numpy.testing import assert_equal, assert_allclose\nfrom scipy.ndimage.interpolation import shift\n\nimport abel\nfrom abel.tools.center import find_origin, center_image, set_center\n\n\ndef test_find_origin():\n \"\"\"\n Test find_origin methods.\n \"\"\"\n size = [12, 13]\n row, col = 5.4, 6.6 # origin\n w = 3.0 # gaussian width parameter (sqrt(2) * sigma)\n for rows in size:\n y2 = ((np.arange(rows) - row) / w)**2\n for cols in size:\n x2 = ((np.arange(cols) - col) / w)**2\n data = np.exp(-(x2 + y2[:, None]))\n axes = (1, 0)\n # (not testing trivial 'image_center', which does not find origin)\n for method in ['com', 'convolution', 'gaussian', 'slice']:\n origin = find_origin(data, method, axes)\n ref = (row if 0 in axes else rows // 2,\n col if 1 in axes else cols // 2)\n tol = 0.2 # 'convolution' rounds to 0.5 pixels\n assert_allclose(origin, ref, atol=tol, verbose=False,\n err_msg='-> {} x {}, method = {}, axes = {}: '\n 'origin = {} not equal {}'.\n format(rows, cols, method, axes,\n origin, ref))\n\n\ndef test_set_center_int():\n \"\"\"\n Test whole-pixel shifts.\n \"\"\"\n # input sizes\n size = [4, 5]\n # input size, crop, origin -> output elements\n param = {4: {'maintain_size': [[None, '1234'],\n [0, '0012'],\n [1, '0123'],\n [2, '1234'],\n [3, '2340']],\n 'valid_region': [[None, '1234'],\n [0, '1'],\n [1, '123'],\n [2, '234'],\n [3, '4']],\n 'maintain_data': [[None, '1234'],\n [0, '0001234'],\n [1, '01234'],\n [2, '12340'],\n [3, '1234000']]},\n 5: {'maintain_size': [[None, '12345'],\n [0, '00123'],\n [1, '01234'],\n [2, '12345'],\n [3, '23450'],\n [4, '34500']],\n 'valid_region': [[None, '12345'],\n [0, '1'],\n [1, '123'],\n [2, '12345'],\n [3, '345'],\n [4, '5']],\n 'maintain_data': [[None, '12345'],\n [0, '000012345'],\n [1, '0012345'],\n [2, '12345'],\n [3, '1234500'],\n [4, '123450000']]}}\n # all size combinations\n for rows, cols in itertools.product(size, repeat=2):\n # test data: consecutive numbers from 1, row by row\n data = (np.arange(rows * cols) + 1).reshape((rows, cols))\n # all crop options\n for crop in ['maintain_size', 'valid_region', 'maintain_data']:\n # all origin rows\n for row, rref in param[rows][crop]:\n # vector or reference rows\n rref = np.array([int(n) for n in rref])\n # all origin columns\n for col, cref in param[cols][crop]:\n # vector of reference columns\n cref = np.array([int(n) for n in cref])\n # reference array\n ref = (rref[:, None] - 1) * cols + cref\n ref[rref == 0] = 0\n ref[:, cref == 0] = 0\n # check set_center() result\n result = set_center(data, (row, col), crop=crop)\n assert_equal(result, ref, verbose=False,\n err_msg='-> {} x {}, origin = {}, crop = {}\\n'\n 'result =\\n{}\\n'\n 'must be =\\n{}'.\n format(rows, cols, (row, col), crop,\n result, ref))\n\n\ndef test_set_center_float():\n \"\"\"\n Test fractional shifts.\n \"\"\"\n # input sizes\n size = [10, 11]\n # default origin coordinate (substituting None)\n default = 5.0\n # input size, origin, crop -> output size, non-zero range\n param = {10: [(None, {'maintain_size': [10, (0, 10)],\n 'valid_region': [10, (0, 10)],\n 'maintain_data': [10, (0, 10)]}),\n (2.5, {'maintain_size': [10, (2, 10)],\n 'valid_region': [5, (0, 5)],\n 'maintain_data': [15, (4, 15)]}),\n (3.5, {'maintain_size': [10, (1, 10)],\n 'valid_region': [7, (0, 7)],\n 'maintain_data': [13, (2, 13)]}),\n (4.5, {'maintain_size': [10, (0, 
10)],\n 'valid_region': [9, (0, 9)],\n 'maintain_data': [11, (0, 11)]}),\n (5.5, {'maintain_size': [10, (0, 10)],\n 'valid_region': [7, (0, 7)],\n 'maintain_data': [13, (0, 11)]}),\n (6.5, {'maintain_size': [10, (0, 9)],\n 'valid_region': [5, (0, 5)],\n 'maintain_data': [15, (0, 11)]})],\n 11: [(None, {'maintain_size': [11, (0, 11)],\n 'valid_region': [11, (0, 11)],\n 'maintain_data': [11, (0, 11)]}),\n (3.5, {'maintain_size': [11, (1, 11)],\n 'valid_region': [7, (0, 7)],\n 'maintain_data': [15, (3, 15)]}),\n (4.5, {'maintain_size': [11, (0, 11)],\n 'valid_region': [9, (0, 9)],\n 'maintain_data': [13, (1, 13)]}),\n (5.5, {'maintain_size': [11, (0, 11)],\n 'valid_region': [9, (0, 9)],\n 'maintain_data': [13, (0, 12)]}),\n (6.5, {'maintain_size': [11, (0, 10)],\n 'valid_region': [7, (0, 7)],\n 'maintain_data': [15, (0, 12)]})]}\n w = 2.0 # gaussian width parameter (sqrt(2) * sigma)\n # all size combinations\n for rows, cols in itertools.product(size, repeat=2):\n # all origin \"rows\"\n for row, rparam in param[rows]:\n y2 = ((np.arange(rows) - (row or default)) / w)**2\n # all origin \"columns\"\n for col, cparam in param[cols]:\n x2 = ((np.arange(cols) - (col or default)) / w)**2\n # test data: gaussian centered at (row, col)\n data = np.exp(-(x2 + y2[:, None]))\n # all crop options\n for crop in ['maintain_size', 'valid_region', 'maintain_data']:\n # check set_center() result\n result = set_center(data, (row, col), crop=crop)\n refrows, rrange = rparam[crop]\n refcols, crange = cparam[crop]\n refshape = (refrows, refcols)\n refrange = (slice(*rrange), slice(*crange))\n reforigin = (refrows // 2 if row else default,\n refcols // 2 if col else default)\n msg = '-> {} x {}, origin = {}, crop = {}: '.\\\n format(rows, cols, (row, col), crop)\n # shape\n assert_equal(result.shape, refshape, verbose=False,\n err_msg=msg + 'shape {} not equal {}'.\n format(result.shape, refshape))\n # non-zero data\n assert_equal(result[refrange] != 0, True,\n err_msg=msg + 'zeros in non-zero range')\n # zero padding\n tmp = result.copy()\n tmp[refrange] = 0\n assert_equal(tmp, 0, err_msg=msg +\n 'non-zeros outside non-zero range')\n # gaussian center\n origin = find_origin(result, 'gaussian')\n assert_allclose(origin, reforigin, atol=0.01,\n verbose=False, err_msg=msg +\n 'shifted center {} not equal {}'.\n format(origin, reforigin))\n\n\ndef test_set_center_axes():\n \"\"\"\n Test \"None\" origin components and axes selection.\n \"\"\"\n for N in [4, 5]:\n data = np.arange(N**2).reshape((N, N))\n c = N // 2\n msg = '-> N = {}, '.format(N)\n assert_equal(set_center(data, (None, None)),\n data,\n err_msg=msg + '(None, None)')\n assert_equal(set_center(data, (0, 0), axes=[]),\n data,\n err_msg=msg + '(0, 0), axes=[]')\n assert_equal(set_center(data, (0, None)),\n set_center(data, (0, c)),\n err_msg=msg + '(0, None)')\n assert_equal(set_center(data, (None, 0)),\n set_center(data, (c, 0)),\n err_msg=msg + '(None, 0)')\n assert_equal(set_center(data, (0, 0), axes=0),\n set_center(data, (0, c)),\n err_msg=msg + '(0, 0), axes=0')\n assert_equal(set_center(data, (0, 0), axes=1),\n set_center(data, (c, 0)),\n err_msg=msg + '(0, 0), axes=1')\n\n\ndef test_set_center_order():\n \"\"\"\n Test rounding for order = 0 and exact output for order = 1.\n \"\"\"\n data = data = np.ones((5, 5))\n origin = np.array([1.9, 2.2])\n # check origin rounding for order = 0\n assert_equal(set_center(data, origin, order=0),\n set_center(data, origin.round()),\n err_msg='-> order = 0 not equal round(origin)')\n # check output for order = 
1:\n # maintain_size\n result = set_center(data, origin, 'maintain_size', order=1)\n ref = np.outer([0.9, 1, 1, 1, 1],\n [1, 1, 1, 1, 0.8])\n assert_allclose(result, ref,\n err_msg='-> crop = maintain_size, order = 1')\n # valid_region\n result = set_center(data, origin, 'valid_region', order=1)\n ref = np.ones((3, 3))\n assert_allclose(result, ref,\n err_msg='-> crop = valid_region, order = 1')\n # maintain_data\n result = set_center(data, origin, 'maintain_data', order=1)\n ref = np.outer([0, 0.9, 1, 1, 1, 1, 0.1],\n [0.2, 1, 1, 1, 1, 0.8, 0])\n assert_allclose(result, ref,\n err_msg='-> crop = maintain_data, order = 1')\n\n\ndef test_center_image():\n\n # BASEX sample image, Gaussians at 10, 15, 20, 70,85, 100, 145, 150, 155\n # image width, height n = 361, origin = (180, 180)\n IM = abel.tools.analytical.SampleImage(n=361, name=\"dribinski\").image\n\n # artificially displace origin, now at (179, 182)\n IMx = shift(IM, (-1, 2))\n true_origin = (179, 182)\n\n # find_origin using 'slice' method\n origin = find_origin(IMx, method=\"slice\")\n\n assert_allclose(origin, true_origin, atol=1)\n\n # find_origin using 'com' method\n origin = find_origin(IMx, method=\"com\")\n\n assert_allclose(origin, true_origin, atol=1)\n\n # check single axis - vertical\n # center shifted image IMx in the vertical direction only\n IMc = center_image(IMx, method=\"com\", axes=1)\n # determine the origin\n origin = find_origin(IMc, method=\"com\")\n\n assert_allclose(origin, (179, 180), atol=1)\n\n # check single axis - horizontal\n # center shifted image IMx in the horizontal direction only\n IMc = center_image(IMx, method=\"com\", axes=0)\n origin = find_origin(IMc, method=\"com\")\n\n assert_allclose(origin, (180, 182), atol=1)\n\n # check even image size returns odd\n # drop off one column, to make an even column image\n IM = IM[:, :-1]\n m, n = IM.shape\n\n IMy = center_image(IM, method=\"slice\", odd_size=True)\n\n assert_allclose(IMy.shape, (m, n-1))\n\n\nif __name__ == \"__main__\":\n test_find_origin()\n test_set_center_axes()\n test_set_center_int()\n test_set_center_float()\n test_set_center_order()\n test_center_image()\n"
] |
[
[
"numpy.sqrt",
"numpy.cos",
"numpy.sin",
"numpy.arctan2",
"scipy.ndimage.map_coordinates",
"numpy.meshgrid",
"numpy.vstack"
],
[
"scipy.interpolate.UnivariateSpline",
"numpy.indices",
"scipy.ndimage.interpolation.map_coordinates",
"numpy.arctan2",
"scipy.interpolate.splev",
"numpy.append",
"scipy.optimize.leastsq",
"numpy.array",
"numpy.logical_and"
],
[
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.ones",
"scipy.ndimage.interpolation.shift",
"numpy.testing.assert_allclose",
"numpy.exp",
"numpy.outer",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"1.3",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
adamosSol/SC-DNN
|
[
"482d67ed906535397fd2885aab74c323e024ce1a"
] |
[
"src/inference/network_II/fp_inference.py"
] |
[
"from __future__ import print_function\n\n# Import MNIST data\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\nimport numpy as np\nimport tensorflow as tf\n\n# Network parameters \nn_hidden_1 = 128 # 1st layer number of neurons\nn_features = 784 # MNIST data input (img shape: 28*28)\nn_classes = 10 # MNIST total classes (0-9 digits)\n\n# Load trained coefficients \nw_dic = {\n 'h1': np.genfromtxt('coeffs/W1.csv', delimiter=','),\n 'out': np.genfromtxt('coeffs/Wout.csv', delimiter=','),\n}\n\nb_dic = {\n 'b1': np.genfromtxt('coeffs/b1.csv', delimiter=','),\n 'out': np.genfromtxt('coeffs/bout.csv', delimiter=','),\n}\n\n# Graph input \nX = tf.placeholder(tf.float32, [None, n_features])\nY = tf.placeholder(tf.float32, [None, n_classes])\n\n# Network coefficients \nweights = {\n 'h1': tf.placeholder(tf.float32, [n_features, n_hidden_1], name=\"w1\"),\n 'out': tf.placeholder(tf.float32, [n_hidden_1, n_classes], name=\"wout\")\n}\n\nbiases = {\n 'b1': tf.placeholder(tf.float32, [n_hidden_1], name=\"b1\"),\n 'out': tf.placeholder(tf.float32, [n_classes], name=\"bout\")\n}\n\n# Network graph \ndef neural_net(x):\n # Hidden layer \n layer_1 = tf.add( tf.matmul(x, weights['h1']), biases['b1'] ) \n # Output layer \n out_layer = tf.matmul(layer_1, weights['out']) + biases['out']\n\n return out_layer\n\nlogits = neural_net(X)\nprediction = tf.nn.softmax(logits)\n\ncorrect_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\nwith tf.Session() as sess:\n\n # Run floating point network inference\n print(\"Testing accuracy: \", \n sess.run(accuracy, feed_dict={X: mnist.train.images,\n Y: mnist.train.labels,\n weights['h1']: w_dic['h1'],\n weights['out']: w_dic['out'],\n biases['b1']: b_dic['b1'],\n biases['out']: b_dic['out']}))\n"
] |
[
[
"tensorflow.matmul",
"tensorflow.nn.softmax",
"tensorflow.cast",
"tensorflow.placeholder",
"numpy.genfromtxt",
"tensorflow.Session",
"tensorflow.argmax",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Sadique96645/Financial-distress-prediction
|
[
"0789c76352e14add8a37cf205c81805eb6cb79c6"
] |
[
"code.py"
] |
[
"# --------------\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.model_selection import train_test_split\nimport warnings\nwarnings.filterwarnings('ignore')\n# Path variable\ndf = pd.read_csv(path)\n# First 5 columns\ndf.head(5)\ndf.drop('Unnamed: 0',1,inplace=True)\n# Independent variables\nX = df.drop('SeriousDlqin2yrs',1)\n# Dependent variable\ny = df['SeriousDlqin2yrs']\n# Check the value counts\ncount = df['SeriousDlqin2yrs'].value_counts()\nprint(count)\n# Split the data set into train and test sets\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 6)\n\n\n# --------------\nX = df.drop(['SeriousDlqin2yrs','Unnamed: 0'],axis = 1)\n# Dependent variable\ny = df['SeriousDlqin2yrs']\n# Check the value counts\ncount = df['SeriousDlqin2yrs'].value_counts()\n#print(count)\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.3,random_state = 6)\n# save list of all the columns of X in cols\ncols = list(X.columns)\n# create subplots\nprint(cols)\nfigs,axes = plt.subplots(nrows =5, ncols =2,figsize=(20,20))\nfor i in range(0,5):\n for j in range(0,2):\n col = cols[ i * 2 + j]\n axes[i,j].set_title(col)\n axes[i,j].scatter(X_train[col],y_train)\n axes[i,j].set_xlabel(col)\n axes[i,j].set_ylabel('SeriousDlqin2yrs')\n\n\n\n\n# --------------\n# Check for null values\nprint(X_train.isnull().sum())\n# Filling the missing values for columns in training data set\nfrom sklearn.preprocessing import Imputer\nmedian_imputer = Imputer(strategy='median')\n# Filling the missing values for columns in testing data set\ntrain_imputer = median_imputer.fit(X_train[['MonthlyIncome',]])\ntest_imputer = median_imputer.fit(X_test[['MonthlyIncome']])\ntrain_imputer = median_imputer.fit(X_train[['NumberOfDependents']])\ntest_imputer = median_imputer.fit(X_test[['NumberOfDependents']])\nX_train['NumberOfDependents'] = train_imputer.transform(X_train[['NumberOfDependents']])\nX_test['NumberOfDependents'] = test_imputer.transform(X_test[['NumberOfDependents']])\nX_train['MonthlyIncome'] = train_imputer.transform(X_train[['MonthlyIncome']])\nX_test['MonthlyIncome'] = test_imputer.transform(X_test[['MonthlyIncome']])\n\n# Checking for null values\n\n\n\n# --------------\n# Correlation matrix for training set\ncorr = X_train.corr()\nimport seaborn as sns\n# Plot the heatmap of the correlation matrix\nsns.heatmap(corr)\n# drop the columns which are correlated amongst each other except one\nX_train.drop(['NumberOfTime30-59DaysPastDueNotWorse','NumberOfTime60-89DaysPastDueNotWorse'],1,inplace=True)\nX_test.drop(['NumberOfTime30-59DaysPastDueNotWorse','NumberOfTime60-89DaysPastDueNotWorse'],1,inplace=True)\n\n\n# --------------\nfrom sklearn.preprocessing import StandardScaler\r\n# Initialize scaler object\r\nscaler = StandardScaler()\r\n\r\n# Fit on training data with columns of interest\r\nX_train = scaler.fit_transform(X_train)\r\n\r\n# Transform test data with columns of interest\r\nX_test = scaler.transform(X_test)\n\n\n# --------------\n# Import Logistic regression model and accuracy score\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression\n\n# Instantiate the model in a variable in log_reg\nlog_reg = LogisticRegression()\n# Fit the model on training data\nlog_reg.fit(X_train,y_train)\n# Predictions of the training dataset\ny_pred = log_reg.predict(X_test)\n# accuracy score\naccuracy = accuracy_score(y_test,y_pred)\nprint(accuracy)\n\n\n# --------------\n# 
Import all the models\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import f1_score, confusion_matrix, classification_report\nfrom sklearn.metrics import precision_score, recall_score\n# Plot the auc-roc curve\nscore = roc_auc_score(y_pred , y_test)\ny_pred_proba = log_reg.predict_proba(X_test)[:,1]\nfpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)\nauc = metrics.roc_auc_score(y_test, y_pred_proba)\nplt.plot(fpr,tpr,label=\"Logistic model, auc=\"+str(auc))\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.legend(loc=4)\nplt.show()\n\n# Evaluation parameters for the model\n\nf1 = f1_score(y_test, log_reg.predict(X_test))\nprecision = precision_score(y_test, log_reg.predict(X_test))\nrecall = recall_score(y_test, log_reg.predict(X_test))\nroc_auc = roc_auc_score(y_test, log_reg.predict(X_test))\nprint ('Confusion_matrix' + '\\n', confusion_matrix(y_test, log_reg.predict(X_test)))\nprint ('Classification_report' + '\\n' + classification_report(y_test,y_pred))\n\n\n# --------------\n# Import SMOTE from imblearn library\nfrom imblearn.over_sampling import SMOTE\n\n# Check value counts of target variable for data imbalance\n\n\n# Instantiate smote\nsmote = SMOTE(random_state=9)\n# Fit Smote on training set\nX_sample,y_sample = smote.fit_sample(X_train,y_train)\n# Check for count of class\n\n\n\n\n# --------------\n# Fit logistic regresion model on X_sample and y_sample\nlog_reg.fit(X_sample,y_sample)\n# Store the result predicted in y_pred\ny_pred = log_reg.predict(X_test)\n# Store the auc_roc score\nscore = roc_auc_score(y_pred,y_test)\n# Store the probablity of any class\ny_pred_proba = log_reg.predict_proba(X_test)[:,1]\nfpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)\nauc = metrics.roc_auc_score(y_test, y_pred_proba)\n# Plot the auc_roc_graph\nplt.plot(fpr,tpr,label=\"Logistic model, auc=\"+str(auc))\n# Print f1_score,Precision_score,recall_score,roc_auc_score and confusion matrix\nf1 = f1_score(y_test, log_reg.predict(X_test))\nprecision = precision_score(y_test, log_reg.predict(X_test))\nrecall = recall_score(y_test, log_reg.predict(X_test))\nroc_auc = roc_auc_score(y_test, log_reg.predict(X_test))\nprint ('Confusion_matrix' + '\\n', confusion_matrix(y_test, log_reg.predict(X_test)))\nprint ('Classification_report' + '\\n' + classification_report(y_test,y_pred))\n\n\n# --------------\n# Import RandomForestClassifier from sklearn library\nfrom sklearn.ensemble import RandomForestClassifier\n# Instantiate RandomForrestClassifier to a variable rf.\nrf = RandomForestClassifier(random_state = 9)\n# Fit the model on training data.\nrf.fit(X_sample,y_sample)\n# store the predicted values of testing data in variable y_pred.\ny_pred = rf.predict(X_test)\n# Store the different evaluation values.\nf1 = f1_score(y_test, rf.predict(X_test))\nprecision = precision_score(y_test, rf.predict(X_test))\nrecall = recall_score(y_test, rf.predict(X_test))\nroc_auc = roc_auc_score(y_test, rf.predict(X_test))\nprint ('Confusion_matrix' + '\\n', confusion_matrix(y_test,rf.predict(X_test)))\nprint ('Classification_report' + '\\n' + classification_report(y_test,y_pred))\n# Plot the auc_roc graph\ny_pred_proba = rf.predict_proba(X_test)[:,1]\nfpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)\nauc = metrics.roc_auc_score(y_test, y_pred_proba)\nplt.plot(fpr,tpr,label=\"XG Boost model, auc=\"+str(auc))\n# Store the auc_roc score\nscore = roc_auc_score(y_pred,y_test)\nplt.plot(fpr,tpr,label=\"XG Boost 
model, auc=\"+str(auc))\n\n\n"
] |
[
[
"sklearn.metrics.roc_auc_score",
"matplotlib.pyplot.legend",
"pandas.read_csv",
"sklearn.linear_model.LogisticRegression",
"sklearn.ensemble.RandomForestClassifier",
"matplotlib.pyplot.subplots",
"sklearn.model_selection.train_test_split",
"sklearn.preprocessing.Imputer",
"sklearn.metrics.roc_curve",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"sklearn.metrics.classification_report",
"sklearn.metrics.accuracy_score"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
olivierverdier/odelab
|
[
"ee3300c663f595c2d185a00605bcfb93649352e0"
] |
[
"odelab/scheme/stochastic.py"
] |
[
"# -*- coding: utf-8 -*-\nfrom __future__ import division\n\nimport numpy as np\nimport scipy.linalg as sl\n\nfrom odelab.scheme import Scheme\nfrom newton import FSolve, Newton\n\nclass EulerMaruyama(Scheme):\n\tdef step(self,t,u,h):\n\t\tsystem = self.system\n\t\tnoise = np.random.normal(size=[len(system.noise(t,u).T)])\n\t\tdef residual(v):\n\t\t\treturn (system.mass(t+h,v) - system.mass(t,u))/h - system.deterministic(t+h,v) - np.sqrt(h)/h*np.dot(system.noise(t,u),noise)\n\t\tN = Newton(residual)\n## \t\tN.xtol = min(N.xtol, h*1e-4)\n\t\tresult = N.run(u)\n\t\treturn t+h, result\n\t\n\tdef linstep(self,t,u):\n\t\treturn t+self.h, sl.solve(self.system.mass_mat-self.h*self.system.det_mat, np.dot(self.system.mass_mat,u)-self.h*self.system.V(t+self.h))\n\t\n"
] |
[
[
"numpy.dot",
"numpy.sqrt"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jadenPete/keras_practice
|
[
"8e06c686f8a93ed7d0d7c244a3ea92d2bd428eb7"
] |
[
"main.py"
] |
[
"#!/usr/bin/env python3\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.layers import Dense, Embedding, LSTM\nfrom keras.models import Sequential, load_model\nfrom keras.preprocessing.text import Tokenizer, text_to_word_sequence\nfrom keras.utils import to_categorical\nimport numpy\nimport os\nimport re\nimport sys\n\nwith open(\"republic_clean.txt\", \"r\") as file:\n\ttext = re.sub(r\"^BOOK .+$\", \"\", file.read(), flags=re.M)\n\twords = text_to_word_sequence(text)\n\n\ttokenizer = Tokenizer()\n\ttokenizer.fit_on_texts(words)\n\n\tcontext_len = 50\n\tvocab_size = len(tokenizer.word_index) + 1\n\tsequences = [seq[0] for seq in tokenizer.texts_to_sequences(words)]\n\n\t# Fast replacement for keras.preprocessing.sequence.pad_sequences(sequences)\n\tx = numpy.array([[0] * (context_len - i) + sequences[max(i - context_len, 0):i]\n\t for i in range(len(sequences))])\n\n\t# Regex match for the newest model file\n\tmatch = next(filter(None, map(\n\t\tre.compile(r\"^model-([0-9]{2})-[0-9]\\.[0-9]{4}\\.h5$\").match,\n\t\tsorted(filter(os.path.isfile, os.listdir()), reverse=True))\n\t), None)\n\n\tmodel = Sequential([\n\t\tEmbedding(vocab_size, 50, mask_zero=True, input_length=context_len),\n\t\tLSTM(100, return_sequences=True),\n\t\tLSTM(100),\n\t\tDense(100, activation=\"relu\"),\n\t\tDense(vocab_size, activation=\"softmax\")\n\t]) if match is None else load_model(match.group(0))\n\n\tif len(sys.argv) > 1 and sys.argv[1] in (\"-t\", \"--test\"):\n\t\twith open(\"compressed.txt\", \"w\") as file:\n\t\t\tfor i, context in enumerate(x):\n\t\t\t\toutput = model.predict(numpy.array([context]))[0]\n\t\t\t\tfile.write(f\"{sum(prob > output[sequences[i]] for prob in output)}\\n\")\n\telse:\n\t\tmodel.summary()\n\t\tprint()\n\n\t\tmodel.compile(optimizer=\"adam\",\n\t\t loss=\"categorical_crossentropy\",\n\t\t metrics=[\"accuracy\"])\n\n\t\ty = to_categorical(sequences, num_classes=vocab_size)\n\n\t\tmodel.fit(x, y, batch_size=64, epochs=10, verbose=1, callbacks=[\n\t\t\tModelCheckpoint(\"model-{epoch:02d}-{loss:.4f}.h5\")\n\t\t], initial_epoch=0 if match is None else int(match.group(1)))\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
waidyanatha/quasar
|
[
"da64dfb1993b013ee09a024bbdaad96d6e984409"
] |
[
"lib/cluster_quality.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n'''\n CLASS for community detection in graphs, both spatial and temporal, functions necessary for the station-fault analysis\n'''\n\nclass cluster_quality_metric():\n\n def __init__(self):\n\n self._l_reg_methods = ['Absolute','Average','minPoints','None']\n\n self._max_distance=30.0\n self._minimum_samples=3\n self._algorithm=None\n self._metric=None\n self._cluster_method=None\n self._seed=None\n#d self._force_minPts = True # force to remove subgraphs with < minPts\n self._force_regularity='minPoints' # force Absolute, Average, or None regularity\n self._reg_tol_scaler=0.99 # multiply the regularity by the scaler to reduce the threshold\n\n pass\n\n def set_quality_frame(self, clustering_name: str=\"greedy_modularity_communities\",\n **metric_params: dict):\n\n import traceback\n import numpy as np\n\n self._max_distance=None\n self._minimum_samples=None\n self._algorithm=None\n self._metric=None\n self._cluster_method=None\n self._seed=None\n\n self._name=clustering_name\n try:\n ''' Set the default paramters for the specific clustering method '''\n if 'distance_km' in metric_params:\n if isinstance(metric_params[\"distance_km\"],float) and metric_params[\"distance_km\"] > 0:\n self._max_distance=metric_params[\"distance_km\"]\n else:\n raise ValueError('distance_km %s must be a float > 0.'\n % str(metric_params[\"distance_km\"]))\n\n if 'minimum_samples' in metric_params:\n if isinstance(metric_params[\"minimum_samples\"],(int, np.integer)) and metric_params[\"minimum_samples\"] > 0:\n# if metric_params[\"minimum_samples\"] > 0:\n self._minimum_samples=int(metric_params[\"minimum_samples\"])\n else:\n raise ValueError('minimum_samples %s must be an int > 0.'\n % str(metric_params[\"minimum_samples\"]))\n\n if 'algorithm' in metric_params:\n if isinstance(metric_params[\"algorithm\"],str):\n self._algorithm=metric_params[\"algorithm\"]\n else:\n raise ValueError('algorithm %s is invalid.' % (metric_params[\"algorithm\"]))\n\n if 'metric' in metric_params:\n if isinstance(metric_params[\"metric\"], str):\n self._metric=metric_params[\"metric\"]\n else:\n raise ValueError('metric %s is invalid.' % (metric_params[\"metric\"]))\n\n if 'cluster_method' in metric_params:\n if isinstance(metric_params[\"cluster_method\"], str):\n self._cluster_method=metric_params[\"cluster_method\"]\n else:\n raise ValueError('cluster_method %s is invalid.' 
% (metric_params[\"cluster_method\"]))\n\n if 'seed' in metric_params:\n if metric_params[\"seed\"] is np.random:\n self._seed=\"random\"\n elif metric_params[\"seed\"] is int:\n self._seed=\"integer\"\n else:\n self._seed=None\n\n except Exception as err:\n print(\"Class cluster_quality_metric [set_quality_frame] Error message:\", err)\n print(traceback.format_exc())\n\n return self\n\n\n def get_seq_params(self, _iter_combos_df, _n_exp_seq: int = 0):\n\n#d import pandas as pd # holds the clustering sequence parameters\n import numpy as np # necessary when using seed = np.random\n import traceback\n\n '''\n Set clustering parameters to execute cloud or graph clustering technique\n\n TODO: use below dictionaries to look up and validate valuses\n (python dictionary key look up not working)\n\n _dict_algorithms = {\"DBSCAN\": [\"auto\", \"ball_tree\", \"kd_tree\", \"brute\"],\n 'HDBSCAN': [\"best\",\"generic\",\"prims_kdtree\",\"prims_balltree\",\"boruvka_kdtree\",\n \"boruvka_balltree\"]}\n _dict_clust_method = {\"HDBSCAN\": [\"leaf\",\"eom\"],\"OPTICS\": [\"xi\",\"dbscan\"]}\n _dict_algorithms = {\"DBSCAN\": [\"auto\", \"ball_tree\", \"kd_tree\", \"brute\"],\n \"HDBSCAN\": \"best\",\"generic\",\"prims_kdtree\",\"prims_balltree\",\n \"boruvka_kdtree\",\"boruvka_balltree\",\n \"OPTICS\": ['auto', 'ball_tree', 'kd_tree', 'brute']\n }\n\n TODO: acquire the lists and dictionaries from the respective cloud and graph clustering classes\n '''\n\n _l_cluster_techniques = ['cloud','graph']\n _l_cloud_cluster_name = ['DBSCAN','HDBSCAN','AFFINITYPROPAGATION','OPTICS','MEANSHIFT',\n 'AGGLOMERATIVE','BIRCH','KMEANS','KNN','DENCLUE',\"SPECTRAL\"]\n _l_graph_cluster_name = [\"GREEDY\",\"NAIVE-GREEDY\",\"LPC\",\"ASYNC-LPA\",\n \"LUKES\",\"ASYNC-FLUID\",\"GIRVAN-NEWMAN\"]\n _dict_algorithms = {\"auto\", \"ball_tree\", \"kd_tree\", \"brute\",\n \"best\",\"generic\",\"prims_kdtree\",\"prims_balltree\",\"boruvka_kdtree\",\"boruvka_balltree\",\n \"kmeans\",\"discretize\"}\n\n _dict_clust_method = {\"leaf\",\"eom\",\"xi\",\"dbscan\",\"arpack\", \"lobpcg\", \"amg\"}\n '''In all instances when possible <haversine> will be the choice; else it will be <precomputed>'''\n _lst_metric = [\"haversine\",\"euclidean\",\"manhattan\",\"minkowski\",\"precomputed\",\n \"precomputed_nearest_neighbors\",\"nearest_neighbors\",\"rbf\"]\n\n _dict_clust_params = {}\n\n i=_n_exp_seq\n try:\n# ''' Load data from CSV '''\n# _iter_combos_df = pd.read_csv(\"../experiments/cluster_runs.csv\")\n\n ''' Clustering method name is mandatory'''\n if _iter_combos_df.loc[i, 'name'] not in _l_cloud_cluster_name+_l_graph_cluster_name:\n raise AttributeError('%s is not a valid clustering method use \\n%s'\n % (_iter_combos_df.loc[i, 'name'],\n _l_cloud_cluster_name+_l_graph_cluster_name))\n else:\n _s_cloud_clust_name = str(_iter_combos_df.loc[i, 'name'])\n\n ''' Technique - assign the appropriate value based on the clustering name '''\n if _iter_combos_df.loc[i, 'technique'] not in _l_cluster_techniques:\n if _s_cloud_clust_name in _l_cloud_cluster_name:\n _cluster_technique = \"cloud\"\n elif _s_cloud_clust_name in _l_graph_cluster_name:\n _cluster_technique = \"graph\"\n else:\n _cluster_technique = str(_iter_combos_df.loc[i, 'technique'])\n\n ''' Create clustering input parameter Dictionary\n Maximum distance between points and the minimum points are undefined assign defaults '''\n if _iter_combos_df.loc[i, 'maxDistance'].astype(float) <= 1:\n print('maxDistance (Km) must ba a float >= 1.0; proceeding with default maxDistance=30.0 Km')\n 
_dict_clust_params[\"distance_km\"] = 30.0\n else:\n# _dict_clust_params[\"distance_km\"] = _iter_combos_df.loc[i, 'maxDistance'].astype(float)\n _dict_clust_params[\"distance_km\"] = float(_iter_combos_df.loc[i, 'maxDistance'])\n\n if _iter_combos_df.loc[i, 'minPts'].astype(int) <= 0:\n print('minPts must be an integer > 0; proceeding with default minPts=3')\n _n_min_cloud_clust_size = 3\n else:\n _dict_clust_params[\"minimum_samples\"] = _iter_combos_df.loc[i, 'minPts'].astype(int)\n# _dict_clust_params[\"minimum_samples\"] = int(_iter_combos_df.loc[i, 'minPts'])\n\n ''' Validate and assign algorithim based on the clustering name'''\n if _iter_combos_df.loc[i, 'algorithm'] in _dict_algorithms:\n _dict_clust_params[\"algorithm\"] = str(_iter_combos_df.loc[i, 'algorithm'])\n\n ''' Validate and assign clustering_method based on the clustering name'''\n if _iter_combos_df.loc[i, 'method'] in _dict_clust_method:\n _dict_clust_params[\"cluster_method\"] = str(_iter_combos_df.loc[i, 'method'])\n\n if _iter_combos_df.loc[i, 'metric'] in _lst_metric:\n _dict_clust_params[\"metric\"] = str(_iter_combos_df.loc[i, 'metric'])\n\n if isinstance(_iter_combos_df.loc[i, 'weight'],str):\n _dict_clust_params[\"weight\"] = str(_iter_combos_df.loc[i, 'weight'])\n\n if isinstance(_iter_combos_df.loc[i, 'seed'], str):\n if _iter_combos_df.loc[i, 'seed'] == \"random\":\n self._seed = \"random\"\n _dict_clust_params[\"seed\"] = np.random\n elif _iter_combos_df.loc[i, 'seed'] == \"int\":\n self._seed = \"int\"\n _dict_clust_params[\"seed\"] = int\n else:\n pass\n\n# if _iter_combos_df.loc[i, 'maxIter'] and _iter_combos_df.loc[i, 'maxIter'] > 0:\n if _iter_combos_df.loc[i, 'maxIter'].astype(int) > 0:\n _dict_clust_params[\"max_iter\"] = int(_iter_combos_df.loc[i, 'maxIter'])\n\n if _iter_combos_df.loc[i, 'randomState'].astype(int) > 0:\n _dict_clust_params[\"random_state\"] = int(_iter_combos_df.loc[i, 'randomState'])\n\n if _iter_combos_df.loc[i, 'numClusters'] > 0:\n _dict_clust_params[\"n_clusters\"] = int(_iter_combos_df.loc[i, 'numClusters'])\n\n# print('Preparing for %s clustering %s with parameters\\n%s'\n# % (_cluster_technique,_s_cloud_clust_name,_dict_clust_params))\n\n except Exception as err:\n print(\"Class cluster_quality_metric [get_seq_params] Error message:\", err)\n print(traceback.format_exc())\n\n return _cluster_technique,_s_cloud_clust_name,_dict_clust_params\n\n ''' Run the cloud or graph clustering sequence for the specific clustering method and parameters '''\n def get_clusters(self,\n _dict_clust_params,\n station_df,\n _cluster_technique: str=\"cloud\",\n _s_cloud_clust_name: str=\"DBSCAN\"):\n\n import cloud_clustering as cc\n import graph_clustering as gc\n import numpy as np\n import networkx as nx\n\n import traceback\n\n# __st_clust_df = station_df.copy()\n arr_st_coords = station_df[['st_lat','st_lon']].to_numpy()\n\n try:\n if _cluster_technique == 'cloud':\n cls_clust = cc.cluster_data(_s_cloud_clust_name,**_dict_clust_params)\n labels, labels_true, clust_centers = cls_clust.get_clusters(arr_st_coords)\n\n if arr_st_coords.shape[0] != labels.shape[0]:\n raise ValueError('Mismatch in station coordinate and labels array sizes to; cannot proceed')\n\n station_df['label'] = labels\n\n elif _cluster_technique == 'graph':\n cls_g_clust = gc.community_detection()\n params = cls_g_clust.set_community_detection_params(_s_cloud_clust_name,**_dict_clust_params)\n\n ''' G_cluster required to distinguish between communities and valid clusters '''\n G_simple, G_clusters = 
cls_g_clust.get_communities(station_df)\n station_df['label'] = nx.get_node_attributes(G_simple,'label').values()\n\n else:\n raise ValueError('Invalid clustering technique: %s' % _cluster_technique)\n\n ''' Force Regularity of flag is set'''\n\n except Exception as err:\n print(\"Class cluster_quality_metric [get_clusters] Error message:\", err)\n print(traceback.format_exc())\n\n return station_df\n\n ''' Get all quality measures and other parameters for the dataframe with appropriate cluster labels '''\n def get_quality_metrics(self, station_df):\n\n import dunn as di\n from sklearn import metrics\n import networkx as nx\n import networkx.algorithms.community as nx_comm\n import numpy as np\n import pandas as pd\n import traceback\n\n quality_metric_df = pd.DataFrame([])\n\n try:\n#d _n_num_clust = len(station_df['label'].unique()) # Generated Cluster Count\n _n_num_clust = len([x for x in station_df['label'].unique() if x > -1]) # Generated Cluster Count\n if _n_num_clust <= 1:\n raise ValueError('Cannot compute quality metric for %d clusters' % (_n_num_clust))\n\n ''' returns the simple graph of the clusters and the set dictionary of cluster nodes '''\n G_simple_, l_G_clusters_ = self.__get_graph_n_labels(station_df)\n\n _s_st_types = str(station_df['st_type'].unique()) # Station Types\n _n_tot_num_st = station_df.shape[0] # Station Quantity\n _f_min_dist = self._max_distance # Minimum Distance\n _n_min_pts = self._minimum_samples # Minimum Points\n _s_clust = str(self._name) # Clustering Name\n _s_algo = str(self._algorithm) # Algorithm\n _s_metric = str(self._metric) # Metric\n _s_method = str(self._cluster_method) # Method\n _s_seed = str(self._seed) # Seed\n __lst_valid_cloud_clust = [frozenset(clust) for clust in l_G_clusters_\n if len(clust) >= self._minimum_samples]\n _n_valid_clust = len(__lst_valid_cloud_clust) # Valid Cluster Count\n\n # Clustered Station Count\n _n_sts_in_clusters=0\n for x in __lst_valid_cloud_clust:\n _n_sts_in_clusters += len(x)\n\n _n_noise = station_df.shape[0] - _n_sts_in_clusters # Unclsutered Noise Count\n _n_avg_deg = sum([v for k, v in G_simple_.degree()\n if G_simple_.nodes[k][\"label\"] > -1])/_n_sts_in_clusters # Average Node Degree\n\n ''' prepare valid stations for measuring the quality'''\n lst_st = list(nx.get_node_attributes(G_simple_,'pos').values())\n lst_lbl = list(nx.get_node_attributes(G_simple_,'label').values())\n\n _f_silhouette = metrics.silhouette_score(lst_st, lst_lbl,\n metric='haversine') # Silhouette Coefficient\n# _f_silhouette = metrics.silhouette_score(station_df[['st_lat','st_lon']].to_numpy(),\n# list(station_df['label']),\n# metric='haversine') # Silhouette Coefficient\n _f_cal_har = metrics.calinski_harabasz_score(lst_st, lst_lbl) # Calinski Harabaz score\n# _f_cal_har = metrics.calinski_harabasz_score(station_df[['st_lat','st_lon']].to_numpy(),\n# list(station_df['label'])) # Calinski Harabaz score\n _f_dav_bould = metrics.davies_bouldin_score(lst_st, lst_lbl) # Davies Bouldin score\n# _f_dav_bould = metrics.davies_bouldin_score(station_df[['st_lat','st_lon']].to_numpy(),\n# list(station_df['label'])) # Davies Bouldin score\n _f_dunn = di.dunn_fast(lst_st, lst_lbl) # Dunn Index\n# _f_dunn = di.dunn_fast(station_df[['st_lat','st_lon']].to_numpy(),\n# list(station_df['label'])) # Dunn Index\n _f_modul = nx_comm.modularity(G_simple_,l_G_clusters_) # Modularity\n\n try:\n l_conductance = list(nx.conductance(G_simple_, cluster_i, weight='distance')\n for cluster_i in __lst_valid_cloud_clust)\n _f_conduct = 
sum(l_conductance)/len(l_conductance) # Conductance Average\n except Exception:\n _f_conduct = 0\n _f_cover = nx_comm.coverage(G_simple_, l_G_clusters_) # Coverage Score\n _f_perform = nx_comm.performance(G_simple_, l_G_clusters_) # Performance Score\n\n dict_quality_mesrs = {\n 'Station Types': _s_st_types,\n 'Station Quantity': _n_tot_num_st,\n 'Maximum Distance': _f_min_dist,\n 'Minimum Points': _n_min_pts,\n 'Name': _s_clust,\n 'Algorithm': _s_algo,\n 'Metric': _s_metric,\n 'Method': _s_method,\n 'Seed': _s_seed,\n 'Generated Cluster Count': _n_num_clust,\n 'Valid Cluster Count': _n_valid_clust,\n 'Clustered Station Count': _n_sts_in_clusters,\n 'Unclsutered Noise Count': _n_noise,\n 'Average Node Degree': _n_avg_deg,\n 'Silhouette Coefficient': _f_silhouette,\n 'Calinski Harabaz score': _f_cal_har,\n 'Davies Bouldin score': _f_dav_bould,\n 'Dunn Index': _f_dunn,\n 'Modularity': _f_modul,\n 'Conductance Average': _f_conduct,\n 'Coverage Score': _f_cover,\n 'Performance Score': _f_perform,\n }\n# print('Dict qual',dict_quality_mesrs('Seed'))\n quality_metric_df = pd.DataFrame(dict_quality_mesrs, index=[_s_clust])\n quality_metric_df.reset_index(drop=True, inplace=True)\n\n except Exception as err:\n print(\"Class cluster_quality_metric [get_quality_metrics] Error message:\", err)\n# print(G_simple_.edges('distance'))\n print(traceback.format_exc())\n\n return quality_metric_df\n\n\n def __get_graph_n_labels(self, station_df):\n\n import sys; sys.path.insert(1, '../lib')\n import graph_clustering as gc\n import networkx as nx\n# import networkx.algorithms.community as nx_comm\n\n dict_feature_params = {\"distance_km\":self._max_distance,\n \"minimum_samples\":self._minimum_samples}\n\n cls_g_clust = gc.community_detection(**dict_feature_params)\n G_simple_ = cls_g_clust.get_simple_graph(station_df)\n #print(cloud_G_simple.nodes(data=True))\n\n _cloud_unique_labels = set(nx.get_node_attributes(G_simple_,'label').values())\n\n _l_cloud_g_cluster =[]\n for label in _cloud_unique_labels:\n selected_nodes = sorted([n for n,v in G_simple_.nodes(data=True) if v['label'] == label])\n if len(selected_nodes) > 0 and label != -1:\n _l_cloud_g_cluster.append(set(selected_nodes))\n elif len(selected_nodes) > 0 and label == -1:\n for st_node in selected_nodes:\n _l_cloud_g_cluster.append(set([st_node]))\n\n return G_simple_, _l_cloud_g_cluster\n\n ''' get_r_regular_clusters furhter removes those stations and clusters that do not comply with\n minPts and maxDist constraints\n '''\n def get_r_regular_clusters(self,_dict_reg_param,__st_clust_df):\n\n import sys; sys.path.insert(1, './lib')\n import pandas as pd\n import graph_clustering as gc\n import networkx as nx\n\n import traceback\n\n ''' Create subgraphs that comply with r-regularity where r >= minPts-1\n Given that the regularity is based on the average degree, change the scaling value 0.95\n to one that is desired and in the interval (0,1] to set a regularity threshold @_f_reg_thresh\n '''\n try:\n ''' Set the default paramters for the specific r-regularity method '''\n if 'force_regularity' in _dict_reg_param:\n if _dict_reg_param[\"force_regularity\"] in self._l_reg_methods:\n self._force_regularity=_dict_reg_param[\"force_regularity\"]\n else:\n raise ValueError('force_regularity must be {%s}'\n % str(self._l_reg_methods))\n\n if 'regularity_threshold' in _dict_reg_param:\n if isinstance(_dict_reg_param[\"tolerance_scaler\"],float) and _dict_reg_param[\"tolerance_scaler\"] < 1.0:\n self._reg_tol_scaler = 
_dict_reg_param[\"tolerance_scaler\"]\n else:\n raise ValueError('regularity_threshold must be %s in invalid an must be in the interval [0,1]'\n % str(_dict_reg_param[\"tolerance_scaler\"]))\n#d else:\n#d print('Unspecified regularity_threshold; using default value %0.2f' % (self._reg_tol_scaler))\n\n ''' (n-1)-simplicies equalant to the regularity required to ensure all target-site stations\n have the minimum required target station connections '''\n _f_reg_thresh = self._reg_tol_scaler*(self._minimum_samples - 1)\n\n lst_G_simple = []\n ''' Only plot valid clusters '''\n no_noise_df = __st_clust_df[__st_clust_df['label']!= -1]\n# print('%d clusters after removing the noise clusters; i.e. label = -1'\n# % len(no_noise_df['label'].unique()))\n\n dict_feature_params = {\"distance_km\": self._max_distance,\n \"minimum_samples\": self._minimum_samples}\n cls_g_clust = gc.community_detection(**dict_feature_params)\n G_simple = cls_g_clust.get_simple_graph(no_noise_df)\n G_simple.remove_nodes_from(list(nx.isolates(G_simple)))\n if not nx.is_empty(G_simple):\n lst_G_simple = cls_g_clust.get_list_subgraphs(G_simple)\n# print('%d simple subgraphs created after removing clusters with isolated nodes' % len(lst_G_simple))\n\n ''' remove any graphs with zero average degree '''\n incomplete = True #flag to start stop while loop\n while incomplete and self._force_regularity != \"None\":\n incomplete = False\n ''' As a precaution first remove all subgraphs with zero degree nodes; i.e. singletons '''\n for G_idx, G in enumerate(lst_G_simple):\n if len(G.edges()) == 0:\n lst_G_simple.pop(G_idx)\n# print('...removed subgraph %d with zero degree' % G_idx)\n incomplete = True\n\n for G_idx, G in enumerate(lst_G_simple):\n\n ''' Average regularity function '''\n degree_sequence = sorted([d for n, d in G.degree()], reverse=True)\n _avg_degree = sum(degree_sequence)/len(degree_sequence)\n if self._force_regularity == 'Average' and _avg_degree <= _f_reg_thresh:\n ''' try to pop if G_idx fails pass and will catch in the next round '''\n try:\n lst_G_simple.pop(G_idx)\n# print('...removed subgraph %d with average degree %0.02f <= %0.02f tolerated degree'\n# % (G_idx, _avg_degree,_f_reg_thresh))\n incomplete = True\n except Exception as err:\n pass\n\n elif self._force_regularity == 'Absolute':\n ''' Absolute regularity function forces strict minimal regularity '''\n H = nx.Graph(G)\n remove = [node for node,degree in dict(H.degree()).items() if degree < _f_reg_thresh]\n if len(remove) > 0:\n# print('...removing nodes %s with degree <= %0.02f' % (remove, _f_reg_thresh))\n H.remove_nodes_from(remove)\n if H.number_of_nodes() > 0:\n lst_G_simple.pop(G_idx)\n lst_G_simple.append(H)\n# print('...replaced subgraph %d with reduced nodes=%d'\n# % (G_idx, H.number_of_nodes()))\n else:\n# print('...removing subgraph %d with %d nodes after node removal'\n# % (G_idx, H.number_of_nodes()))\n lst_G_simple.pop(G_idx)\n incomplete = True\n elif self._force_regularity == 'minPoints':\n ''' minPoints function remove clusters with size < minimum_samples '''\n if G.number_of_nodes() < self._minimum_samples:\n lst_G_simple.pop(G_idx)\n#d for k in no_noise_df['label'].unique():\n#d temp_df = pd.DataFrame([])\n#d temp_df = no_noise_df.loc[lambda no_noise_df: no_noise_df['label'] == k]\n#d if temp_df.shape[0] < int(self._minimum_samples+1):\n#d no_noise_df = no_noise_df[no_noise_df['label'] != k]\n#d print('%d clusters remaining after removing clusters with minPts < %d+1 for %d-regularity minimum requirement'\n#d % 
(len(no_noise_df['label'].unique()),self._minimum_samples,self._minimum_samples))\n\n\n# print('%d simple subgraphs remaining after validating with parameters:'\n# % (len(lst_G_simple)))\n# print(' degree tolerance of %0.02f and forced regularity set to %s.'\n# % (_f_reg_thresh, self._force_regularity))\n\n ''' Modify the station dataframe to reflect the new noise and cluster labels '''\n new_st_clust_df_ = __st_clust_df.copy()\n#d if len(lst_G_simple) > 0 and self._force_regularity != \"None\":\n if self._force_regularity != \"None\":\n new_st_clust_df_[\"label\"] = -1\n if len(lst_G_simple) > 0:\n for G_idx, G in enumerate(lst_G_simple):\n _nodes = sorted([n for n,v in G.nodes(data=True)])\n new_st_clust_df_.loc[new_st_clust_df_[\"st_name\"].isin(_nodes),\"label\"] = G_idx\n\n except Exception as err:\n print(\"Class cluster_quality_metric [get_r_regular_clusters] Error message:\", err)\n print(traceback.format_exc())\n\n return new_st_clust_df_, lst_G_simple\n"
] |
[
[
"sklearn.metrics.davies_bouldin_score",
"sklearn.metrics.calinski_harabasz_score",
"pandas.DataFrame",
"sklearn.metrics.silhouette_score"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
PhilipeRLeal/QAA
|
[
"9917b10190e4d96ac6c633f15d4d40bed3240bbe"
] |
[
"scripts/IOP_AOPs/tests/QAA_analysis_tests.py"
] |
[
"import numpy as np\nimport pandas as pd\nfrom unittest import TestCase\n\nfrom .IOP_AOPs.QAA_analysis import apply_QAA\n\nclass Testgcdistance(TestCase):\n def test_apply_QAA(self):\n \"\"\"\n This is a function to evaluate the QAA algorithm\n\n \"\"\"\n try:\n N_samples = 10\n\n Bandas = [412, 443, 489, 510, 555, 670]\n\n size = (N_samples, len(Bandas)) / 1000\n\n Random_data = np.random.randint(low=0,\n high=800,\n size=size\n )\n\n Rrs_Data = pd.DataFrame(Random_data, columns=Bandas)\n\n QAA_Results = apply_QAA(Rrs_Data)\n\n for k, v in QAA_Results.items():\n print(str(k), '\\n' * 3, '-' * 50,\n '\\n', v, '\\n' * 3)\n\n R = isinstance(Rrs_Data, pd.DataFrame)\n\n except BaseException:\n R = False\n\n self.assertTrue(R)"
] |
[
[
"pandas.DataFrame",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ashishpatel26/svhn-detection-tf
|
[
"20946383a039cfddcc43b54f796189290610f29b"
] |
[
"svhn_detection/utils.py"
] |
[
"#!/usr/bin/env python3\nimport tensorflow as tf\nimport numpy as np\nfrom svhn_dataset import SVHN\n\ndef bbox_area(a):\n return tf.maximum(tf.zeros_like(a[...,2]), a[...,2] - a[...,0]) * tf.maximum(tf.zeros_like(a[...,2]), a[...,3] - a[...,1]) \n\n\ndef bbox_iou(a, b):\n \"\"\" Compute IoU for two bboxes a, b.\n\n Each bbox is parametrized as a four-tuple (top, left, bottom, right).\n \"\"\"\n a = tf.expand_dims(a, -2)\n b = tf.expand_dims(b, -3)\n intersection = tf.stack([\n tf.maximum(a[...,0], b[...,0]),\n tf.maximum(a[...,1], b[...,1]),\n tf.minimum(a[...,2], b[...,2]),\n tf.minimum(a[...,3], b[...,3]),\n ], -1)\n area_a = bbox_area(a)\n area_b = bbox_area(b)\n area_intersection = bbox_area(intersection)\n area_union = area_a + area_b - area_intersection\n filter_expr = area_intersection > 0\n area_intersection = tf.where(filter_expr, area_intersection / area_union, area_intersection)\n return area_intersection\n\n\ndef np_bbox_area(a):\n return max(0, a[SVHN.BOTTOM] - a[SVHN.TOP]) * max(0, a[SVHN.RIGHT] - a[SVHN.LEFT])\n\ndef np_bbox_iou(a, b):\n \"\"\" Compute IoU for two bboxes a, b.\n Each bbox is parametrized as a four-tuple (top, left, bottom, right).\n \"\"\"\n intersection = [\n max(a[SVHN.TOP], b[SVHN.TOP]),\n max(a[SVHN.LEFT], b[SVHN.LEFT]),\n min(a[SVHN.BOTTOM], b[SVHN.BOTTOM]),\n min(a[SVHN.RIGHT], b[SVHN.RIGHT]),\n ]\n if intersection[SVHN.RIGHT] <= intersection[SVHN.LEFT] or intersection[SVHN.BOTTOM] <= intersection[SVHN.TOP]:\n return 0\n return np_bbox_area(intersection) / float(np_bbox_area(a) + np_bbox_area(b) - np_bbox_area(intersection))\n\n\ndef bbox_to_fast_rcnn(anchor, bbox):\n \"\"\" Convert `bbox` to a Fast-R-CNN-like representation relative to `anchor`.\n\n The `anchor` and `bbox` are four-tuples (top, left, bottom, right);\n you can use SVNH.{TOP, LEFT, BOTTOM, RIGHT} as indices.\n\n The resulting representation is a four-tuple with:\n - (bbox_y_center - anchor_y_center) / anchor_height\n - (bbox_x_center - anchor_x_center) / anchor_width\n - log(bbox_height / anchor_height)\n - log(bbox_width / anchor_width)\n \"\"\"\n bbox_height = bbox[...,SVHN.BOTTOM] - bbox[...,SVHN.TOP]\n bbox_width = bbox[...,SVHN.RIGHT] - bbox[...,SVHN.LEFT]\n anchor_height = anchor[...,SVHN.BOTTOM] - anchor[...,SVHN.TOP]\n anchor_width = anchor[...,SVHN.RIGHT] - anchor[...,SVHN.LEFT]\n bbox_y_center = 0.5 * (bbox_height) + bbox[...,SVHN.TOP]\n bbox_x_center = 0.5 * (bbox_width) + bbox[...,SVHN.LEFT]\n anchor_y_center = 0.5 * (anchor_height) + anchor[...,SVHN.TOP]\n anchor_x_center = 0.5 * (anchor_width) + anchor[...,SVHN.LEFT]\n return tf.stack([\n (bbox_y_center - anchor_y_center) / anchor_height,\n (bbox_x_center - anchor_x_center) / anchor_width,\n tf.math.log(bbox_height / anchor_height),\n tf.math.log(bbox_width / anchor_width),\n ], -1)\n\ndef bbox_from_fast_rcnn(anchor, fast_rcnn):\n \"\"\" Convert Fast-R-CNN-like representation relative to `anchor` to a `bbox`.\"\"\"\n anchor_height = anchor[...,SVHN.BOTTOM] - anchor[...,SVHN.TOP]\n anchor_width = anchor[...,SVHN.RIGHT] - anchor[...,SVHN.LEFT]\n anchor_y_center = 0.5 * (anchor_height) + anchor[...,SVHN.TOP]\n anchor_x_center = 0.5 * (anchor_width) + anchor[...,SVHN.LEFT]\n\n center_y, center_x, height, width = fast_rcnn[...,0], fast_rcnn[...,1],fast_rcnn[...,2],fast_rcnn[...,3]\n bbox_height = tf.exp(height) * anchor_height\n bbox_width = tf.exp(width) * anchor_width\n bbox_y_center = center_y * anchor_height + anchor_y_center\n bbox_x_center = center_x * anchor_width + anchor_x_center\n return tf.stack([\n bbox_y_center - 
bbox_height * 0.5,\n bbox_x_center - bbox_width * 0.5,\n bbox_y_center + bbox_height * 0.5,\n bbox_x_center + bbox_width * 0.5,\n ], -1)\n\ndef compute_matches(iou_table, iou_threshold, background_iou_threshold, force_gold_match):\n \"\"\" \n Itfut matrix has shape [gold_boxes, anchors]\n Returns: [final_matches, final_mask, anchor_mask],\n where anchor_mask is one for background and foregroun objects and 0 for objects in the critical interval\n between background iou threshold and iou threshold\n \"\"\"\n matches = tf.argmax(iou_table, 0) \n matched_vals = tf.reduce_max(iou_table, 0)\n mask = matched_vals >= iou_threshold\n if background_iou_threshold is None:\n anchor_mask = tf.ones_like(mask)\n else:\n anchor_mask = tf.logical_or(mask, matched_vals < background_iou_threshold)\n\n if force_gold_match:\n force_match_column_ids = tf.argmax(iou_table, 1)\n force_match_column_indicators = tf.one_hot(force_match_column_ids, depth = tf.shape(iou_table)[1])\n force_match_row_ids = tf.argmax(force_match_column_indicators, 0)\n force_match_column_mask = tf.cast(tf.reduce_max(force_match_column_indicators, 0), tf.bool)\n matches = tf.where(force_match_column_mask, force_match_row_ids, matches)\n mask = tf.logical_or(mask, force_match_column_mask)\n anchor_mask = tf.logical_or(anchor_mask, force_match_column_mask)\n\n return matches, mask, anchor_mask\n\n\ndef bboxes_training(anchors, gold_classes, gold_bboxes, iou_threshold = 0.5, background_iou_threshold = 0.4, force_gold_match = False):\n \"\"\" Compute training data for object detection.\n\n Arguments:\n - `anchors` is an array of four-tuples (top, left, bottom, right)\n - `gold_classes` is an array of zero-based classes of the gold objects\n - `gold_bboxes` is an array of four-tuples (top, left, bottom, right)\n of the gold objects\n - `iou_threshold` is a given threshold\n\n Returns:\n - `anchor_classes` contains for every anchor either 0 for background\n (if no gold object is assigned) or `1 + gold_class` if a gold object\n with `gold_class` as assigned to it\n - `anchor_bboxes` contains for every anchor a four-tuple\n `(center_y, center_x, height, width)` representing the gold bbox of\n a chosen object using parametrization of Fast R-CNN; zeros if not\n gold object was assigned to the anchor\n \"\"\"\n num_anchors = tf.shape(anchors)[0]\n anchor_classes = tf.zeros((num_anchors,), tf.int32)\n assigned_bboxes = tf.zeros((num_anchors, 4), tf.float32)\n\n iou_table = bbox_iou(gold_bboxes, anchors)\n matches, mask, anchor_mask = compute_matches(iou_table, iou_threshold, background_iou_threshold, force_gold_match)\n anchor_classes = tf.where(mask, tf.gather_nd(gold_classes, matches[:,tf.newaxis]) + 1, anchor_classes)\n anchor_bboxes = tf.where(mask[:, tf.newaxis], \n bbox_to_fast_rcnn(anchors, tf.gather_nd(gold_bboxes, matches[:, tf.newaxis])), \n tf.zeros((num_anchors, 4), tf.float32))\n return anchor_classes, anchor_bboxes, anchor_mask\n\n\n\ndef generate_anchors(pyramid_levels, image_size, first_feature_scale=4, anchor_scale=4.0, aspect_ratios = [(1.0, 1.0), (1.4, 0.7), (0.7, 1.4)], num_scales=3): \n boxes_all = []\n for s in range(pyramid_levels):\n boxes_level = []\n for octave in range(num_scales):\n for aspect_h, aspect_w in aspect_ratios:\n scale = 2 ** (octave / num_scales)\n stride = first_feature_scale * 2 ** s\n base_anchor_size = anchor_scale * stride * scale\n anchor_size_x = base_anchor_size * aspect_w / 2.0\n anchor_size_y = base_anchor_size * aspect_h / 2.0\n \n x = np.arange(stride / 2, image_size, stride)\n y = np.arange(stride 
/ 2, image_size, stride)\n xv, yv = np.meshgrid(x, y)\n xv = xv.reshape(-1)\n yv = yv.reshape(-1)\n\n boxes = np.vstack((yv - anchor_size_y, xv - anchor_size_x,\n yv + anchor_size_y, xv + anchor_size_x))\n boxes = np.swapaxes(boxes, 0, 1)\n boxes_level.append(np.expand_dims(boxes, axis=1))\n boxes_level = np.concatenate(boxes_level, axis=1)\n boxes_all.append(boxes_level.reshape(-1, 4))\n return np.vstack(boxes_all) \n\n\ndef WarmStartCosineDecay(initial_learning_rate, num_epochs, num_batches, epoch, epoch_step): \n cosine_schedule = tf.keras.experimental.CosineDecay(initial_learning_rate, num_epochs - 1)\n def compute():\n minibatch_progress = tf.cast(epoch_step + 1, tf.float32) / float(num_batches)\n first_epoch = tf.cast(epoch == 0, tf.float32)\n cosine_decay = cosine_schedule(epoch - 1)\n first_epoch_schedule = minibatch_progress * initial_learning_rate\n return first_epoch * first_epoch_schedule + (1 - first_epoch) * cosine_decay\n return compute \n\n\[email protected]\ndef mask_reduce_sum_over_batch(values, mask):\n batch_size = tf.cast(tf.shape(values)[0], tf.float32)\n if len(tf.shape(values)) == 3:\n mask = tf.expand_dims(mask, -1)\n masked_values = tf.where(mask, values, 0.0)\n return tf.reduce_sum(masked_values) / batch_size\n\n\ndef correct_predictions(gold_classes, gold_bboxes, predicted_classes, predicted_bboxes, iou_threshold=0.5):\n if len(gold_classes) != len(predicted_classes):\n return False\n \n used = [False] * len(gold_classes)\n for cls, bbox in zip(predicted_classes, predicted_bboxes):\n best = None\n for i in range(len(gold_classes)):\n if used[i] or gold_classes[i] != cls:\n continue\n iou = np_bbox_iou(bbox, gold_bboxes[i])\n if iou >= iou_threshold and (best is None or iou > best_iou):\n best, best_iou = i, iou\n if best is None:\n return False\n used[best] = True\n return True\n\n\n\n"
] |
[
[
"numpy.expand_dims",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.minimum",
"tensorflow.keras.experimental.CosineDecay",
"numpy.concatenate",
"tensorflow.where",
"numpy.swapaxes",
"tensorflow.logical_or",
"numpy.arange",
"tensorflow.argmax",
"tensorflow.gather_nd",
"tensorflow.shape",
"tensorflow.exp",
"tensorflow.zeros_like",
"numpy.meshgrid",
"tensorflow.reduce_max",
"tensorflow.maximum",
"tensorflow.ones_like",
"tensorflow.expand_dims",
"tensorflow.math.log",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
HenriqueVarellaEhrenfried/U2GNN
|
[
"ade3d617ddd9cd8aadd310fcb33bc8b71fcae7cf"
] |
[
"U2GNN_pytorch/util.py"
] |
[
"import networkx as nx\nimport numpy as np\nimport random\nimport scipy.sparse as sp\nfrom sklearn.model_selection import StratifiedKFold\n\n\"\"\"Adapted from https://github.com/weihua916/powerful-gnns/blob/master/util.py\"\"\"\n\nclass S2VGraph(object):\n def __init__(self, g, label, node_tags=None, node_features=[]):\n '''\n g: a networkx graph\n label: an integer graph label\n node_tags: a list of integer node tags\n node_features: a torch float tensor, one-hot representation of the tag that is used as input to neural nets\n edge_mat: a torch long tensor, contain edge list, will be used to create torch sparse tensor\n neighbors: list of neighbors (without self-loop)\n '''\n self.label = label\n self.g = g\n self.node_tags = node_tags\n self.neighbors = []\n self.node_features = 0 if node_features == [] else node_features\n self.edge_mat = 0\n self.max_neighbor = 0\n\n\ndef load_data(dataset, degree_as_tag):\n '''\n dataset: name of dataset\n test_proportion: ratio of test train split\n seed: random seed for random splitting of dataset\n '''\n\n print('loading data')\n g_list = []\n label_dict = {}\n feat_dict = {}\n\n with open('../dataset/%s/%s.txt' % (dataset, dataset), 'r') as f:\n n_g = int(f.readline().strip())\n for i in range(n_g):\n row = f.readline().strip().split()\n n, l = [int(w) for w in row]\n if not l in label_dict:\n mapped = len(label_dict)\n label_dict[l] = mapped\n g = nx.Graph()\n node_tags = []\n node_features = []\n n_edges = 0\n for j in range(n):\n g.add_node(j)\n row = f.readline().strip().split()\n tmp = int(row[1]) + 2 # row size being read\n if tmp == len(row):\n # no node attributes\n row = [int(w) for w in row]\n attr = []\n else:\n attr = np.array([float(w) for w in row[tmp:]])\n row = [int(w) for w in row[:tmp]]\n if not row[0] in feat_dict:\n mapped = len(feat_dict)\n feat_dict[row[0]] = mapped\n node_tags.append(feat_dict[row[0]])\n\n # if tmp > len(row):\n if attr != []:\n node_features.append(attr)\n else:\n attr = None\n\n n_edges += row[1]\n for k in range(2, len(row)):\n g.add_edge(j, row[k])\n\n if node_features != []:\n node_features = np.stack(node_features)\n node_feature_flag = True\n else:\n node_features = None\n node_feature_flag = False\n\n assert len(g) == n\n\n # g_list.append(S2VGraph(g, l, node_tags))\n g_list.append(S2VGraph(g, l, node_tags, node_features))\n #add labels and edge_mat \n for g in g_list:\n g.neighbors = [[] for i in range(len(g.g))]\n for i, j in g.g.edges():\n g.neighbors[i].append(j)\n g.neighbors[j].append(i)\n degree_list = []\n for i in range(len(g.g)):\n g.neighbors[i] = g.neighbors[i]\n degree_list.append(len(g.neighbors[i]))\n g.max_neighbor = max(degree_list)\n\n g.label = label_dict[g.label]\n\n edges = [list(pair) for pair in g.g.edges()]\n edges.extend([[i, j] for j, i in edges])\n\n deg_list = list(dict(g.g.degree(range(len(g.g)))).values())\n\n # print(\"\\n\\n LEN(EDGES)\",len(edges))\n # print(\"EDGES > \", edges)\n g.edge_mat = np.transpose(np.array(edges, dtype=np.int32), (1,0))\n\n if degree_as_tag:\n for g in g_list:\n g.node_tags = list(dict(g.g.degree).values())\n\n #Extracting unique tag labels \n tagset = set([])\n for g in g_list:\n tagset = tagset.union(set(g.node_tags))\n\n tagset = list(tagset)\n tag2index = {tagset[i]:i for i in range(len(tagset))}\n\n\n for g in g_list:\n # if g.node_features == []: \n if not node_feature_flag:\n g.node_features = np.zeros((len(g.node_tags), len(tagset)), dtype=np.float32)\n g.node_features[range(len(g.node_tags)), [tag2index[tag] for tag in g.node_tags]] 
= 1\n\n\n print('# classes: %d' % len(label_dict))\n print('# maximum node tag: %d' % len(tagset))\n\n print(\"# data: %d\" % len(g_list))\n\n return g_list, len(label_dict)\n\ndef separate_data(graph_list, fold_idx, seed=0):\n assert 0 <= fold_idx and fold_idx < 10, \"fold_idx must be from 0 to 9.\"\n skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\n\n labels = [graph.label for graph in graph_list]\n idx_list = []\n for idx in skf.split(np.zeros(len(labels)), labels):\n idx_list.append(idx)\n train_idx, test_idx = idx_list[fold_idx]\n\n train_graph_list = [graph_list[i] for i in train_idx]\n test_graph_list = [graph_list[i] for i in test_idx]\n\n return train_graph_list, test_graph_list\n\n\"\"\"Get indexes of train and test sets\"\"\"\ndef separate_data_idx(graph_list, fold_idx, seed=0):\n assert 0 <= fold_idx and fold_idx < 10, \"fold_idx must be from 0 to 9.\"\n skf = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)\n\n labels = [graph.label for graph in graph_list]\n idx_list = []\n for idx in skf.split(np.zeros(len(labels)), labels):\n idx_list.append(idx)\n train_idx, test_idx = idx_list[fold_idx]\n\n return train_idx, test_idx\n\n\"\"\"Convert sparse matrix to tuple representation.\"\"\"\ndef sparse_to_tuple(sparse_mx):\n def to_tuple(mx):\n if not sp.isspmatrix_coo(mx):\n mx = mx.tocoo()\n coords = np.vstack((mx.row, mx.col)).transpose()\n values = mx.data\n shape = mx.shape\n return coords, values, shape\n\n if isinstance(sparse_mx, list):\n for i in range(len(sparse_mx)):\n sparse_mx[i] = to_tuple(sparse_mx[i])\n else:\n sparse_mx = to_tuple(sparse_mx)\n\n return sparse_mx"
] |
[
[
"scipy.sparse.isspmatrix_coo",
"numpy.stack",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
mahnooranjum/Tensorflow_DeepLearning
|
[
"65ab178d4c17efad01de827062d5c85bdfb9b1ca",
"65ab178d4c17efad01de827062d5c85bdfb9b1ca"
] |
[
"Tensorflow_2X_PythonFiles/demo59_gradientdescentsigmoid.py",
"Tensorflow_2X_PythonFiles/demo52_rnn_moviereviews.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"Demo59_GradientDescentSigmoid.ipynb\n\n# **Delve Deeper**\n\nWe need sound conceptual foundation to be good Machine Learning Artists\n\n## Leggo\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nprint(tf.__version__)\n\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\ndef sigmoid_hat(x):\n return sigmoid(x) * (1 - sigmoid(x))\n\nfrom sklearn.datasets.samples_generator import make_moons\nfrom sklearn.datasets.samples_generator import make_circles\nfrom sklearn.datasets.samples_generator import make_blobs\n# generate 2d classification dataset\nn = 500\nX, y = make_moons(n_samples=n, noise=0.1)\n# scatter plot, dots colored by class value\ndf = pd.DataFrame(dict(x=X[:,0], y=X[:,1], label=y))\ncolors = {0:'red', 1:'blue'}\nfig, ax = plt.subplots()\ngrouped = df.groupby('label')\nfor key, group in grouped:\n group.plot(ax=ax, kind='scatter', x='x', y='y', label=key, color=colors[key])\nplt.show()\n\ndatadict = {'X1': X[:,0],'X2' : X[:,1], 'target': y}\ndata = pd.DataFrame(data=datadict)\n\ndof = 2\n# Initialize weights\nweights = np.random.normal(scale= 1/dof**.5, size=dof)\nprint(weights)\nepochs = 2000\nlr = 0.2\n\nX = data.iloc[:, [0,1]].values\nY = data.iloc[:, 2].values\n\nfor e in range(epochs):\n delta_w = np.zeros(weights.shape)\n for x, y in zip(X, Y):\n pred = sigmoid(np.dot(x, weights))\n error = y - pred\n\n sigma = error * pred * (1 - pred)\n\n # error x gradient x inputs\n delta_w += sigma * x\n\n weights += lr * delta_w / n\n\n\n if e % (epochs / 20) == 0:\n Y_pred = sigmoid(np.dot(X, weights))\n loss = np.mean((Y_pred - Y) ** 2)\n print(\"Train loss: \", loss)\n\nY_pred = sigmoid(np.dot(X, weights))\nY_pred = Y_pred > 0.5\n\nfrom matplotlib.colors import ListedColormap\nX_set, y_set = X, Y_pred\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.1),\n np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.1))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(y_set)):\n plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],\n c = ListedColormap(('red', 'blue'))(i), label = j)\nplt.title('Output')\nplt.xlabel('X')\nplt.ylabel('y')\nplt.legend()\nplt.show()",
"# -*- coding: utf-8 -*-\n\"\"\"Demo52_RNN_MovieReviews.ipynb\n\n\n# **Spit some [tensor] flow**\n\nWe need to learn the intricacies of tensorflow to master deep learning\n\n`Let's get this over with`\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport cv2\nprint(tf.__version__)\n\n\"\"\"## Look at the following equations \n\nWell now we're going to go into the details of the implementation; \n\n```\nx_pred_t = w0 + w1 * x_t-1 + w2 * x_t-2 + w3 * x_t-3 + w4 * x_t-4\n\nx_pred_t+1 = w0 + w1 * x_pred_t + w2 * x_t-1 + w3 * x_t-2 + w4 * x_t-3\n```\n\nThis is an autoregressive model now. \n\n```\nx_pred_5 = w0 + w1 * x_4 + w2 * x_3 + w3 * x_2 + w4 * x_1\n\nx_pred_6 = w0 + w1 * x_pred_5 + w2 * x_4 + w3 * x_3 + w4 * x_2\n\n```\n\nWe know that the predictions at time T in an RNN depend on all the previous times. Thus, indirectly, y(t) depends on x(t), x(t-1), x(t-2),....., x(2), x(1)\n\nTo optimize the weights, we must take the derivative of the equation containing the weights; however, there will be numerous W_input_to_hidden weights in the network. \n\n\nThe RNN keeps nesting the older timestamps; the derivatives use multiplication and chain rule in composite functions, thus, the more older the timestamp, the more its gradient vanishes.\n\n## So how do GRU's work?\n\nThrowback to the RNN equation: \n\nh(t) = activation(W(x,h) x(t) + W(h,h) h(t-1) + b(h))\n\nGRU's calulate two other things to calculate h(t):\n\n- update gate vector z(t)\n\n- reset gate vector r(t)\n\nz(t) = sigmoid(W(x,z) x(t) + W(h,z) h(t-1) + b(z))\n\nr(t) = sigmoid(W(x,r) x(t) + W(h,r) h(t-1) + b(r))\n\n```\nh(t) = (1 - z(t)) * h(t-1) +\n\n z(t) tanh(W(x,h) x(t) + W(h,h) (r(t) * h(t-1)) + b(h))\n```\n\n(*) ==== element wise multiplication\n\nz(t), r(t), h(t) ===== Size U\n\n## Z(t) \n\nShould we take the new value of h(t)? or keep h(t-1)? \n\n- z(t) close to 0, take h(t-1)\n\n- z(t) close to 1, take h(t)\n\nSo: \n\n```\nh(t) = (1 - z(t)) * h(t-1) +\n\n z(t) tanh(W(x,h) x(t) + W(h,h) (r(t) * h(t-1)) + b(h))\n```\n\nBecomes \n\n```\nh(t) = (factor keep h(t-1)) * h(t-1) + \n\n (discard h(t-1)) * RNN(x(t), h(t-1))\n```\n\n## R(t) \n\nChange the value of h(t-1) \n\n- r(t) close to 0, zero value h(t-1)\n\n- r(t) close to 1, keep value h(t-1)\n\n\n## So how do LSTM's work?\n\n### https://towardsdatascience.com/\n\nWe add another state to the mix, the cell state c(t)\n\nwe add three different neurons:\n\nforget neuron = f(t) \n\nThis gate decides which information should be thrown away or kept. Input from h(t-1) and x(t) is passed through this gate; and it uses sigmoid to either forget (0) or remember (1) it.\n\n\ninput gate neuron = i(t)\n\nWe use this to update the cell state. We pass the h(t-1) and x(t) to the sigmoid function. This will decide which values will be updated in the cell state.\n\noutput gate neuron = o(t) \n\nThe output gate decides what the next hidden state h(t) should be. Remember that the hidden state contains information on previous inputs. The hidden state is also used for predictions. \n\nFirst, we pass the previous hidden state and the current input into a sigmoid function.\n\nThen we pass the newly modified cell state to the tanh function. \n\nWe multiply the tanh output with the sigmoid output to decide what information the hidden state should carry. \n\nThe output is the hidden state. 
\n\nThe new cell state and the new hidden is then carried over to the next time step.\n\n```\n\nf(t) = sigmoid ( W(x,f) x(t) + W(h, f)h(t-1) + b(f) )\n\ni(t) = sigmoid ( W(x,i) x(t) + W(h, i)h(t-1) + b(i) ) \n\no(t) = sigmoid ( W(x,o) x(t) + W(h, o)h(t-1) + b(o) ) \n\n\nc(t) = f(t) * c(t-1) + \n\n i(t) * tanh ( W(x,c) x(t) + W(h,c) h(t-1) + b(c) ) \n\n\nh(t) = o(t) * tanh( c(t) )\n\n```\n\n## So we OHE the last NLP problem, why not do the same and feed it to the neural network? Well because, features in a language, are not independent. \n\n\nLet's explore this: \n\nThe quick brown fox jumps over __________________\n\nSee you know the end of this sentence because you know the words right? \n\nwell wb this: \n\nover _____________________\n\nNow we don't know the end of this sentence. \n\nSo in tensorflow, to save computations, we have the embedding layer: \n\n### Step 1: Words to ints\n\nNothing deep about deep learning ----> 13 43 32 43 98\n\n### Step 2: Ints to word vector \n\n13 43 32 43 98 ------> [0.9, 1.2] [-0.4, 0.2] [0.3, 0.3] [-0.4, 0.2] [0.2, 0.5] \n\nT -----> T x D\n\n\n### We can use word2vec to make sure the embedding layer has similar words close to each other\n\"\"\"\n\nfrom tensorflow.keras.layers import Input, Dropout, Dense, Flatten, SimpleRNN, LSTM, GlobalMaxPooling1D, Embedding\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import SGD, Adam, Adamax\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\n\"\"\"## Let's import the dataset\"\"\"\n\ndef evaluation_tf(report, y_test, y_pred, classes):\n plt.plot(report.history['loss'], label = 'training_loss')\n plt.plot(report.history['val_loss'], label = 'validation_loss')\n plt.legend()\n plt.show()\n plt.plot(report.history['accuracy'], label = 'training_accuracy')\n plt.plot(report.history['val_accuracy'], label = 'validation_accuracy')\n plt.legend()\n plt.show()\n\n from sklearn.metrics import confusion_matrix\n import itertools\n cm = confusion_matrix(y_test, y_pred)\n\n plt.figure(figsize=(10,10))\n plt.imshow(cm, cmap=plt.cm.Blues)\n for i,j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i,j], 'd'),\n horizontalalignment = 'center',\n color='black')\n plt.xlabel(\"Predicted labels\")\n plt.ylabel(\"True labels\")\n plt.xticks(range(0,classes))\n plt.yticks(range(0,classes))\n plt.title('Confusion matrix')\n plt.colorbar()\n plt.show()\n\ndata = pd.read_csv(\"sample_data/train.tsv\", delimiter='\\t')\ndata.head()\n\nY = len(data.Sentiment.unique())\nprint(Y)\n\nX = data.iloc[:, 2]\ny = data.iloc[:, -1].values\n\n# TRAIN TEST SPLIT\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)\n\nMAX_SIZE = 20000\ntokenizer = Tokenizer(num_words=MAX_SIZE)\ntokenizer.fit_on_texts(X_train)\nsequences_train = tokenizer.texts_to_sequences(X_train)\nsequences_test = tokenizer.texts_to_sequences(X_test)\n\nword2index = tokenizer.word_index\nV = len(word2index)\nprint(\"tokens = \" + str(V))\n\nX_train = pad_sequences(sequences_train)\n\nX_test = pad_sequences(sequences_test, maxlen=X_train.shape[1])\n\nprint(X_train.shape)\nprint(X_test.shape)\nprint(y_train.shape)\nprint(y_test.shape)\n\nN, T = X_train.shape\n\n# Let's talk about D, what is it and how do we set it? 
\n# This is the dimensionality of the embedding layer, essentially the vector that each word becomes \n\nD = 10 \n# hidden units\nU = 20\n\ni_layer = Input(shape = (T,))\nh_layer = Embedding(V+1, D)(i_layer)\n# V+1 because https://github.com/tensorflow/tensorflow/issues/38619\nh_layer = LSTM(U,return_sequences=True)(h_layer)\nh_layer = GlobalMaxPooling1D()(h_layer)\no_layer = Dense(Y, activation='softmax')(h_layer)\n\nmodel = Model(i_layer, o_layer)\n\nmodel.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nreport = model.fit(X_train, y_train, epochs = 20, validation_data=(X_test, y_test))\n\nplt.plot(report.history['loss'], label='training_loss')\nplt.plot(report.history['val_loss'], label='validation_loss')\nplt.legend()\n\n\"\"\"## Seems like the model is overfitting, let's tune it\"\"\"\n\n# Let's talk about D, what is it and how do we set it? \n# This is the dimensionality of the embedding layer, essentially the vector that each word becomes \n\nD = 15\n# hidden units\nU = 30\n\ni_layer = Input(shape = (T,))\nh_layer = Embedding(V+1, D)(i_layer)\n# V+1 because https://github.com/tensorflow/tensorflow/issues/38619\n\nh_layer = LSTM(U,return_sequences=True)(h_layer)\nh_layer = GlobalMaxPooling1D()(h_layer)\nh_layer = Dense(5, activation='relu')(h_layer)\nh_layer = Dropout(0.3)(h_layer)\no_layer = Dense(Y, activation='softmax')(h_layer)\n\nmodel = Model(i_layer, o_layer)\n\nmodel.compile(\n optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nreport = model.fit(X_train, y_train, epochs = 10, validation_data=(X_test, y_test))\n\nplt.plot(report.history['loss'], label='training_loss')\nplt.plot(report.history['val_loss'], label='validation_loss')\nplt.legend()\n\nprint(X_train.shape)\nprint(X_test.shape)\n\ny_pred = model.predict(X_test).argmax(axis=1)\n\nevaluation_tf(report, y_test, y_pred, Y)"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.dot",
"matplotlib.pyplot.title",
"numpy.unique",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"numpy.random.normal",
"numpy.mean",
"matplotlib.colors.ListedColormap",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"sklearn.datasets.samples_generator.make_moons",
"numpy.zeros",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Dropout",
"pandas.read_csv",
"tensorflow.keras.layers.Embedding",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.GlobalMaxPooling1D",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.preprocessing.text.Tokenizer",
"matplotlib.pyplot.colorbar",
"tensorflow.keras.layers.LSTM",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"tensorflow.keras.layers.Input"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
LanceaKing/kaldi
|
[
"eb205a83f08fb8056ba1deb03c505ec8b722d4d9",
"eb205a83f08fb8056ba1deb03c505ec8b722d4d9"
] |
[
"egs/chime5/s5b/local/extract_noises.py",
"egs/wsj/s5/steps/segmentation/internal/get_default_targets_for_out_of_segments.py"
] |
[
"#!/usr/bin/env python3\n\nimport argparse\nimport json\nimport logging\nimport os\nimport sys\nimport scipy.io.wavfile as siw\nimport math\nimport numpy as np\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n \"\"\"Extract noises from the corpus based on the non-speech regions.\n e.g. {} /export/corpora4/CHiME5/audio/train/ \\\\\n /export/corpora4/CHiME5/transcriptions/train/ \\\\\n /export/b05/zhiqiw/noise/\"\"\".format(sys.argv[0]))\n\n parser.add_argument(\"--segment-length\", default=20)\n parser.add_argument(\"audio_dir\", help=\"\"\"Location of the CHiME5 Audio files. e.g. /export/corpora4/CHiME5/audio/train/\"\"\")\n parser.add_argument(\"trans_dir\", help=\"\"\"Location of the CHiME5 Transcriptions. e.g. /export/corpora4/CHiME5/transcriptions/train/\"\"\")\n parser.add_argument(\"audio_list\", help=\"\"\"List of ids of the CHiME5 recordings from which noise is extracted. e.g. local/distant_audio_list\"\"\")\n parser.add_argument(\"out_dir\", help=\"Output directory to write noise files. e.g. /export/b05/zhiqiw/noise/\")\n\n args = parser.parse_args()\n return args\n\n\ndef Trans_time(time, fs):\n units = time.split(':')\n time_second = float(units[0]) * 3600 + float(units[1]) * 60 + float(units[2])\n return int(time_second*fs)\n\n\ndef Get_time(conf, tag, mic, fs):\n for i in conf:\n st = Trans_time(i['start_time'][mic], fs)\n ed = Trans_time(i['end_time'][mic], fs)\n tag[st:ed] = 0\n return tag\n\n\ndef write_noise(out_dir, seg, audio, sig, tag, fs, cnt):\n sig_noise = sig[np.nonzero(tag)]\n for i in range(math.floor(len(sig_noise)/(seg*fs))):\n siw.write(out_dir +'/noise'+str(cnt)+'.wav', fs, sig_noise[i*seg*fs:(i+1)*seg*fs])\n cnt += 1\n return cnt\n\n\ndef main():\n args = get_args()\n\n if not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n\n wav_list = open(args.audio_list).readlines()\n\n cnt = 1\n for i, audio in enumerate(wav_list):\n parts = audio.strip().split('.')\n if len(parts) == 2:\n # Assuming distant mic with name like S03_U01.CH1\n session, mic = parts[0].split('_')\n channel = parts[1]\n base_name = session + \"_\" + mic + \".\" + channel\n else:\n # Assuming close talk mic with name like S03_P09\n session, mic = audio.strip().split('_')\n base_name = session + \"_\" + mic\n fs, sig = siw.read(args.audio_dir + \"/\" + base_name + '.wav')\n tag = np.ones(len(sig))\n if i == 0 or session != session_p:\n with open(args.trans_dir + \"/\" + session + '.json') as f:\n conf = json.load(f)\n tag = Get_time(conf, tag, mic, fs)\n cnt = write_noise(args.out_dir, args.segment_length, audio, sig, tag, fs, cnt)\n session_p = session\n\n\nif __name__ == '__main__':\n main()\n",
"#!/usr/bin/env python\n\n# Copyright 2017 Vimal Manohar\n# Apache 2.0\n\n\"\"\"\nThis script gets targets for the whole recording\nby adding 'default_targets' vector read from file specified by\n--default-targets option for the out-of-segments regions and\nzeros for all other frames. See steps/segmentation/lats_to_targets.sh\nfor details about the targets matrix.\nBy default, the 'default_targets' would be [ 1 0 0 ], which means all\nthe out-of-segment regions are assumed as silence. But depending, on\nthe application and data, this could be [ 0 0 0 ] or [ 0 0 1 ] or\nsomething with fractional weights.\n\"\"\"\nfrom __future__ import division\n\nimport argparse\nimport logging\nimport numpy as np\nimport subprocess\nimport sys\n\nsys.path.insert(0, 'steps')\nimport libs.common as common_lib\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.INFO)\nformatter = logging.Formatter(\"%(asctime)s [%(pathname)s:%(lineno)s - \"\n \"%(funcName)s - %(levelname)s ] %(message)s\")\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\n\ndef get_args():\n parser = argparse.ArgumentParser(\n description=\"\"\"This script gets targets for the whole recording\n by adding 'default_targets' vector read from file specified by\n --default-targets option for the out-of-segments regions and\n zeros for all other frames. See steps/segmentation/lats_to_targets.sh\n for details about the targets matrix.\n By default, the 'default_targets' would be [ 1 0 0 ], which means all\n the out-of-segment regions are assumed as silence. But depending, on\n the application and data, this could be [ 0 0 0 ] or [ 0 0 1 ] or\n something with fractional weights.\n \"\"\")\n\n parser.add_argument(\"--frame-shift\", type=float, default=0.01,\n help=\"Frame shift value in seconds\")\n parser.add_argument(\"--default-targets\", type=str, default=None,\n action=common_lib.NullstrToNoneAction,\n help=\"Vector of default targets for out-of-segments \"\n \"region\")\n parser.add_argument(\"--length-tolerance\", type=int, default=2,\n help=\"Tolerate length mismatches of this many frames\")\n parser.add_argument(\"--verbose\", type=int, default=0, choices=[0,1,2],\n help=\"Verbose level\")\n\n parser.add_argument(\"--reco2num-frames\", type=str, required=True,\n action=common_lib.NullstrToNoneAction,\n help=\"\"\"The number of frames per reco\n is used to determine the num-rows of the output matrix\n \"\"\")\n parser.add_argument(\"reco2utt\", type=str,\n help=\"\"\"reco2utt file.\n The format is <reco> <utt-1> <utt-2> ... 
<utt-N>\"\"\")\n parser.add_argument(\"segments\", type=str,\n help=\"Input kaldi segments file\")\n parser.add_argument(\"out_targets_ark\", type=str,\n help=\"\"\"Output archive to which the\n recording-level matrix will be written in text\n format\"\"\")\n\n args = parser.parse_args()\n\n if args.frame_shift < 0.0001 or args.frame_shift > 1:\n raise ValueError(\"--frame-shift should be in [0.0001, 1]; got {0}\"\n \"\".format(args.frame_shift))\n\n if args.verbose >= 2:\n logger.setLevel(logging.DEBUG)\n handler.setLevel(logging.DEBUG)\n\n return args\n\n\ndef run(args):\n reco2utt = {}\n with common_lib.smart_open(args.reco2utt) as f:\n for line in f:\n parts = line.strip().split()\n if len(parts) < 2:\n raise ValueError(\"Could not parse line {0}\".format(line))\n reco2utt[parts[0]] = parts[1:]\n\n reco2num_frames = {}\n with common_lib.smart_open(args.reco2num_frames) as f:\n for line in f:\n parts = line.strip().split()\n if len(parts) != 2:\n raise ValueError(\"Could not parse line {0}\".format(line))\n if parts[0] not in reco2utt:\n continue\n reco2num_frames[parts[0]] = int(parts[1])\n\n segments = {}\n with common_lib.smart_open(args.segments) as f:\n for line in f:\n parts = line.strip().split()\n if len(parts) not in [4, 5]:\n raise ValueError(\"Could not parse line {0}\".format(line))\n utt = parts[0]\n reco = parts[1]\n if reco not in reco2utt:\n continue\n start_time = float(parts[2])\n end_time = float(parts[3])\n segments[utt] = [reco, start_time, end_time]\n\n num_utt_err = 0\n num_utt = 0\n num_reco = 0\n\n if args.default_targets is not None:\n default_targets = np.matrix(common_lib.read_matrix_ascii(args.default_targets))\n else:\n default_targets = np.matrix([[1, 0, 0]])\n assert (np.shape(default_targets)[0] == 1\n and np.shape(default_targets)[1] == 3)\n\n with common_lib.smart_open(args.out_targets_ark, 'w') as f:\n for reco, utts in reco2utt.items():\n reco_mat = np.repeat(default_targets, reco2num_frames[reco],\n axis=0)\n utts.sort(key=lambda x: segments[x][1]) # sort on start time\n for i, utt in enumerate(utts):\n if utt not in segments:\n num_utt_err += 1\n continue\n segment = segments[utt]\n\n start_frame = int(segment[1] / args.frame_shift)\n end_frame = int(segment[2] / args.frame_shift)\n num_frames = end_frame - start_frame\n\n if end_frame > reco2num_frames[reco]:\n end_frame = reco2num_frames[reco]\n num_frames = end_frame - start_frame\n\n reco_mat[start_frame:end_frame] = np.zeros([num_frames, 3])\n num_utt += 1\n\n if reco_mat.shape[0] > 0:\n common_lib.write_matrix_ascii(f, reco_mat.tolist(),\n key=reco)\n num_reco += 1\n\n logger.info(\"Got default out-of-segment targets for {num_reco} recordings \"\n \"containing {num_utt} in-segment regions; \"\n \"failed to account {num_utt_err} utterances\"\n \"\".format(num_reco=num_reco, num_utt=num_utt,\n num_utt_err=num_utt_err))\n\n if num_utt == 0 or num_utt_err > num_utt // 2 or num_reco == 0:\n raise RuntimeError\n\n\ndef main():\n args = get_args()\n try:\n run(args)\n except Exception:\n raise\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"scipy.io.wavfile.read",
"numpy.nonzero"
],
[
"numpy.matrix",
"numpy.repeat",
"numpy.shape",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vstark21/SDC_in_simulator
|
[
"18e4cd26de7dcebfa33c0d41e1853400753b5b6b"
] |
[
"Track-1/utils.py"
] |
[
"import tensorflow as tf\nimport numpy as np\nimport csv\nimport cv2\nfrom scipy import ndimage\nimport os\nimport sklearn\nfrom sklearn.model_selection import train_test_split\n\nglobal SOURCE_PATH, EPOCHS\nSOURCE_PATH = 'new_data2/'\nEPOCHS = 1\n\ndef create_model():\n \n print(\"Creating Model\")\n\n # Ask to create a new model or use an existing one\n\n if input(\"Want a new model[y/n] : \") == 'n' and 'model.h5' in os.listdir():\n print(\"-------------------------------- Loading previous model --------------------------------\")\n model = tf.keras.models.load_model('model.h5')\n \n else:\n print(\"-------------------------------- Creating new model --------------------------------\")\n model = tf.keras.models.Sequential([\n tf.keras.layers.Lambda(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3)),\n tf.keras.layers.Cropping2D(cropping=((50, 20), (0, 0))),\n tf.keras.layers.Conv2D(6, (5, 5), activation='relu'),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Conv2D(6, (3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dense(120),\n tf.keras.layers.Dense(84),\n tf.keras.layers.Dense(1)])\n\n model.compile(loss='mse', optimizer='adam')\n return model\n \n\ndef generator(samples, batch_size=32):\n\n num_samples = len(samples)\n np.random.shuffle(samples)\n\n while 1: # Loop forever so the generator never terminates\n \n for offset in range(0, num_samples, batch_size):\n\n batch_samples = samples[offset:offset+batch_size]\n\n images = []\n measurements = []\n\n for batch_sample in batch_samples:\n\n source_path = batch_sample[0][batch_sample[0].index('IMG'):]\n filename = SOURCE_PATH + source_path\n image = cv2.imread(filename)[:, :, ::-1]\n images.append(image)\n measurements.append(float(batch_sample[3]))\n\n X_train = np.array(images)\n y_train = np.array(measurements)\n\n yield sklearn.utils.shuffle(X_train, y_train)\n\ndef train_model(model):\n \n global SOURCE_PATH, EPOCHS\n\n samples = []\n print(\"Reading CSV\")\n with open(SOURCE_PATH + 'driving_log.csv') as csv_file:\n reader = csv.reader(csv_file)\n for line in reader:\n samples.append(line)\n \n train_samples, validation_samples = train_test_split(samples, test_size=0.2)\n\n train_generator = generator(train_samples, batch_size=512)\n validation_generator = generator(validation_samples, batch_size=512)\n \n try:\n for epoch in range(EPOCHS):\n \n print(f\"In Epoch : {epoch}\")\n \n x_train, y_train = next(train_generator)\n x_val, y_val = next(validation_generator)\n\n model.fit(x_train, y_train, steps_per_epoch=len(x_train), validation_data=(x_val, y_val), epochs=1)\n except KeyboardInterrupt:\n pass\n \n model.save('model.h5')"
] |
[
[
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Dense",
"sklearn.utils.shuffle",
"tensorflow.keras.layers.Conv2D",
"sklearn.model_selection.train_test_split",
"numpy.random.shuffle",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Cropping2D",
"numpy.array",
"tensorflow.keras.layers.Flatten"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
nyu-dl/dl4mt-nonauto
|
[
"f737794c3ca48ec146e82c387923406130f4b602"
] |
[
"utils.py"
] |
[
"import math\nimport ipdb\nimport torch\nimport random\nimport numpy as np\nimport _pickle as pickle\nimport revtok\nimport os\nfrom itertools import groupby\nimport getpass\nfrom collections import Counter\n\nfrom torch.autograd import Variable\nfrom torchtext import data, datasets\nfrom nltk.translate.gleu_score import sentence_gleu, corpus_gleu\nfrom nltk.translate.bleu_score import closest_ref_length, brevity_penalty, modified_precision, SmoothingFunction\nfrom contextlib import ExitStack\nfrom collections import OrderedDict\nimport fractions\n\nimport torchvision\n\ntry:\n fractions.Fraction(0, 1000, _normalize=False)\n from fractions import Fraction\nexcept TypeError:\n from nltk.compat import Fraction\n\ndef sentence_bleu(references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25),\n smoothing_function=None, auto_reweigh=False,\n emulate_multibleu=False):\n\n return corpus_bleu([references], [hypothesis],\n weights, smoothing_function, auto_reweigh,\n emulate_multibleu)\n\n\ndef corpus_bleu(list_of_references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25),\n smoothing_function=None, auto_reweigh=False,\n emulate_multibleu=False):\n p_numerators = Counter() # Key = ngram order, and value = no. of ngram matches.\n p_denominators = Counter() # Key = ngram order, and value = no. of ngram in ref.\n hyp_lengths, ref_lengths = 0, 0\n\n if len(list_of_references) != len(hypotheses):\n print (\"The number of hypotheses and their reference(s) should be the same\")\n return (0, (0, 0, 0, 0), 0, 0, 0)\n\n # Iterate through each hypothesis and their corresponding references.\n for references, hypothesis in zip(list_of_references, hypotheses):\n # For each order of ngram, calculate the numerator and\n # denominator for the corpus-level modified precision.\n for i, _ in enumerate(weights, start=1):\n p_i = modified_precision(references, hypothesis, i)\n p_numerators[i] += p_i.numerator\n p_denominators[i] += p_i.denominator\n\n # Calculate the hypothesis length and the closest reference length.\n # Adds them to the corpus-level hypothesis and reference counts.\n hyp_len = len(hypothesis)\n hyp_lengths += hyp_len\n ref_lengths += closest_ref_length(references, hyp_len)\n\n # Calculate corpus-level brevity penalty.\n bp = brevity_penalty(ref_lengths, hyp_lengths)\n\n # Uniformly re-weighting based on maximum hypothesis lengths if largest\n # order of n-grams < 4 and weights is set at default.\n if auto_reweigh:\n if hyp_lengths < 4 and weights == (0.25, 0.25, 0.25, 0.25):\n weights = ( 1 / hyp_lengths ,) * hyp_lengths\n\n # Collects the various precision values for the different ngram orders.\n p_n = [Fraction(p_numerators[i], p_denominators[i], _normalize=False)\n for i, _ in enumerate(weights, start=1)]\n\n p_n_ = [xx.numerator / xx.denominator * 100 for xx in p_n]\n\n # Returns 0 if there's no matching n-grams\n # We only need to check for p_numerators[1] == 0, since if there's\n # no unigrams, there won't be any higher order ngrams.\n if p_numerators[1] == 0:\n return (0, (0, 0, 0, 0), 0, 0, 0)\n\n # If there's no smoothing, set use method0 from SmoothinFunction class.\n if not smoothing_function:\n smoothing_function = SmoothingFunction().method0\n # Smoothen the modified precision.\n # Note: smoothing_function() may convert values into floats;\n # it tries to retain the Fraction object as much as the\n # smoothing method allows.\n p_n = smoothing_function(p_n, references=references, hypothesis=hypothesis,\n hyp_len=hyp_len, emulate_multibleu=emulate_multibleu)\n s = (w * math.log(p_i) for i, (w, 
p_i) in enumerate(zip(weights, p_n)))\n s = bp * math.exp(math.fsum(s)) * 100\n final_bleu = round(s, 4) if emulate_multibleu else s\n return (final_bleu, p_n_, bp, ref_lengths, hyp_lengths)\n\nINF = 1e10\nTINY = 1e-9\ndef computeGLEU(outputs, targets, corpus=False, tokenizer=None):\n if tokenizer is None:\n tokenizer = revtok.tokenize\n\n outputs = [tokenizer(o) for o in outputs]\n targets = [tokenizer(t) for t in targets]\n\n if not corpus:\n return torch.Tensor([sentence_gleu(\n [t], o) for o, t in zip(outputs, targets)])\n return corpus_gleu([[t] for t in targets], [o for o in outputs])\n\ndef computeBLEU(outputs, targets, corpus=False, tokenizer=None):\n if tokenizer is None:\n tokenizer = revtok.tokenize\n\n outputs = [tokenizer(o) for o in outputs]\n targets = [tokenizer(t) for t in targets]\n\n if corpus:\n return corpus_bleu([[t] for t in targets], [o for o in outputs], emulate_multibleu=True)\n else:\n return [sentence_bleu([t], o)[0] for o, t in zip(outputs, targets)]\n #return torch.Tensor([sentence_bleu([t], o)[0] for o, t in zip(outputs, targets)])\n\ndef computeBLEUMSCOCO(outputs, targets, corpus=True, tokenizer=None):\n # outputs is list of 5000 captions\n # targets is list of 5000 lists each length of 5\n if tokenizer is None:\n tokenizer = revtok.tokenize\n\n outputs = [tokenizer(o) for o in outputs]\n new_targets = []\n for i, t in enumerate(targets):\n new_targets.append([tokenizer(tt) for tt in t])\n #targets[i] = [tokenizer(tt) for tt in t]\n\n if corpus:\n return corpus_bleu(new_targets, outputs, emulate_multibleu=True)\n else:\n return [sentence_bleu(new_t, o)[0] for o, new_t in zip(outputs, new_targets)]\n\ndef compute_bp(hypotheses, list_of_references):\n hyp_lengths, ref_lengths = 0, 0\n for references, hypothesis in zip(list_of_references, hypotheses):\n hyp_len = len(hypothesis)\n hyp_lengths += hyp_len\n ref_lengths += closest_ref_length(references, hyp_len)\n\n # Calculate corpus-level brevity penalty.\n bp = brevity_penalty(ref_lengths, hyp_lengths)\n return bp\n\ndef computeGroupBLEU(outputs, targets, tokenizer=None, bra=10, maxmaxlen=80):\n if tokenizer is None:\n tokenizer = revtok.tokenize\n\n outputs = [tokenizer(o) for o in outputs]\n targets = [tokenizer(t) for t in targets]\n maxlens = max([len(t) for t in targets])\n print(maxlens)\n maxlens = min([maxlens, maxmaxlen])\n nums = int(np.ceil(maxlens / bra))\n outputs_buckets = [[] for _ in range(nums)]\n targets_buckets = [[] for _ in range(nums)]\n for o, t in zip(outputs, targets):\n idx = len(o) // bra\n if idx >= len(outputs_buckets):\n idx = -1\n outputs_buckets[idx] += [o]\n targets_buckets[idx] += [t]\n\n for k in range(nums):\n print(corpus_bleu([[t] for t in targets_buckets[k]], [o for o in outputs_buckets[k]], emulate_multibleu=True))\n\nclass TargetLength:\n def __init__(self, lengths=None): # data_type : sum, avg\n self.lengths = lengths if lengths != None else dict()\n\n def accumulate(self, batch):\n src_len = (batch.src != 1).sum(-1).cpu().data.numpy()\n trg_len = (batch.trg != 1).sum(-1).cpu().data.numpy()\n for (slen, tlen) in zip(src_len, trg_len):\n if not slen in self.lengths:\n self.lengths[slen] = (1, int(tlen))\n else:\n (count, acc) = self.lengths[slen]\n self.lengths[slen] = (count + 1, acc + int(tlen))\n\n def get_trg_len(self, src_len):\n if not src_len in self.lengths:\n return self.get_trg_len(src_len + 1) - 1\n else:\n (count, acc) = self.lengths[src_len]\n return acc / float(count)\n\ndef organise_trg_len_dic(trg_len_dic):\n trg_len_dic = {k:int(v[1]/float(v[0])) for (k, 
v) in trg_len_dic.items()}\n return trg_len_dic\n\ndef query_trg_len_dic(trg_len_dic, q):\n max_src_len = max(trg_len_dic.keys())\n if q <= max_src_len:\n if q in trg_len_dic:\n return trg_len_dic[q]\n else:\n return query_trg_len_dic(trg_len_dic, q+1) - 1\n else:\n return int(math.floor( trg_len_dic[max_src_len] / max_src_len * q ))\n\ndef make_decoder_masks(source_masks, trg_len_dic):\n batch_size, src_max_len = source_masks.size()\n src_len = (source_masks == 1).sum(-1).cpu().numpy()\n trg_len = [int(math.floor(query_trg_len_dic(trg_len_dic, src) * 1.1)) for src in src_len]\n trg_max_len = max(trg_len)\n decoder_masks = np.zeros((batch_size, trg_max_len))\n #decoder_masks = Variable(torch.zeros(batch_size, trg_max_len), requires_grad=False)\n for idx, tt in enumerate(trg_len):\n decoder_masks[idx][:tt] = 1\n result = torch.from_numpy(decoder_masks).float()\n if source_masks.is_cuda:\n result = result.cuda()\n return result\n\ndef double_source_masks(source_masks):\n batch_size, src_max_len = source_masks.size()\n src_len = (source_masks == 1).sum(-1).cpu().numpy()\n decoder_masks = np.zeros((batch_size, src_max_len * 2))\n for idx, tt in enumerate(src_len):\n decoder_masks[idx][:2*tt] = 1\n result = torch.from_numpy(decoder_masks).float()\n if source_masks.is_cuda:\n result = result.cuda()\n return result\n\nclass Metrics:\n\n def __init__(self, name, *metrics, data_type=\"sum\"): # data_type : sum, avg\n self.count = 0\n self.metrics = OrderedDict((metric, 0) for metric in metrics)\n self.name = name\n self.data_type = data_type\n\n def accumulate(self, count, *values, print_iter=None):\n self.count += count\n if print_iter is not None:\n print(print_iter, end=' ')\n for value, metric in zip(values, self.metrics):\n if isinstance(value, torch.autograd.Variable):\n value = value.data\n if torch.is_tensor(value):\n with torch.cuda.device_of(value):\n value = value.cpu()\n value = value.float().sum()\n\n if print_iter is not None:\n print('%.3f' % value, end=' ')\n if self.data_type == \"sum\":\n self.metrics[metric] += value\n elif self.data_type == \"avg\":\n self.metrics[metric] += value * count\n\n if print_iter is not None:\n print()\n return values[0] # loss\n\n def __getattr__(self, key):\n if key in self.metrics:\n return self.metrics[key] / (self.count + 1e-9)\n raise AttributeError\n\n def __repr__(self):\n return (\"{}: \".format(self.name) +\n \"[{}]\".format( ', '.join([\"{:.4f}\".format(getattr(self, metric)) for metric, value in self.metrics.items() if value is not 0 ] ) ) )\n\n def tensorboard(self, expt, i):\n for metric in self.metrics:\n value = getattr(self, metric)\n if value != 0:\n #expt.add_scalar_value(f'{self.name}_{metric}', value, step=i)\n expt.add_scalar_value(\"{}_{}\".format(self.name, metric), value, step=i)\n\n def reset(self):\n self.count = 0\n self.metrics.update({metric: 0 for metric in self.metrics})\n\nclass Best:\n def __init__(self, cmp_fn, *metrics, model=None, opt=None, path='', gpu=0, which=[0]):\n self.cmp_fn = cmp_fn\n self.model = model\n self.opt = opt\n self.path = path + '.pt'\n self.metrics = OrderedDict((metric, None) for metric in metrics)\n self.gpu = gpu\n self.which = which\n self.best_cmp_value = None\n\n def accumulate(self, *other_values):\n\n with torch.cuda.device(self.gpu):\n cmp_values = [other_values[which] for which in self.which]\n if self.best_cmp_value is None or \\\n self.cmp_fn(self.best_cmp_value, *cmp_values) != self.best_cmp_value:\n self.metrics.update( { metric: value for metric, value in zip(\n 
list(self.metrics.keys()), other_values) } )\n self.best_cmp_value = self.cmp_fn( [ list(self.metrics.items())[which][1] for which in self.which ] )\n\n #open(self.path + '.temp', 'w')\n if self.model is not None:\n torch.save(self.model.state_dict(), self.path)\n\n if self.opt is not None:\n torch.save([self.i, self.opt.state_dict()], self.path + '.states')\n #os.remove(self.path + '.temp')\n\n def __getattr__(self, key):\n if key in self.metrics:\n return self.metrics[key]\n raise AttributeError\n\n def __repr__(self):\n return (\"BEST: \" +\n ', '.join([\"{}: {:.4f}\".format(metric, getattr(self, metric)) for metric, value in self.metrics.items() if value is not 0]))\n\nclass CacheExample(data.Example):\n\n @classmethod\n def fromsample(cls, data_lists, names):\n ex = cls()\n for data, name in zip(data_lists, names):\n setattr(ex, name, data)\n return ex\n\n\nclass Cache:\n\n def __init__(self, size=10000, fileds=[\"src\", \"trg\"]):\n self.cache = []\n self.maxsize = size\n\n def demask(self, data, mask):\n with torch.cuda.device_of(data):\n data = [d[:l] for d, l in zip(data.data.tolist(), mask.sum(1).long().tolist())]\n return data\n\n def add(self, data_lists, masks, names):\n data_lists = [self.demask(d, m) for d, m in zip(data_lists, masks)]\n for data in zip(*data_lists):\n self.cache.append(CacheExample.fromsample(data, names))\n\n if len(self.cache) >= self.maxsize:\n self.cache = self.cache[-self.maxsize:]\n\n\nclass Batch:\n def __init__(self, src=None, trg=None, dec=None):\n self.src, self.trg, self.dec = src, trg, dec\n\ndef masked_sort(x, mask, dim=-1):\n x.data += ((1 - mask) * INF).long()\n y, i = torch.sort(x, dim)\n y.data *= mask.long()\n return y, i\n\ndef unsorted(y, i, dim=-1):\n z = Variable(y.data.new(*y.size()))\n z.scatter_(dim, i, y)\n return z\n\n\ndef merge_cache(decoding_path, names0, last_epoch=0, max_cache=20):\n file_lock = open(decoding_path + '/_temp_decode', 'w')\n\n for name in names0:\n filenames = []\n for i in range(max_cache):\n filenames.append('{}/{}.ep{}'.format(decoding_path, name, last_epoch - i))\n if (last_epoch - i) <= 0:\n break\n code = 'cat {} > {}.train.{}'.format(\" \".join(filenames), '{}/{}'.format(decoding_path, name), last_epoch)\n os.system(code)\n os.remove(decoding_path + '/_temp_decode')\n\ndef corrupt_target_fix(trg, decoder_masks, vocab_size, weight=0.1, cor_p=[0.1, 0.1, 0.1, 0.1]):\n batch_size, max_trg_len = trg.size() # actual trg len\n max_dec_len = decoder_masks.size(1) # 2 * actual src len\n dec_lens = (decoder_masks == 1).sum(-1).cpu().numpy()\n trg_lens = (trg != 1).sum(-1).data.cpu().numpy()\n\n num_corrupts = np.array( [ np.random.choice(dec_lens[bidx]//2,\n min( max( math.floor(weight * (dec_lens[bidx]//2)), 1 ), dec_lens[bidx]//2),\n replace=False ) \\\n for bidx in range(batch_size) ] )\n\n #min_len = min(max_trg_len, max_dec_len)\n decoder_input = np.ones((batch_size, max_dec_len))\n decoder_input.fill(3)\n #decoder_input[:, :min_len] = trg[:, :min_len].data.cpu().numpy()\n\n for bidx in range(batch_size):\n min_len = min(dec_lens[bidx], trg_lens[bidx])\n decoder_input[bidx][:min_len] = trg[bidx, :min_len].data.cpu().numpy()\n nr_list = num_corrupts[bidx]\n for nr in nr_list:\n\n prob = np.random.rand()\n\n #### each corruption changes multiple words\n if prob < sum(cor_p[:1]): # repeat\n decoder_input[bidx][nr+1:] = decoder_input[bidx][nr:-1]\n\n elif prob < sum(cor_p[:2]): # drop\n decoder_input[bidx][nr:-1] = decoder_input[bidx][nr+1:]\n\n #### each corruption changes one word\n elif prob < sum(cor_p[:3]): 
# replace word with random word\n decoder_input[bidx][nr] = np.random.randint(vocab_size-4) + 4\n\n #### each corruption changes two words\n elif prob < sum(cor_p[:4]): # swap\n temp = decoder_input[bidx][nr]\n decoder_input[bidx][nr] = decoder_input[bidx][nr+1]\n decoder_input[bidx][nr+1] = temp\n\n result = torch.from_numpy(decoder_input).long()\n if decoder_masks.is_cuda:\n result = result.cuda(decoder_masks.get_device())\n return Variable(result, requires_grad=False)\n\ndef corrupt_target(trg, decoder_masks, vocab_size, weight=0.1, cor_p=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]):\n batch_size, max_trg_len = trg.size()\n max_dec_len = decoder_masks.size(1)\n dec_lens = (decoder_masks == 1).sum(-1).cpu().numpy()\n\n num_corrupts = np.array( [ np.random.choice(dec_lens[bidx]-1,\n min( max( math.floor(weight * dec_lens[bidx]), 1 ), dec_lens[bidx]-1 ),\n replace=False ) \\\n for bidx in range(batch_size) ] )\n\n min_len = min(max_trg_len, max_dec_len)\n decoder_input = np.ones((batch_size, max_dec_len))\n decoder_input.fill(3)\n decoder_input[:, :min_len] = trg[:, :min_len].data.cpu().numpy()\n\n for bidx in range(batch_size):\n nr_list = num_corrupts[bidx]\n for nr in nr_list:\n\n prob = np.random.rand()\n\n #### each corruption changes multiple words\n if prob < sum(cor_p[:1]): # repeat\n decoder_input[bidx][nr+1:] = decoder_input[bidx][nr:-1]\n\n elif prob < sum(cor_p[:2]): # drop\n decoder_input[bidx][nr:-1] = decoder_input[bidx][nr+1:]\n\n elif prob < sum(cor_p[:3]): # add random word\n decoder_input[bidx][nr+1:] = decoder_input[bidx][nr:-1]\n decoder_input[bidx][nr] = np.random.randint(vocab_size-4) + 4 # sample except UNK/PAD/INIT/EOS\n\n #### each corruption changes one word\n elif prob < sum(cor_p[:4]): # repeat and drop next\n decoder_input[bidx][nr+1] = decoder_input[bidx][nr]\n\n elif prob < sum(cor_p[:5]): # replace word with random word\n decoder_input[bidx][nr] = np.random.randint(vocab_size-4) + 4\n\n #### each corruption changes two words\n elif prob < sum(cor_p[:6]): # swap\n temp = decoder_input[bidx][nr]\n decoder_input[bidx][nr] = decoder_input[bidx][nr+1]\n decoder_input[bidx][nr+1] = temp\n\n elif prob < sum(cor_p[:7]): # global swap\n swap_idx = np.random.randint(1, dec_lens[bidx]-nr) + nr\n temp = decoder_input[bidx][nr]\n decoder_input[bidx][nr] = decoder_input[bidx][swap_idx]\n decoder_input[bidx][swap_idx] = temp\n\n result = torch.from_numpy(decoder_input).long()\n if decoder_masks.is_cuda:\n result = result.cuda(decoder_masks.get_device())\n return Variable(result, requires_grad=False)\n\ndef drop(sentence, n_d):\n cur_len = np.sum( sentence != 1 )\n for idx in range(n_d):\n drop_pos = random.randint(0, cur_len - 1) # a <= N <= b\n sentence[drop_pos:-1] = sentence[drop_pos+1:]\n cur_len = cur_len - 1\n sentence[-n_d:] = 1\n return sentence\n\ndef repeat(sentence, n_r):\n cur_len = np.sum( sentence != 1 )\n for idx in range(n_r):\n drop_pos = random.randint(0, cur_len) # a <= N <= b\n sentence[drop_pos+1:] = sentence[drop_pos:-1]\n sentence[cur_len:] = 1\n return sentence\n\ndef remove_repeats(lst_of_sentences):\n lst = []\n for sentence in lst_of_sentences:\n lst.append( \" \".join([x[0] for x in groupby(sentence.split())]) )\n return lst\n\ndef remove_repeats_tensor(tensor):\n tensor = tensor.data.cpu()\n newtensor = tensor.clone()\n batch_size, seq_len = tensor.size()\n for bidx in range(batch_size):\n for sidx in range(seq_len-1):\n if newtensor[bidx, sidx] == newtensor[bidx, sidx+1]:\n newtensor[bidx, sidx:-1] = newtensor[bidx, sidx+1:]\n return 
Variable(newtensor)\n\ndef mkdir(path):\n if not os.path.exists(path):\n os.mkdir(path)\n\ndef print_bleu(bleu_output, verbose=True):\n (final_bleu, prec, bp, ref_lengths, hyp_lengths) = bleu_output\n ratio = 0 if ref_lengths == 0 else hyp_lengths/ref_lengths\n if verbose:\n return \"BLEU = {:.2f}, {:.1f}/{:.1f}/{:.1f}/{:.1f} (BP={:.3f}, ratio={:.3f}, hyp_len={}, ref_len={})\".format(\n final_bleu, prec[0], prec[1], prec[2], prec[3], bp, ratio, hyp_lengths, ref_lengths\n )\n else:\n return \"BLEU = {:.2f}, {:.1f}/{:.1f}/{:.1f}/{:.1f} (BP={:.3f}, ratio={:.3f})\".format(\n final_bleu, prec[0], prec[1], prec[2], prec[3], bp, ratio\n )\n\ndef set_eos(argmax):\n new_argmax = Variable(argmax.data.new(*argmax.size()), requires_grad=False)\n new_argmax.fill_(3)\n batch_size, seq_len = argmax.size()\n argmax_lst = argmax.data.cpu().numpy().tolist()\n for bidx in range(batch_size):\n if 3 in argmax_lst[bidx]:\n idx = argmax_lst[bidx].index(3)\n if idx > 0 :\n new_argmax[bidx,:idx] = argmax[bidx,:idx]\n return new_argmax\n\ndef init_encoder(model, saved):\n saved_ = {k.replace(\"encoder.\",\"\"):v for (k,v) in saved.items() if \"encoder\" in k}\n encoder = model.encoder\n encoder.load_state_dict(saved_)\n return model\n\ndef oracle_converged(bleu_hist, num_items=5):\n batch_size = len(bleu_hist)\n converged = [False for bidx in range(batch_size)]\n for bidx in range(batch_size):\n if len(bleu_hist[bidx]) < num_items:\n converged[bidx] = False\n else:\n converged[bidx] = True\n hist = bleu_hist[bidx][-num_items:]\n for item in hist[1:]:\n if item > hist[0]:\n converged[bidx] = False # if BLEU improves in 4 iters, not converged\n return converged\n\ndef equality_converged(output_hist, num_items=5):\n batch_size = len(output_hist)\n converged = [False for bidx in range(batch_size)]\n for bidx in range(batch_size):\n if len(output_hist[bidx]) < num_items:\n converged[bidx] = False\n else:\n converged[bidx] = False\n hist = output_hist[bidx][-num_items:]\n for item in hist[1:]:\n if item == hist[0]:\n converged[bidx] = True # if out_i == out_j for (j = i+1, i+2, i+3, i+4), converged\n return converged\n\ndef jaccard_converged(multiset_hist, num_items=5, jaccard_thresh=1.0):\n batch_size = len(multiset_hist)\n converged = [False for bidx in range(batch_size)]\n for bidx in range(batch_size):\n if len(multiset_hist[bidx]) < num_items:\n converged[bidx] = False\n else:\n converged[bidx] = False\n hist = multiset_hist[bidx][-num_items:]\n for item in hist[1:]:\n\n inters = len(item.intersection(hist[0]))\n unio = len(item.union(hist[0]))\n jaccard_index = float(inters) / np.maximum(1.,float(unio))\n\n if jaccard_index >= jaccard_thresh:\n converged[bidx] = True\n return converged\n"
] |
[
[
"torch.from_numpy",
"torch.is_tensor",
"numpy.ones",
"numpy.ceil",
"numpy.random.randint",
"torch.sort",
"torch.cuda.device_of",
"numpy.random.rand",
"torch.cuda.device",
"numpy.zeros",
"numpy.sum",
"torch.autograd.Variable"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RadioAstronomySoftwareGroup/pyuvdata
|
[
"3a34a39b95503a908f49bedd9f7289bff9198d2c"
] |
[
"pyuvdata/tests/test_utils.py"
] |
[
"# -*- mode: python; coding: utf-8 -*-\n# Copyright (c) 2018 Radio Astronomy Software Group\n# Licensed under the 2-clause BSD License\n\n\"\"\"Tests for common utility functions.\"\"\"\nimport os\nimport copy\nimport re\n\nimport pytest\nimport numpy as np\nfrom astropy import units\nfrom astropy.time import Time\nfrom astropy.coordinates import SkyCoord, Angle, EarthLocation\n\nfrom pyuvdata import UVData, UVFlag, UVCal\nimport pyuvdata.utils as uvutils\nimport pyuvdata.tests as uvtest\nfrom pyuvdata.data import DATA_PATH\n\n\nref_latlonalt = (-26.7 * np.pi / 180.0, 116.7 * np.pi / 180.0, 377.8)\nref_xyz = (-2562123.42683, 5094215.40141, -2848728.58869)\n\npytestmark = pytest.mark.filterwarnings(\n \"ignore:telescope_location is not set. Using known values\",\n \"ignore:antenna_positions is not set. Using known values\",\n)\n\n\npytestmark = pytest.mark.filterwarnings(\n \"ignore:telescope_location is not set. Using known values for HERA.\",\n \"ignore:antenna_positions is not set. Using known values for HERA.\",\n)\n\n\[email protected](scope=\"session\")\ndef astrometry_args():\n default_args = {\n \"time_array\": 2456789.0 + np.array([0.0, 1.25, 10.5, 100.75]),\n \"icrs_ra\": 2.468,\n \"icrs_dec\": 1.234,\n \"epoch\": 2000.0,\n \"telescope_loc\": (0.123, -0.456, 4321.0),\n \"pm_ra\": 12.3,\n \"pm_dec\": 45.6,\n \"vrad\": 31.4,\n \"dist\": 73.31,\n \"library\": \"erfa\",\n }\n\n default_args[\"lst_array\"] = uvutils.get_lst_for_time(\n default_args[\"time_array\"],\n default_args[\"telescope_loc\"][0] * (180.0 / np.pi),\n default_args[\"telescope_loc\"][1] * (180.0 / np.pi),\n default_args[\"telescope_loc\"][2],\n )\n\n default_args[\"drift_coord\"] = SkyCoord(\n default_args[\"lst_array\"],\n [default_args[\"telescope_loc\"][0]] * len(default_args[\"lst_array\"]),\n unit=\"rad\",\n )\n\n default_args[\"icrs_coord\"] = SkyCoord(\n default_args[\"icrs_ra\"], default_args[\"icrs_dec\"], unit=\"rad\",\n )\n\n default_args[\"fk5_ra\"], default_args[\"fk5_dec\"] = uvutils.transform_sidereal_coords(\n default_args[\"icrs_ra\"],\n default_args[\"icrs_dec\"],\n \"icrs\",\n \"fk5\",\n in_coord_epoch=\"J2000.0\",\n out_coord_epoch=\"J2000.0\",\n )\n\n # These are values calculated w/o the optional arguments, e.g. 
pm, vrad, dist\n default_args[\"app_ra\"], default_args[\"app_dec\"] = uvutils.transform_icrs_to_app(\n default_args[\"time_array\"],\n default_args[\"icrs_ra\"],\n default_args[\"icrs_dec\"],\n default_args[\"telescope_loc\"],\n )\n\n default_args[\"app_coord\"] = SkyCoord(\n default_args[\"app_ra\"], default_args[\"app_dec\"], unit=\"rad\",\n )\n\n yield default_args\n\n\[email protected]\ndef vector_list():\n x_vecs = np.array([[1, 0, 0], [2, 0, 0]], dtype=float).T\n y_vecs = np.array([[0, 1, 0], [0, 2, 0]], dtype=float).T\n z_vecs = np.array([[0, 0, 1], [0, 0, 2]], dtype=float).T\n test_vecs = np.array([[1, 1, 1], [2, 2, 2]], dtype=float).T\n\n yield x_vecs, y_vecs, z_vecs, test_vecs\n\n\[email protected]\ndef calc_uvw_args():\n default_args = {\n \"app_ra\": np.zeros(3),\n \"app_dec\": np.zeros(3) + 1.0,\n \"frame_pa\": np.zeros(3) + 1e-3,\n \"lst_array\": np.zeros(3) + np.pi,\n \"use_ant_pos\": True,\n \"uvw_array\": np.array([[1, -1, 0], [0, -1, 1], [-1, 0, 1]], dtype=float),\n \"antenna_positions\": np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=float),\n \"antenna_numbers\": [1, 2, 3],\n \"ant_1_array\": np.array([1, 1, 2]),\n \"ant_2_array\": np.array([2, 3, 3]),\n \"old_app_ra\": np.zeros(3) + np.pi,\n \"old_app_dec\": np.zeros(3),\n \"old_frame_pa\": np.zeros(3),\n \"telescope_lat\": 1.0,\n \"telescope_lon\": 0.0,\n \"to_enu\": False,\n \"from_enu\": False,\n }\n yield default_args\n\n\[email protected]\ndef uvcalibrate_init_data():\n uvdata = UVData()\n uvdata.read(\n os.path.join(DATA_PATH, \"zen.2458098.45361.HH.uvh5_downselected\"),\n file_type=\"uvh5\",\n )\n uvcal = UVCal()\n uvcal.read_calfits(\n os.path.join(DATA_PATH, \"zen.2458098.45361.HH.omni.calfits_downselected\")\n )\n\n yield uvdata, uvcal\n\n\[email protected]\ndef uvcalibrate_data(uvcalibrate_init_data):\n uvdata, uvcal = uvcalibrate_init_data\n\n # fix the antenna names in the uvcal object to match the uvdata object\n uvcal.antenna_names = np.array(\n [name.replace(\"ant\", \"HH\") for name in uvcal.antenna_names]\n )\n\n yield uvdata, uvcal\n\n\ndef test_XYZ_from_LatLonAlt():\n \"\"\"Test conversion from lat/lon/alt to ECEF xyz with reference values.\"\"\"\n out_xyz = uvutils.XYZ_from_LatLonAlt(\n ref_latlonalt[0], ref_latlonalt[1], ref_latlonalt[2]\n )\n # Got reference by forcing http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm\n # to give additional precision.\n assert np.allclose(ref_xyz, out_xyz, rtol=0, atol=1e-3)\n\n # test error checking\n with pytest.raises(\n ValueError,\n match=\"latitude, longitude and altitude must all have the same length\",\n ):\n uvutils.XYZ_from_LatLonAlt(\n ref_latlonalt[0],\n ref_latlonalt[1],\n np.array([ref_latlonalt[2], ref_latlonalt[2]]),\n )\n\n with pytest.raises(\n ValueError,\n match=\"latitude, longitude and altitude must all have the same length\",\n ):\n uvutils.XYZ_from_LatLonAlt(\n ref_latlonalt[0],\n np.array([ref_latlonalt[1], ref_latlonalt[1]]),\n ref_latlonalt[2],\n )\n\n\ndef test_LatLonAlt_from_XYZ():\n \"\"\"Test conversion from ECEF xyz to lat/lon/alt with reference values.\"\"\"\n out_latlonalt = uvutils.LatLonAlt_from_XYZ(ref_xyz)\n # Got reference by forcing http://www.oc.nps.edu/oc2902w/coord/llhxyz.htm\n # to give additional precision.\n assert np.allclose(ref_latlonalt, out_latlonalt, rtol=0, atol=1e-3)\n pytest.raises(ValueError, uvutils.LatLonAlt_from_XYZ, ref_latlonalt)\n\n # test passing multiple values\n xyz_mult = np.stack((np.array(ref_xyz), np.array(ref_xyz)))\n lat_vec, lon_vec, alt_vec = uvutils.LatLonAlt_from_XYZ(xyz_mult)\n assert 
np.allclose(\n ref_latlonalt, (lat_vec[1], lon_vec[1], alt_vec[1]), rtol=0, atol=1e-3\n )\n # check error if array transposed\n with pytest.raises(ValueError) as cm:\n uvutils.LatLonAlt_from_XYZ(xyz_mult.T)\n assert str(cm.value).startswith(\n \"The expected shape of ECEF xyz array is (Npts, 3).\"\n )\n\n # check error if only 2 coordinates\n with pytest.raises(ValueError) as cm:\n uvutils.LatLonAlt_from_XYZ(xyz_mult[:, 0:2])\n assert str(cm.value).startswith(\n \"The expected shape of ECEF xyz array is (Npts, 3).\"\n )\n\n # test error checking\n pytest.raises(ValueError, uvutils.LatLonAlt_from_XYZ, ref_xyz[0:1])\n\n\ndef test_lla_xyz_lla_roundtrip():\n \"\"\"Test roundtripping an array will yield the same values.\"\"\"\n np.random.seed(0)\n lats = -30.721 + np.random.normal(0, 0.0005, size=30)\n lons = 21.428 + np.random.normal(0, 0.0005, size=30)\n alts = np.random.uniform(1051, 1054, size=30)\n lats *= np.pi / 180.0\n lons *= np.pi / 180.0\n xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts)\n lats_new, lons_new, alts_new = uvutils.LatLonAlt_from_XYZ(xyz)\n assert np.allclose(lats_new, lats)\n assert np.allclose(lons_new, lons)\n assert np.allclose(alts_new, alts)\n\n\[email protected](scope=\"module\")\ndef enu_ecef_info():\n \"\"\"Some setup info for ENU/ECEF calculations.\"\"\"\n center_lat = -30.7215261207 * np.pi / 180.0\n center_lon = 21.4283038269 * np.pi / 180.0\n center_alt = 1051.7\n # fmt: off\n lats = (np.array([-30.72218216, -30.72138101, -30.7212785, -30.7210011,\n -30.72159853, -30.72206199, -30.72174614, -30.72188775,\n -30.72183915, -30.72100138])\n * np.pi / 180.0)\n lons = (np.array([21.42728211, 21.42811727, 21.42814544, 21.42795736,\n 21.42686739, 21.42918772, 21.42785662, 21.4286408,\n 21.42750933, 21.42896567])\n * np.pi / 180.0)\n alts = np.array([1052.25, 1051.35, 1051.2, 1051., 1051.45, 1052.04, 1051.68,\n 1051.87, 1051.77, 1051.06])\n # used pymap3d, which implements matlab code, as a reference.\n x = [5109327.46674067, 5109339.76407785, 5109344.06370947,\n 5109365.11297147, 5109372.115673, 5109266.94314734,\n 5109329.89620962, 5109295.13656657, 5109337.21810468,\n 5109329.85680612]\n\n y = [2005130.57953031, 2005221.35184577, 2005225.93775268,\n 2005214.8436201, 2005105.42364036, 2005302.93158317,\n 2005190.65566222, 2005257.71335575, 2005157.78980089,\n 2005304.7729239]\n\n z = [-3239991.24516348, -3239914.4185286, -3239904.57048431,\n -3239878.02656316, -3239935.20415493, -3239979.68381865,\n -3239949.39266985, -3239962.98805772, -3239958.30386264,\n -3239878.08403833]\n\n east = [-97.87631659, -17.87126443, -15.17316938, -33.19049252, -137.60520964,\n 84.67346748, -42.84049408, 32.28083937, -76.1094745, 63.40285935]\n north = [-72.7437482, 16.09066646, 27.45724573, 58.21544651, -8.02964511,\n -59.41961437, -24.39698388, -40.09891961, -34.70965816, 58.18410876]\n up = [0.54883333, -0.35004539, -0.50007736, -0.70035299, -0.25148791, 0.33916067,\n -0.02019057, 0.16979185, 0.06945155, -0.64058124]\n # fmt: on\n yield (\n center_lat,\n center_lon,\n center_alt,\n lats,\n lons,\n alts,\n x,\n y,\n z,\n east,\n north,\n up,\n )\n\n\ndef test_xyz_from_latlonalt(enu_ecef_info):\n \"\"\"Test calculating xyz from lat lot alt.\"\"\"\n (\n center_lat,\n center_lon,\n center_alt,\n lats,\n lons,\n alts,\n x,\n y,\n z,\n east,\n north,\n up,\n ) = enu_ecef_info\n xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts)\n assert np.allclose(np.stack((x, y, z), axis=1), xyz, atol=1e-3)\n\n\ndef test_enu_from_ecef(enu_ecef_info):\n \"\"\"Test calculating ENU from ECEF 
coordinates.\"\"\"\n (\n center_lat,\n center_lon,\n center_alt,\n lats,\n lons,\n alts,\n x,\n y,\n z,\n east,\n north,\n up,\n ) = enu_ecef_info\n xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts)\n\n enu = uvutils.ENU_from_ECEF(xyz, center_lat, center_lon, center_alt)\n assert np.allclose(np.stack((east, north, up), axis=1), enu, atol=1e-3)\n\n\[email protected](\"shape_type\", [\"transpose\", \"Nblts,2\", \"Nblts,1\"])\ndef test_enu_from_ecef_shape_errors(enu_ecef_info, shape_type):\n \"\"\"Test ENU_from_ECEF input shape errors.\"\"\"\n (\n center_lat,\n center_lon,\n center_alt,\n lats,\n lons,\n alts,\n x,\n y,\n z,\n east,\n north,\n up,\n ) = enu_ecef_info\n xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts)\n if shape_type == \"transpose\":\n xyz = xyz.T.copy()\n elif shape_type == \"Nblts,2\":\n xyz = xyz.copy()[:, 0:2]\n elif shape_type == \"Nblts,1\":\n xyz = xyz.copy()[:, 0:1]\n\n # check error if array transposed\n with pytest.raises(ValueError) as cm:\n uvutils.ENU_from_ECEF(xyz, center_lat, center_lon, center_alt)\n assert str(cm.value).startswith(\n \"The expected shape of ECEF xyz array is (Npts, 3).\"\n )\n\n\ndef test_enu_from_ecef_magnitude_error(enu_ecef_info):\n \"\"\"Test ENU_from_ECEF input magnitude errors.\"\"\"\n (\n center_lat,\n center_lon,\n center_alt,\n lats,\n lons,\n alts,\n x,\n y,\n z,\n east,\n north,\n up,\n ) = enu_ecef_info\n xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts)\n # error checking\n with pytest.raises(ValueError) as cm:\n uvutils.ENU_from_ECEF(xyz / 2.0, center_lat, center_lon, center_alt)\n assert str(cm.value).startswith(\n \"ECEF vector magnitudes must be on the order of the radius of the earth\"\n )\n\n\ndef test_ecef_from_enu_roundtrip(enu_ecef_info):\n \"\"\"Test ECEF_from_ENU values.\"\"\"\n (\n center_lat,\n center_lon,\n center_alt,\n lats,\n lons,\n alts,\n x,\n y,\n z,\n east,\n north,\n up,\n ) = enu_ecef_info\n xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts)\n enu = uvutils.ENU_from_ECEF(xyz, center_lat, center_lon, center_alt)\n # check that a round trip gives the original value.\n xyz_from_enu = uvutils.ECEF_from_ENU(enu, center_lat, center_lon, center_alt)\n assert np.allclose(xyz, xyz_from_enu, atol=1e-3)\n\n\[email protected](\"shape_type\", [\"transpose\", \"Nblts,2\", \"Nblts,1\"])\ndef test_ecef_from_enu_shape_errors(enu_ecef_info, shape_type):\n (\n center_lat,\n center_lon,\n center_alt,\n lats,\n lons,\n alts,\n x,\n y,\n z,\n east,\n north,\n up,\n ) = enu_ecef_info\n xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts)\n enu = uvutils.ENU_from_ECEF(xyz, center_lat, center_lon, center_alt)\n if shape_type == \"transpose\":\n enu = enu.copy().T\n elif shape_type == \"Nblts,2\":\n enu = enu.copy()[:, 0:2]\n elif shape_type == \"Nblts,1\":\n enu = enu.copy()[:, 0:1]\n\n # check error if array transposed\n with pytest.raises(ValueError) as cm:\n uvutils.ECEF_from_ENU(enu, center_lat, center_lon, center_alt)\n assert str(cm.value).startswith(\"The expected shape of the ENU array is (Npts, 3).\")\n\n\ndef test_ecef_from_enu_single(enu_ecef_info):\n \"\"\"Test single coordinate transform.\"\"\"\n (\n center_lat,\n center_lon,\n center_alt,\n lats,\n lons,\n alts,\n x,\n y,\n z,\n east,\n north,\n up,\n ) = enu_ecef_info\n xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts)\n # check passing a single value\n enu_single = uvutils.ENU_from_ECEF(xyz[0, :], center_lat, center_lon, center_alt)\n\n assert np.allclose(np.array((east[0], north[0], up[0])), enu_single, atol=1e-3)\n\n\ndef 
test_ecef_from_enu_single_roundtrip(enu_ecef_info):\n \"\"\"Test single coordinate roundtrip.\"\"\"\n (\n center_lat,\n center_lon,\n center_alt,\n lats,\n lons,\n alts,\n x,\n y,\n z,\n east,\n north,\n up,\n ) = enu_ecef_info\n xyz = uvutils.XYZ_from_LatLonAlt(lats, lons, alts)\n # check passing a single value\n enu = uvutils.ENU_from_ECEF(xyz, center_lat, center_lon, center_alt)\n\n enu_single = uvutils.ENU_from_ECEF(xyz[0, :], center_lat, center_lon, center_alt)\n assert np.allclose(np.array((east[0], north[0], up[0])), enu[0, :], atol=1e-3)\n\n xyz_from_enu = uvutils.ECEF_from_ENU(enu_single, center_lat, center_lon, center_alt)\n assert np.allclose(xyz[0, :], xyz_from_enu, atol=1e-3)\n\n\ndef test_mwa_ecef_conversion():\n \"\"\"\n Test based on comparing the antenna locations in a Cotter uvfits file to\n the antenna locations in MWA_tools.\n \"\"\"\n\n test_data_file = os.path.join(DATA_PATH, \"mwa128_ant_layouts.npz\")\n f = np.load(test_data_file)\n\n # From the STABXYZ table in a cotter-generated uvfits file, obsid = 1066666832\n xyz = f[\"stabxyz\"]\n # From the East/North/Height columns in a cotter-generated metafits file,\n # obsid = 1066666832\n enh = f[\"ENH\"]\n # From a text file antenna_locations.txt in MWA_Tools/scripts\n txt_topo = f[\"txt_topo\"]\n\n # From the unphased uvw coordinates of obsid 1066666832, positions relative\n # to antenna 0\n # these aren't used in the current test, but are interesting and might help\n # with phasing diagnosis in the future\n uvw_topo = f[\"uvw_topo\"]\n # Sky coordinates are flipped for uvw derived values\n uvw_topo = -uvw_topo\n uvw_topo += txt_topo[0]\n\n # transpose these arrays to get them into the right shape\n txt_topo = txt_topo.T\n uvw_topo = uvw_topo.T\n\n # ARRAYX, ARRAYY, ARRAYZ in ECEF frame from Cotter file\n arrcent = f[\"arrcent\"]\n lat, lon, alt = uvutils.LatLonAlt_from_XYZ(arrcent)\n\n # The STABXYZ coordinates are defined with X through the local meridian,\n # so rotate back to the prime meridian\n new_xyz = uvutils.ECEF_from_rotECEF(xyz.T, lon)\n # add in array center to get real ECEF\n ecef_xyz = new_xyz + arrcent\n\n enu = uvutils.ENU_from_ECEF(ecef_xyz, lat, lon, alt)\n\n assert np.allclose(enu, enh)\n\n # test other direction of ECEF rotation\n rot_xyz = uvutils.rotECEF_from_ECEF(new_xyz, lon)\n assert np.allclose(rot_xyz.T, xyz)\n\n\[email protected](\n \"input1,input2,msg\",\n (\n [0.0, np.array([0.0]), \"lon_array and lat_array must either both be floats or\"],\n [np.array([0.0, 1.0]), np.array([0.0]), \"lon_array and lat_array must have \"],\n ),\n)\ndef test_polar2_to_cart3_arg_errs(input1, input2, msg):\n \"\"\"\n Test that bad arguments to polar2_to_cart3 throw appropriate errors.\n \"\"\"\n with pytest.raises(ValueError) as cm:\n uvutils.polar2_to_cart3(input1, input2)\n assert str(cm.value).startswith(msg)\n\n\[email protected](\n \"input1,msg\",\n (\n [0.0, \"xyz_array must be an ndarray.\"],\n [np.array(0.0), \"xyz_array must have ndim > 0\"],\n [np.array([0.0]), \"xyz_array must be length 3\"],\n ),\n)\ndef test_cart3_to_polar2_arg_errs(input1, msg):\n \"\"\"\n Test that bad arguments to cart3_to_polar2 throw appropriate errors.\n \"\"\"\n with pytest.raises(ValueError) as cm:\n uvutils.cart3_to_polar2(input1)\n assert str(cm.value).startswith(msg)\n\n\[email protected](\n \"input1,input2,input3,msg\",\n (\n [np.zeros((1, 3, 1)), np.zeros((1, 3, 3)), 2, \"rot_matrix must be of shape \"],\n [np.zeros((1, 2, 1)), np.zeros((1, 3, 3)), 1, \"Misshaped xyz_array - expected\"],\n [np.zeros((2, 1)), 
np.zeros((1, 3, 3)), 1, \"Misshaped xyz_array - expected\"],\n [np.zeros((2)), np.zeros((1, 3, 3)), 1, \"Misshaped xyz_array - expected shape\"],\n ),\n)\ndef test_rotate_matmul_wrapper_arg_errs(input1, input2, input3, msg):\n \"\"\"\n Test that bad arguments to _rotate_matmul_wrapper throw appropriate errors.\n \"\"\"\n with pytest.raises(ValueError) as cm:\n uvutils._rotate_matmul_wrapper(input1, input2, input3)\n assert str(cm.value).startswith(msg)\n\n\ndef test_cart_to_polar_roundtrip():\n \"\"\"\n Test that polar->cart coord transformation is the inverse of cart->polar.\n \"\"\"\n # Basic round trip with vectors\n assert uvutils.cart3_to_polar2(uvutils.polar2_to_cart3(0.0, 0.0)) == (0.0, 0.0)\n\n\ndef test_rotate_one_axis(vector_list):\n \"\"\"\n Tests some basic vector rotation operations with a single axis rotation.\n \"\"\"\n # These tests are used to verify the basic functionality of the primary\n # functions used to perform rotations\n x_vecs, y_vecs, z_vecs, test_vecs = vector_list\n\n # Test no-ops w/ 0 deg rotations\n assert np.all(uvutils._rotate_one_axis(x_vecs, 0.0, 0) == x_vecs)\n assert np.all(\n uvutils._rotate_one_axis(x_vecs[:, 0], 0.0, 1)\n == x_vecs[np.newaxis, :, 0, np.newaxis],\n )\n assert np.all(\n uvutils._rotate_one_axis(x_vecs[:, :, np.newaxis], 0.0, 2,)\n == x_vecs[:, :, np.newaxis],\n )\n\n # Test no-ops w/ None\n assert np.all(uvutils._rotate_one_axis(test_vecs, None, 1) == test_vecs)\n assert np.all(\n uvutils._rotate_one_axis(test_vecs[:, 0], None, 2)\n == test_vecs[np.newaxis, :, 0, np.newaxis]\n )\n assert np.all(\n uvutils._rotate_one_axis(test_vecs[:, :, np.newaxis], None, 0,)\n == test_vecs[:, :, np.newaxis]\n )\n\n # Test some basic equivalencies to make sure rotations are working correctly\n assert np.allclose(x_vecs, uvutils._rotate_one_axis(x_vecs, 1.0, 0))\n assert np.allclose(y_vecs, uvutils._rotate_one_axis(y_vecs, 2.0, 1))\n assert np.allclose(z_vecs, uvutils._rotate_one_axis(z_vecs, 3.0, 2))\n\n assert np.allclose(x_vecs, uvutils._rotate_one_axis(y_vecs, -np.pi / 2.0, 2))\n assert np.allclose(y_vecs, uvutils._rotate_one_axis(x_vecs, np.pi / 2.0, 2))\n assert np.allclose(x_vecs, uvutils._rotate_one_axis(z_vecs, np.pi / 2.0, 1))\n assert np.allclose(z_vecs, uvutils._rotate_one_axis(x_vecs, -np.pi / 2.0, 1))\n assert np.allclose(y_vecs, uvutils._rotate_one_axis(z_vecs, -np.pi / 2.0, 0))\n assert np.allclose(z_vecs, uvutils._rotate_one_axis(y_vecs, np.pi / 2.0, 0))\n\n assert np.all(\n np.equal(\n uvutils._rotate_one_axis(test_vecs, 1.0, 2),\n uvutils._rotate_one_axis(test_vecs, 1.0, np.array([2])),\n )\n )\n\n # Testing a special case, where the xyz_array vectors are reshaped if there\n # is only a single rotation matrix used (helps speed things up significantly)\n mod_vec = x_vecs.T.reshape((2, 3, 1))\n assert np.all(uvutils._rotate_one_axis(mod_vec, 1.0, 0) == mod_vec)\n\n\ndef test_rotate_two_axis(vector_list):\n \"\"\"\n Tests some basic vector rotation operations with a double axis rotation.\n \"\"\"\n x_vecs, y_vecs, z_vecs, test_vecs = vector_list\n\n # These tests are used to verify the basic functionality of the primary\n # functions used to two-axis rotations\n assert np.allclose(x_vecs, uvutils._rotate_two_axis(x_vecs, 2 * np.pi, 1.0, 1, 0))\n assert np.allclose(y_vecs, uvutils._rotate_two_axis(y_vecs, 2 * np.pi, 2.0, 2, 1))\n assert np.allclose(z_vecs, uvutils._rotate_two_axis(z_vecs, 2 * np.pi, 3.0, 0, 2))\n\n # Do one more test, which verifies that we can filp our (1,1,1) test vector to\n # the postiion at (-1, -1 , -1)\n mod_vec 
= test_vecs.T.reshape((2, 3, 1))\n assert np.allclose(\n uvutils._rotate_two_axis(mod_vec, np.pi, np.pi / 2.0, 0, 1), -mod_vec\n )\n\n\[email protected](\n \"rot1,axis1,rot2,rot3,axis2,axis3\",\n (\n [2.0, 0, 1.0, 1.0, 0, 0],\n [2.0, 0, 2.0, 0.0, 0, 1],\n [2.0, 0, None, 2.0, 1, 0],\n [0.0, 0, None, 0.0, 1, 2],\n ),\n)\ndef test_compare_one_to_two_axis(vector_list, rot1, axis1, rot2, rot3, axis2, axis3):\n \"\"\"\n Check that one-axis and two-axis rotations provide the same values when the\n two-axis rotations are fundamentally rotating around a single axis.\n \"\"\"\n x_vecs, y_vecs, z_vecs, test_vecs = vector_list\n # If performing two rots on the same axis, that should be identical to using\n # a single rot (with the rot angle equal to the sum of the two rot angles)\n assert np.all(\n np.equal(\n uvutils._rotate_one_axis(test_vecs, rot1, axis1),\n uvutils._rotate_two_axis(test_vecs, rot2, rot3, axis2, axis3),\n )\n )\n\n\[email protected](\n \"arg_dict,err\",\n (\n [\n {\"lst_array\": None, \"to_enu\": True, \"use_ant_pos\": False},\n (ValueError, \"Must include lst_array to calculate baselines in ENU\"),\n ],\n [\n {\"lst_array\": None, \"to_enu\": True, \"telescope_lat\": None},\n (ValueError, \"Must include telescope_lat to calculate baselines\"),\n ],\n [\n {\"lst_array\": None},\n (ValueError, \"Must include lst_array if use_ant_pos=True and not\"),\n ],\n [\n {\"app_ra\": None, \"frame_pa\": None},\n (ValueError, \"Must include both app_ra and app_dec, or frame_pa to\"),\n ],\n [\n {\"app_dec\": None, \"frame_pa\": None},\n (ValueError, \"Must include both app_ra and app_dec, or frame_pa to\"),\n ],\n [\n {\"app_ra\": None, \"app_dec\": None, \"frame_pa\": None},\n (ValueError, \"Must include both app_ra and app_dec, or frame_pa to\"),\n ],\n [\n {\"antenna_positions\": None},\n (ValueError, \"Must include antenna_positions if use_ant_pos=True.\"),\n ],\n [\n {\"ant_1_array\": None},\n (ValueError, \"Must include ant_1_array, ant_2_array, and antenna_numbers\"),\n ],\n [\n {\"ant_2_array\": None},\n (ValueError, \"Must include ant_1_array, ant_2_array, and antenna_numbers\"),\n ],\n [\n {\"antenna_numbers\": None},\n (ValueError, \"Must include ant_1_array, ant_2_array, and antenna_numbers\"),\n ],\n [\n {\"telescope_lon\": None},\n (ValueError, \"Must include telescope_lon if use_ant_pos=True.\"),\n ],\n [\n {\"uvw_array\": None, \"use_ant_pos\": False},\n (ValueError, \"Must include uvw_array if use_ant_pos=False.\"),\n ],\n [\n {\"telescope_lat\": None, \"use_ant_pos\": False, \"from_enu\": True},\n (ValueError, \"Must include telescope_lat if moving \"),\n ],\n [\n {\"lst_array\": None, \"use_ant_pos\": False, \"from_enu\": True},\n (ValueError, \"Must include lst_array if moving between ENU (i.e.,\"),\n ],\n [\n {\"use_ant_pos\": False, \"old_app_ra\": None},\n (ValueError, \"Must include old_app_ra and old_app_dec values when data\"),\n ],\n [\n {\"use_ant_pos\": False, \"old_app_dec\": None},\n (ValueError, \"Must include old_app_ra and old_app_dec values when data\"),\n ],\n [\n {\"use_ant_pos\": False, \"old_frame_pa\": None},\n (ValueError, \"Must include old_frame_pa values if data are phased and \"),\n ],\n ),\n)\ndef test_calc_uvw_input_errors(calc_uvw_args, arg_dict, err):\n \"\"\"\n Check for argument errors with calc_uvw.\n \"\"\"\n for key in arg_dict.keys():\n calc_uvw_args[key] = arg_dict[key]\n\n with pytest.raises(err[0]) as cm:\n uvutils.calc_uvw(\n app_ra=calc_uvw_args[\"app_ra\"],\n app_dec=calc_uvw_args[\"app_dec\"],\n 
\n            frame_pa=calc_uvw_args[\"frame_pa\"],\n            lst_array=calc_uvw_args[\"lst_array\"],\n            use_ant_pos=calc_uvw_args[\"use_ant_pos\"],\n            uvw_array=calc_uvw_args[\"uvw_array\"],\n            antenna_positions=calc_uvw_args[\"antenna_positions\"],\n            antenna_numbers=calc_uvw_args[\"antenna_numbers\"],\n            ant_1_array=calc_uvw_args[\"ant_1_array\"],\n            ant_2_array=calc_uvw_args[\"ant_2_array\"],\n            old_app_ra=calc_uvw_args[\"old_app_ra\"],\n            old_app_dec=calc_uvw_args[\"old_app_dec\"],\n            old_frame_pa=calc_uvw_args[\"old_frame_pa\"],\n            telescope_lat=calc_uvw_args[\"telescope_lat\"],\n            telescope_lon=calc_uvw_args[\"telescope_lon\"],\n            from_enu=calc_uvw_args[\"from_enu\"],\n            to_enu=calc_uvw_args[\"to_enu\"],\n        )\n    assert str(cm.value).startswith(err[1])\n\n\ndef test_calc_uvw_no_op(calc_uvw_args):\n    \"\"\"\n    Test that transforming ENU -> ENU gives you an output identical to the input.\n    \"\"\"\n    # This should be a no-op, check for equality\n    uvw_check = uvutils.calc_uvw(\n        lst_array=calc_uvw_args[\"lst_array\"],\n        use_ant_pos=False,\n        uvw_array=calc_uvw_args[\"uvw_array\"],\n        telescope_lat=calc_uvw_args[\"telescope_lat\"],\n        telescope_lon=calc_uvw_args[\"telescope_lon\"],\n        to_enu=True,\n        from_enu=True,\n    )\n    assert np.all(np.equal(calc_uvw_args[\"uvw_array\"], uvw_check))\n\n\ndef test_calc_uvw_same_place(calc_uvw_args):\n    \"\"\"\n    Check and see that the uvw calculator derives the same values derived by hand\n    (i.e., that calculating for the same position returns the same answer).\n    \"\"\"\n    # Check and make sure that when we plug in the original values, we recover the\n    # exact same values that we calculated above.\n    uvw_ant_check = uvutils.calc_uvw(\n        app_ra=calc_uvw_args[\"old_app_ra\"],\n        app_dec=calc_uvw_args[\"old_app_dec\"],\n        frame_pa=calc_uvw_args[\"old_frame_pa\"],\n        lst_array=calc_uvw_args[\"lst_array\"],\n        use_ant_pos=True,\n        antenna_positions=calc_uvw_args[\"antenna_positions\"],\n        antenna_numbers=calc_uvw_args[\"antenna_numbers\"],\n        ant_1_array=calc_uvw_args[\"ant_1_array\"],\n        ant_2_array=calc_uvw_args[\"ant_2_array\"],\n        telescope_lat=calc_uvw_args[\"telescope_lat\"],\n        telescope_lon=calc_uvw_args[\"telescope_lon\"],\n    )\n\n    uvw_base_check = uvutils.calc_uvw(\n        app_ra=calc_uvw_args[\"old_app_ra\"],\n        app_dec=calc_uvw_args[\"old_app_dec\"],\n        frame_pa=calc_uvw_args[\"old_frame_pa\"],\n        lst_array=calc_uvw_args[\"lst_array\"],\n        use_ant_pos=False,\n        uvw_array=calc_uvw_args[\"uvw_array\"],\n        old_app_ra=calc_uvw_args[\"old_app_ra\"],\n        old_app_dec=calc_uvw_args[\"old_app_dec\"],\n        old_frame_pa=calc_uvw_args[\"old_frame_pa\"],\n    )\n\n    assert np.allclose(uvw_ant_check, calc_uvw_args[\"uvw_array\"])\n    assert np.allclose(uvw_base_check, calc_uvw_args[\"uvw_array\"])
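\n\n\n# Added illustrative sketch (not in the original suite): projecting baselines\n# into ENU should be a pure rotation, so baseline lengths ought to be\n# preserved. Assumes calc_uvw applies no scaling when moving between frames.\ndef test_calc_uvw_enu_preserves_lengths(calc_uvw_args):\n    enu_uvw = uvutils.calc_uvw(\n        lst_array=calc_uvw_args[\"lst_array\"],\n        use_ant_pos=False,\n        uvw_array=calc_uvw_args[\"uvw_array\"],\n        old_app_ra=calc_uvw_args[\"old_app_ra\"],\n        old_app_dec=calc_uvw_args[\"old_app_dec\"],\n        old_frame_pa=calc_uvw_args[\"old_frame_pa\"],\n        telescope_lat=calc_uvw_args[\"telescope_lat\"],\n        telescope_lon=calc_uvw_args[\"telescope_lon\"],\n        to_enu=True,\n    )\n    assert np.allclose(\n        np.linalg.norm(enu_uvw, axis=-1),\n        np.linalg.norm(calc_uvw_args[\"uvw_array\"], axis=-1),\n    )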
\n\n\n@pytest.mark.parametrize(\"to_enu\", [False, True])\ndef test_calc_uvw_base_vs_ants(calc_uvw_args, to_enu):\n    \"\"\"\n    Check to see that we get the same values for uvw coordinates whether we calculate\n    them using antenna positions or the previously calculated uvw's.\n    \"\"\"\n\n    # Now change position, and make sure that whether we used ant positions or rotated\n    # uvw vectors, we derived the same uvw-coordinates at the end\n    uvw_ant_check = uvutils.calc_uvw(\n        app_ra=calc_uvw_args[\"app_ra\"],\n        app_dec=calc_uvw_args[\"app_dec\"],\n        frame_pa=calc_uvw_args[\"frame_pa\"],\n        lst_array=calc_uvw_args[\"lst_array\"],\n        use_ant_pos=True,\n        antenna_positions=calc_uvw_args[\"antenna_positions\"],\n        antenna_numbers=calc_uvw_args[\"antenna_numbers\"],\n        ant_1_array=calc_uvw_args[\"ant_1_array\"],\n        ant_2_array=calc_uvw_args[\"ant_2_array\"],\n        telescope_lat=calc_uvw_args[\"telescope_lat\"],\n        telescope_lon=calc_uvw_args[\"telescope_lon\"],\n        to_enu=to_enu,\n    )\n\n    uvw_base_check = uvutils.calc_uvw(\n        app_ra=calc_uvw_args[\"app_ra\"],\n        app_dec=calc_uvw_args[\"app_dec\"],\n        frame_pa=calc_uvw_args[\"frame_pa\"],\n        lst_array=calc_uvw_args[\"lst_array\"],\n        use_ant_pos=False,\n        uvw_array=calc_uvw_args[\"uvw_array\"],\n        old_app_ra=calc_uvw_args[\"old_app_ra\"],\n        old_app_dec=calc_uvw_args[\"old_app_dec\"],\n        old_frame_pa=calc_uvw_args[\"old_frame_pa\"],\n        telescope_lat=calc_uvw_args[\"telescope_lat\"],\n        telescope_lon=calc_uvw_args[\"telescope_lon\"],\n        to_enu=to_enu,\n    )\n\n    assert np.allclose(uvw_ant_check, uvw_base_check)\n\n\ndef test_calc_uvw_enu_roundtrip(calc_uvw_args):\n    \"\"\"\n    Check and see that we can go from uvw to ENU and back to uvw using the `uvw_array`\n    argument alone (i.e., without antenna positions).\n    \"\"\"\n    # Now attempt to round trip from projected to ENU back to projected -- that should\n    # give us the original set of uvw-coordinates.\n    temp_uvw = uvutils.calc_uvw(\n        lst_array=calc_uvw_args[\"lst_array\"],\n        use_ant_pos=False,\n        uvw_array=calc_uvw_args[\"uvw_array\"],\n        old_app_ra=calc_uvw_args[\"old_app_ra\"],\n        old_app_dec=calc_uvw_args[\"old_app_dec\"],\n        old_frame_pa=calc_uvw_args[\"old_frame_pa\"],\n        telescope_lat=calc_uvw_args[\"telescope_lat\"],\n        telescope_lon=calc_uvw_args[\"telescope_lon\"],\n        to_enu=True,\n    )\n\n    uvw_base_enu_check = uvutils.calc_uvw(\n        app_ra=calc_uvw_args[\"old_app_ra\"],\n        app_dec=calc_uvw_args[\"old_app_dec\"],\n        frame_pa=calc_uvw_args[\"old_frame_pa\"],\n        lst_array=calc_uvw_args[\"lst_array\"],\n        use_ant_pos=False,\n        uvw_array=temp_uvw,\n        telescope_lat=calc_uvw_args[\"telescope_lat\"],\n        telescope_lon=calc_uvw_args[\"telescope_lon\"],\n        from_enu=True,\n    )\n\n    assert np.allclose(calc_uvw_args[\"uvw_array\"], uvw_base_enu_check)\n\n\ndef test_calc_uvw_pa_ex_post_facto(calc_uvw_args):\n    \"\"\"\n    Check and see that one can apply the frame position angle rotation after-the-fact\n    and still get out the same answer you get if you were doing it during the initial\n    uvw coordinate calculation.\n    \"\"\"\n    # Finally, check and see what happens if you do the PA rotation as part of the\n    # first uvw calculation, and make sure it agrees with what you get if you decide\n    # to apply the PA rotation after-the-fact.\n    uvw_base_check = uvutils.calc_uvw(\n        app_ra=calc_uvw_args[\"app_ra\"],\n        app_dec=calc_uvw_args[\"app_dec\"],\n        frame_pa=calc_uvw_args[\"frame_pa\"],\n        lst_array=calc_uvw_args[\"lst_array\"],\n        use_ant_pos=False,\n        uvw_array=calc_uvw_args[\"uvw_array\"],\n        old_app_ra=calc_uvw_args[\"old_app_ra\"],\n        old_app_dec=calc_uvw_args[\"old_app_dec\"],\n        old_frame_pa=calc_uvw_args[\"old_frame_pa\"],\n    )\n\n    temp_uvw = uvutils.calc_uvw(\n        app_ra=calc_uvw_args[\"app_ra\"],\n        app_dec=calc_uvw_args[\"app_dec\"],\n        lst_array=calc_uvw_args[\"lst_array\"],\n        use_ant_pos=False,\n        uvw_array=calc_uvw_args[\"uvw_array\"],\n        old_app_ra=calc_uvw_args[\"old_app_ra\"],\n        old_app_dec=calc_uvw_args[\"old_app_dec\"],\n        old_frame_pa=calc_uvw_args[\"old_frame_pa\"],\n    )\n\n    uvw_base_late_pa_check = uvutils.calc_uvw(\n        frame_pa=calc_uvw_args[\"frame_pa\"],\n        use_ant_pos=False,\n        uvw_array=temp_uvw,\n        old_frame_pa=calc_uvw_args[\"old_frame_pa\"],\n    )\n\n    assert np.allclose(uvw_base_check, uvw_base_late_pa_check)
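\n\n\n# Added sketch (not in the original suite): a frame position angle rotation\n# should only mix u and v, leaving w untouched. This assumes calc_uvw\n# implements the PA correction as a rotation about the w axis.\ndef test_calc_uvw_pa_rotation_preserves_w(calc_uvw_args):\n    new_uvw = uvutils.calc_uvw(\n        frame_pa=calc_uvw_args[\"frame_pa\"],\n        use_ant_pos=False,\n        uvw_array=calc_uvw_args[\"uvw_array\"],\n        old_frame_pa=calc_uvw_args[\"old_frame_pa\"],\n    )\n    assert np.allclose(new_uvw[:, 2], calc_uvw_args[\"uvw_array\"][:, 2])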
\n\n\n@pytest.mark.filterwarnings('ignore:ERFA function \"pmsafe\" yielded')\n@pytest.mark.filterwarnings('ignore:ERFA function \"dtdtf\" yielded')\n@pytest.mark.filterwarnings('ignore:ERFA function \"utcut1\" yielded')\n@pytest.mark.filterwarnings('ignore:ERFA function \"utctai\" yielded')\n@pytest.mark.parametrize(\n    \"arg_dict,msg\",\n    (\n        [{\"library\": \"xyz\"}, \"Requested coordinate transformation library is not\"],\n        [{\"icrs_ra\": np.arange(10)}, \"ra and dec must be the same shape.\"],\n        [{\"icrs_dec\": np.arange(10)}, \"ra and dec must be the same shape.\"],\n        [{\"pm_ra\": np.arange(10)}, \"pm_ra must be the same shape as ra and dec.\"],\n        [{\"pm_dec\": np.arange(10)}, \"pm_dec must be the same shape as ra and dec.\"],\n        [{\"dist\": np.arange(10)}, \"dist must be the same shape as ra and dec.\"],\n        [{\"vrad\": np.arange(10)}, \"vrad must be the same shape as ra and dec.\"],\n        [\n            {\n                \"icrs_ra\": [0, 0],\n                \"icrs_dec\": [0, 0],\n                \"pm_ra\": None,\n                \"pm_dec\": None,\n                \"dist\": None,\n                \"vrad\": None,\n            },\n            \"time_array must be of either of\",\n        ],\n        [{\"time_array\": 0.0, \"library\": \"novas\"}, \"No current support for JPL ephems\"],\n    ),\n)\ndef test_transform_icrs_to_app_arg_errs(astrometry_args, arg_dict, msg):\n    \"\"\"\n    Check for argument errors with transform_icrs_to_app\n    \"\"\"\n    pytest.importorskip(\"novas\")\n    default_args = astrometry_args.copy()\n    for key in arg_dict.keys():\n        default_args[key] = arg_dict[key]\n\n    # Start w/ the transform_icrs_to_app block\n    with pytest.raises(ValueError) as cm:\n        uvutils.transform_icrs_to_app(\n            default_args[\"time_array\"],\n            default_args[\"icrs_ra\"],\n            default_args[\"icrs_dec\"],\n            default_args[\"telescope_loc\"],\n            pm_ra=default_args[\"pm_ra\"],\n            pm_dec=default_args[\"pm_dec\"],\n            dist=default_args[\"dist\"],\n            vrad=default_args[\"vrad\"],\n            epoch=default_args[\"epoch\"],\n            astrometry_library=default_args[\"library\"],\n        )\n    assert str(cm.value).startswith(msg)\n\n\n@pytest.mark.parametrize(\n    \"arg_dict,msg\",\n    (\n        [{\"library\": \"xyz\"}, \"Requested coordinate transformation library is not\"],\n        [{\"app_ra\": np.arange(10)}, \"app_ra and app_dec must be the same shape.\"],\n        [{\"app_dec\": np.arange(10)}, \"app_ra and app_dec must be the same shape.\"],\n        [{\"time_array\": np.arange(10)}, \"time_array must be of either of length 1\"],\n    ),\n)\ndef test_transform_app_to_icrs_arg_errs(astrometry_args, arg_dict, msg):\n    \"\"\"\n    Check for argument errors with transform_app_to_icrs\n    \"\"\"\n    default_args = astrometry_args.copy()\n    for key in arg_dict.keys():\n        default_args[key] = arg_dict[key]\n\n    with pytest.raises(ValueError) as cm:\n        uvutils.transform_app_to_icrs(\n            default_args[\"time_array\"],\n            default_args[\"app_ra\"],\n            default_args[\"app_dec\"],\n            default_args[\"telescope_loc\"],\n            astrometry_library=default_args[\"library\"],\n        )\n    assert str(cm.value).startswith(msg)\n\n\ndef test_transform_sidereal_coords_arg_errs():\n    \"\"\"\n    Check for argument errors with transform_sidereal_coords\n    \"\"\"\n    # Next on to sidereal to sidereal\n    with pytest.raises(ValueError) as cm:\n        uvutils.transform_sidereal_coords(\n            [0.0],\n            [0.0, 1.0],\n            \"fk5\",\n            \"icrs\",\n            in_coord_epoch=\"J2000.0\",\n            time_array=[0.0, 1.0, 2.0],\n        )\n    assert str(cm.value).startswith(\"lon and lat must be the same shape.\")\n\n    with pytest.raises(ValueError) as cm:\n        uvutils.transform_sidereal_coords(\n            [0.0, 1.0],\n            [0.0, 1.0],\n            \"fk4\",\n            \"fk4\",\n            in_coord_epoch=1950.0,\n            out_coord_epoch=1984.0,\n            time_array=[0.0, 1.0, 2.0],\n        )\n    assert str(cm.value).startswith(\"Shape of time_array must be either that of \")
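\n\n\n# Added sketch (not in the original suite): transforming from ICRS back to\n# ICRS should be a numerical no-op. Assumes transform_sidereal_coords accepts\n# identical input and output frames.\ndef test_transform_sidereal_coords_identity(astrometry_args):\n    check_ra, check_dec = uvutils.transform_sidereal_coords(\n        astrometry_args[\"icrs_ra\"],\n        astrometry_args[\"icrs_dec\"],\n        \"icrs\",\n        \"icrs\",\n        time_array=astrometry_args[\"time_array\"][0],\n    )\n    assert np.allclose(check_ra, astrometry_args[\"icrs_ra\"])\n    assert np.allclose(check_dec, astrometry_args[\"icrs_dec\"])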
\"Requesting too many individual ephem points from JPL-Horizons.\",\n ],\n [{\"force_lookup\": False, \"high_cadence\": True}, \"Too many ephem points\"],\n [{\"time_array\": np.arange(10)}, \"No current support for JPL ephems outside\"],\n [{\"targ_name\": \"whoami\"}, \"Target ID is not recognized in either the small\"],\n ),\n)\ndef test_lookup_jplhorizons_arg_errs(arg_dict, msg):\n \"\"\"\n Check for argument errors with lookup_jplhorizons.\n \"\"\"\n # Don't do this test if we don't have astroquery loaded\n pytest.importorskip(\"astroquery\")\n default_args = {\n \"targ_name\": \"Mars\",\n \"time_array\": np.array([0.0, 1000.0]) + 2456789.0,\n \"telescope_loc\": EarthLocation.from_geodetic(0, 0, height=0.0),\n \"high_cadence\": False,\n \"force_lookup\": None,\n }\n\n for key in arg_dict.keys():\n default_args[key] = arg_dict[key]\n\n with pytest.raises(ValueError) as cm:\n uvutils.lookup_jplhorizons(\n default_args[\"targ_name\"],\n default_args[\"time_array\"],\n telescope_loc=default_args[\"telescope_loc\"],\n high_cadence=default_args[\"high_cadence\"],\n force_indv_lookup=default_args[\"force_lookup\"],\n )\n assert str(cm.value).startswith(msg)\n\n\[email protected](\n \"bad_arg,msg\",\n [\n [\"etimes\", \"ephem_ra must have the same shape as ephem_times.\"],\n [\"ra\", \"ephem_ra must have the same shape as ephem_times.\"],\n [\"dec\", \"ephem_dec must have the same shape as ephem_times.\"],\n [\"dist\", \"ephem_dist must have the same shape as ephem_times.\"],\n [\"vel\", \"ephem_vel must have the same shape as ephem_times.\"],\n ],\n)\ndef test_interpolate_ephem_arg_errs(bad_arg, msg):\n \"\"\"\n Check for argument errors with interpolate_ephem\n \"\"\"\n # Now moving on to the interpolation scheme\n with pytest.raises(ValueError) as cm:\n uvutils.interpolate_ephem(\n 0.0,\n 0.0 if (\"etimes\" == bad_arg) else [0.0, 1.0],\n 0.0 if (\"ra\" == bad_arg) else [0.0, 1.0],\n 0.0 if (\"dec\" == bad_arg) else [0.0, 1.0],\n ephem_dist=0.0 if (\"dist\" == bad_arg) else [0.0, 1.0],\n ephem_vel=0.0 if (\"vel\" == bad_arg) else [0.0, 1.0],\n )\n assert str(cm.value).startswith(msg)\n\n\ndef test_calc_app_coords_arg_errs():\n \"\"\"\n Check for argument errors with calc_app_coords\n \"\"\"\n # Now on to app_coords\n with pytest.raises(ValueError) as cm:\n uvutils.calc_app_coords(\n 0.0, 0.0, telescope_loc=(0, 1, 2), coord_type=\"whoknows\"\n )\n assert str(cm.value).startswith(\"Object type whoknows is not recognized.\")\n\n\ndef test_transform_multi_sidereal_coords(astrometry_args):\n \"\"\"\n Perform some basic tests to verify that we can transform between sidereal frames\n with multiple coordinates.\n \"\"\"\n # Check and make sure that we can deal with non-singleton times or coords with\n # singleton coords and times, respectively.\n check_ra, check_dec = uvutils.transform_sidereal_coords(\n astrometry_args[\"icrs_ra\"] * np.ones(2),\n astrometry_args[\"icrs_dec\"] * np.ones(2),\n \"icrs\",\n \"fk5\",\n in_coord_epoch=2000.0,\n out_coord_epoch=2000.0,\n time_array=astrometry_args[\"time_array\"][0] * np.ones(2),\n )\n assert np.all(np.equal(astrometry_args[\"fk5_ra\"], check_ra))\n assert np.all(np.equal(astrometry_args[\"fk5_dec\"], check_dec))\n\n\ndef test_transform_fk5_fk4_icrs_loop(astrometry_args):\n \"\"\"\n Do a roundtrip test between ICRS, FK5, FK4 and back to ICRS to verify that we can\n handle transformation between different sidereal frames correctly.\n \"\"\"\n # Now do a triangle between ICRS -> FK5 -> FK4 -> ICRS. 
\n    # then we should recover the same position we started with.\n    fk5_ra, fk5_dec = uvutils.transform_sidereal_coords(\n        astrometry_args[\"icrs_ra\"],\n        astrometry_args[\"icrs_dec\"],\n        \"icrs\",\n        \"fk5\",\n        in_coord_epoch=2000.0,\n        out_coord_epoch=2000.0,\n        time_array=astrometry_args[\"time_array\"][0],\n    )\n\n    fk4_ra, fk4_dec = uvutils.transform_sidereal_coords(\n        fk5_ra,\n        fk5_dec,\n        \"fk5\",\n        \"fk4\",\n        in_coord_epoch=\"J2000.0\",\n        out_coord_epoch=\"B1950.0\",\n    )\n\n    check_ra, check_dec = uvutils.transform_sidereal_coords(\n        fk4_ra,\n        fk4_dec,\n        \"fk4\",\n        \"icrs\",\n        in_coord_epoch=\"B1950.0\",\n        out_coord_epoch=\"J2000.0\",\n    )\n\n    check_coord = SkyCoord(check_ra, check_dec, unit=\"rad\")\n    assert np.all(check_coord.separation(astrometry_args[\"icrs_coord\"]).uarcsec < 0.1)\n\n\ndef test_roundtrip_icrs(astrometry_args):\n    \"\"\"\n    Performs a roundtrip test to verify that one can transform between\n    ICRS <-> topocentric to the precision limit, without running into\n    issues.\n    \"\"\"\n    in_lib_list = [\"erfa\", \"erfa\", \"astropy\", \"astropy\"]\n    out_lib_list = [\"erfa\", \"astropy\", \"erfa\", \"astropy\"]\n\n    for in_lib, out_lib in zip(in_lib_list, out_lib_list):\n        app_ra, app_dec = uvutils.transform_icrs_to_app(\n            astrometry_args[\"time_array\"],\n            astrometry_args[\"icrs_ra\"],\n            astrometry_args[\"icrs_dec\"],\n            astrometry_args[\"telescope_loc\"],\n            epoch=astrometry_args[\"epoch\"],\n            astrometry_library=in_lib,\n        )\n\n        check_ra, check_dec = uvutils.transform_app_to_icrs(\n            astrometry_args[\"time_array\"],\n            app_ra,\n            app_dec,\n            astrometry_args[\"telescope_loc\"],\n            astrometry_library=out_lib,\n        )\n        check_coord = SkyCoord(check_ra, check_dec, unit=\"rad\", frame=\"icrs\")\n        # Verify that everything agrees to better than µas-level accuracy if the\n        # libraries are the same, otherwise to 100 µas if cross-comparing libraries\n        if in_lib == out_lib:\n            assert np.all(\n                astrometry_args[\"icrs_coord\"].separation(check_coord).uarcsec < 1.0\n            )\n        else:\n            assert np.all(\n                astrometry_args[\"icrs_coord\"].separation(check_coord).uarcsec < 100.0\n            )\n\n\ndef test_calc_parallactic_angle():\n    \"\"\"\n    A relatively straightforward test to verify that we recover the parallactic\n    angles we expect given some known inputs\n    \"\"\"\n    expected_vals = np.array([1.0754290375762232, 0.0, -0.6518070715011698])\n    meas_vals = uvutils.calc_parallactic_angle(\n        [0.0, 1.0, 2.0], [-1.0, 0.0, 1.0], [2.0, 1.0, 0], 1.0,\n    )\n    # Make sure things agree to better than ~0.1 uas (as it definitely should)\n    assert np.allclose(expected_vals, meas_vals, 0.0, 1e-12)\n\n\ndef test_calc_frame_pos_angle():\n    \"\"\"\n    Verify that we recover frame position angles correctly\n    \"\"\"\n    # First test -- plug in \"topo\" for the frame, which should always produce an\n    # array of all zeros (the topo frame is what the apparent coords are in)\n    frame_pa = uvutils.calc_frame_pos_angle(\n        np.array([2456789.0] * 100),\n        np.arange(100) * (np.pi / 50),\n        np.zeros(100),\n        (0, 0, 0),\n        \"topo\",\n    )\n    assert len(frame_pa) == 100\n    assert np.all(frame_pa == 0.0)\n    # PA of zero degrees (they're always aligned)\n    # Next test -- plug in J2000 and see that we actually get back a frame PA\n    # of basically 0 degrees.\n    j2000_jd = Time(2000.0, format=\"jyear\").utc.jd\n    frame_pa = uvutils.calc_frame_pos_angle(\n        np.array([j2000_jd] * 100),\n        np.arange(100) * (np.pi / 50),\n        np.zeros(100),\n        (0, 0, 0),\n        \"fk5\",\n        ref_epoch=2000.0,\n    )\n    # At J2000, the only frame PA terms come from aberration, which basically max out\n    # at 
~< 1e-4 rad. Check to make sure that lines up with what we measure.\n assert np.all(np.abs(frame_pa) < 1e-4)\n\n # JD 2458849.5 is Jan-01-2020, so 20 years of parallax ought to have accumulated\n # (with about 1 arcmin/yr of precession). Make sure these values are sensible\n frame_pa = uvutils.calc_frame_pos_angle(\n np.array([2458849.5] * 100),\n np.arange(100) * (np.pi / 50),\n np.zeros(100),\n (0, 0, 0),\n \"fk5\",\n ref_epoch=2000.0,\n )\n assert np.all(np.abs(frame_pa) < 20 * (50.3 / 3600) * (np.pi / 180.0))\n # Check the PA at a couple of chosen points, which just so happen to be very close\n # in magnitude (as they're basically in the same plane as the motion of the Earth)\n assert np.isclose(frame_pa[25], 0.001909957544309159)\n assert np.isclose(frame_pa[-25], -0.0019098101664715339)\n\n\ndef test_jphl_lookup():\n \"\"\"\n A very simple lookup query to verify that the astroquery tools for accessing\n JPL-Horizons are working. This test is very limited, on account of not wanting to\n slam JPL w/ coordinate requests.\n \"\"\"\n pytest.importorskip(\"astroquery\")\n\n [\n ephem_times,\n ephem_ra,\n ephem_dec,\n ephem_dist,\n ephem_vel,\n ] = uvutils.lookup_jplhorizons(\"Sun\", 2456789.0)\n\n assert np.all(np.equal(ephem_times, 2456789.0))\n assert np.allclose(ephem_ra, 0.8393066751804976)\n assert np.allclose(ephem_dec, 0.3120687480116649)\n assert np.allclose(ephem_dist, 1.00996185750717)\n assert np.allclose(ephem_vel, 0.386914)\n\n\ndef test_ephem_interp_one_point():\n \"\"\"\n These tests do some simple checks to verify that the interpolator behaves properly\n when only being provided singleton values.\n \"\"\"\n # First test the case where there is only one ephem point, and thus everything\n # takes on that value\n time_array = np.arange(100) * 0.01\n ephem_times = np.array([0])\n ephem_ra = np.array([1.0])\n ephem_dec = np.array([2.0])\n ephem_dist = np.array([3.0])\n ephem_vel = np.array([4.0])\n\n ra_vals0, dec_vals0, dist_vals0, vel_vals0 = uvutils.interpolate_ephem(\n time_array,\n ephem_times,\n ephem_ra,\n ephem_dec,\n ephem_dist=ephem_dist,\n ephem_vel=ephem_vel,\n )\n\n assert np.all(ra_vals0 == 1.0)\n assert np.all(dec_vals0 == 2.0)\n assert np.all(dist_vals0 == 3.0)\n assert np.all(vel_vals0 == 4.0)\n\n\ndef test_ephem_interp_multi_point():\n \"\"\"\n Test that ephem coords are interpolated correctly when supplying more than a\n singleton value for the various arrays.\n \"\"\"\n # Next test the case where the ephem only has a couple of points, in which case the\n # code will default to using a simple, linear interpolation scheme.\n time_array = np.arange(100) * 0.01\n ephem_times = np.array([0, 1])\n ephem_ra = np.array([0, 1]) + 1.0\n ephem_dec = np.array([0, 1]) + 2.0\n ephem_dist = np.array([0, 1]) + 3.0\n ephem_vel = np.array([0, 1]) + 4.0\n\n ra_vals1, dec_vals1, dist_vals1, vel_vals1 = uvutils.interpolate_ephem(\n time_array,\n ephem_times,\n ephem_ra,\n ephem_dec,\n ephem_dist=ephem_dist,\n ephem_vel=ephem_vel,\n )\n\n # When there are lots more data points, the interpolator will default to using a\n # cubic spline, which _should_ be very close (to numerical precision limits) to what\n # we get with the method above.\n ephem_times = np.arange(11) * 0.1\n ephem_ra = (np.arange(11) * 0.1) + 1.0\n ephem_dec = (np.arange(11) * 0.1) + 2.0\n ephem_dist = (np.arange(11) * 0.1) + 3.0\n ephem_vel = (np.arange(11) * 0.1) + 4.0\n\n ra_vals2, dec_vals2, dist_vals2, vel_vals2 = uvutils.interpolate_ephem(\n time_array,\n ephem_times,\n ephem_ra,\n ephem_dec,\n 
ephem_dist=ephem_dist,\n        ephem_vel=ephem_vel,\n    )\n\n    # Make sure that everything is consistent to floating point precision\n    assert np.allclose(ra_vals1, ra_vals2, 1e-15, 0.0)\n    assert np.allclose(dec_vals1, dec_vals2, 1e-15, 0.0)\n    assert np.allclose(dist_vals1, dist_vals2, 1e-15, 0.0)\n    assert np.allclose(vel_vals1, vel_vals2, 1e-15, 0.0)\n    assert np.allclose(time_array + 1.0, ra_vals2, 1e-15, 0.0)\n    assert np.allclose(time_array + 2.0, dec_vals2, 1e-15, 0.0)\n    assert np.allclose(time_array + 3.0, dist_vals2, 1e-15, 0.0)\n    assert np.allclose(time_array + 4.0, vel_vals2, 1e-15, 0.0)\n\n\n@pytest.mark.parametrize(\"frame\", [\"icrs\", \"fk5\"])\ndef test_calc_app_sidereal(astrometry_args, frame):\n    \"\"\"\n    Tests that we can calculate app coords for sidereal objects\n    \"\"\"\n    # First step is to check and make sure we can do sidereal coords. This is the most\n    # basic thing to check, so this really _should_ work.\n    check_ra, check_dec = uvutils.calc_app_coords(\n        astrometry_args[\"fk5_ra\"] if (frame == \"fk5\") else astrometry_args[\"icrs_ra\"],\n        astrometry_args[\"fk5_dec\"] if (frame == \"fk5\") else astrometry_args[\"icrs_dec\"],\n        coord_type=\"sidereal\",\n        telescope_loc=astrometry_args[\"telescope_loc\"],\n        time_array=astrometry_args[\"time_array\"],\n        coord_frame=frame,\n        coord_epoch=astrometry_args[\"epoch\"],\n    )\n    check_coord = SkyCoord(check_ra, check_dec, unit=\"rad\")\n    assert np.all(astrometry_args[\"app_coord\"].separation(check_coord).uarcsec < 1.0)
\n\n\n@pytest.mark.parametrize(\"frame\", [\"icrs\", \"fk5\"])\ndef test_calc_app_ephem(astrometry_args, frame):\n    \"\"\"\n    Tests that we can calculate app coords for ephem objects\n    \"\"\"\n    # Next, see what happens when we pass an ephem. Note that this is just a single\n    # point ephem, so it's not testing any of the fancy interpolation, but we have other\n    # tests for poking at that. The two tests here are to check both the ICRS and FK5\n    # paths through the ephem.\n    if frame == \"fk5\":\n        ephem_ra = astrometry_args[\"fk5_ra\"]\n        ephem_dec = astrometry_args[\"fk5_dec\"]\n    else:\n        ephem_ra = np.array([astrometry_args[\"icrs_ra\"]])\n        ephem_dec = np.array([astrometry_args[\"icrs_dec\"]])\n\n    ephem_times = np.array([astrometry_args[\"time_array\"][0]])\n    check_ra, check_dec = uvutils.calc_app_coords(\n        ephem_ra,\n        ephem_dec,\n        coord_times=ephem_times,\n        coord_type=\"ephem\",\n        telescope_loc=astrometry_args[\"telescope_loc\"],\n        time_array=astrometry_args[\"time_array\"],\n        coord_epoch=astrometry_args[\"epoch\"],\n        coord_frame=frame,\n    )\n    check_coord = SkyCoord(check_ra, check_dec, unit=\"rad\")\n    assert np.all(astrometry_args[\"app_coord\"].separation(check_coord).uarcsec < 1.0)\n\n\ndef test_calc_app_driftscan(astrometry_args):\n    \"\"\"\n    Tests that we can calculate app coords for driftscan objects\n    \"\"\"\n    # Now on to the driftscan, which takes in arguments in terms of az and el (and\n    # the values we've given below should also be for zenith)\n    check_ra, check_dec = uvutils.calc_app_coords(\n        0.0,\n        np.pi / 2.0,\n        coord_type=\"driftscan\",\n        telescope_loc=astrometry_args[\"telescope_loc\"],\n        time_array=astrometry_args[\"time_array\"],\n    )\n    check_coord = SkyCoord(check_ra, check_dec, unit=\"rad\")\n    assert np.all(astrometry_args[\"drift_coord\"].separation(check_coord).uarcsec < 1.0)\n\n\ndef test_calc_app_unphased(astrometry_args):\n    \"\"\"\n    Tests that we can calculate app coords for unphased objects\n    \"\"\"\n    # Finally, check unphased, which is forced to point toward zenith (unlike driftscan,\n    # which is allowed to point at any az/el position)\n    check_ra, check_dec = uvutils.calc_app_coords(\n        None,\n        None,\n        coord_type=\"unphased\",\n        telescope_loc=astrometry_args[\"telescope_loc\"],\n        time_array=astrometry_args[\"time_array\"],\n        lst_array=astrometry_args[\"lst_array\"],\n    )\n    check_coord = SkyCoord(check_ra, check_dec, unit=\"rad\")\n\n    assert np.all(astrometry_args[\"drift_coord\"].separation(check_coord).uarcsec < 1.0)\n\n\ndef test_calc_app_fk5_roundtrip(astrometry_args):\n    # Do a round-trip with the two top-level functions and make sure they agree to\n    # better than 1 µas, first in FK5\n    app_ra, app_dec = uvutils.calc_app_coords(\n        0.0,\n        0.0,\n        coord_type=\"sidereal\",\n        telescope_loc=astrometry_args[\"telescope_loc\"],\n        time_array=astrometry_args[\"time_array\"],\n        coord_frame=\"fk5\",\n        coord_epoch=\"J2000.0\",\n    )\n\n    check_ra, check_dec = uvutils.calc_sidereal_coords(\n        astrometry_args[\"time_array\"],\n        app_ra,\n        app_dec,\n        astrometry_args[\"telescope_loc\"],\n        \"fk5\",\n        coord_epoch=2000.0,\n    )\n    check_coord = SkyCoord(check_ra, check_dec, unit=\"rad\")\n    assert np.all(SkyCoord(0, 0, unit=\"rad\").separation(check_coord).uarcsec < 1.0)\n\n\ndef test_calc_app_fk4_roundtrip(astrometry_args):\n    # Finally, check and make sure that FK4 performs similarly\n    app_ra, app_dec = uvutils.calc_app_coords(\n        0.0,\n        0.0,\n        coord_type=\"sidereal\",\n        telescope_loc=astrometry_args[\"telescope_loc\"],\n        time_array=astrometry_args[\"time_array\"],\n        coord_frame=\"fk4\",\n        coord_epoch=1950.0,\n    )\n\n    check_ra, check_dec = uvutils.calc_sidereal_coords(\n        astrometry_args[\"time_array\"],\n        app_ra,\n        app_dec,\n        astrometry_args[\"telescope_loc\"],\n        \"fk4\",\n        coord_epoch=1950.0,\n    )\n\n    check_coord = SkyCoord(check_ra, check_dec, unit=\"rad\")\n    assert np.all(SkyCoord(0, 0, unit=\"rad\").separation(check_coord).uarcsec < 1.0)
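\n\n\n# Hypothetical helper (added for illustration, not used by the original tests):\n# report the worst-case angular separation, in microarcseconds, between two\n# (ra, dec) solutions. The cross-library checks below inline this same\n# SkyCoord pattern.\ndef _max_sep_uarcsec(ra1, dec1, ra2, dec2):\n    coord1 = SkyCoord(ra1, dec1, unit=\"rad\")\n    coord2 = SkyCoord(ra2, dec2, unit=\"rad\")\n    return np.max(coord1.separation(coord2).uarcsec)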
\"pmsafe\" yielded 4 of')\[email protected]('ignore:ERFA function \"utcut1\" yielded 2 of')\[email protected]('ignore:ERFA function \"d2dtf\" yielded 1 of')\ndef test_astrometry_icrs_to_app(astrometry_args):\n \"\"\"\n Check for consistency beteen astrometry libraries when converting ICRS -> TOPP\n\n This test checks for consistency in apparent coordinate calculations using the\n three different libraries that are available to pyuvdata, namely: astropy, pyERFA,\n and python-novas. Between these three, we expect agreement within 100 µas in\n most instances, although for pyuvdata we tolerate differences of up to 1 mas since\n we don't expect to need astrometry better than this.\n \"\"\"\n pytest.importorskip(\"novas\")\n pytest.importorskip(\"novas_de405\")\n # Do some basic cross-checking between the different astrometry libraries\n # to see if they all line up correctly.\n astrometry_list = [\"novas\", \"erfa\", \"astropy\"]\n coord_results = [None, None, None, None]\n\n # These values were indepedently calculated using erfa v1.7.2, which at the\n # time of coding agreed to < 1 mas with astropy v4.2.1 and novas 3.1.1.5. We\n # use those values here as a sort of history check to make sure that something\n # hasn't changed in the underlying astrometry libraries without being caught\n precalc_ra = np.array(\n [2.4736400623737507, 2.4736352750862760, 2.4736085367439893, 2.4734781687162820]\n )\n precalc_dec = np.array(\n [1.2329576409345270, 1.2329556410623417, 1.2329541289890513, 1.2328577308430242]\n )\n\n coord_results[3] = (precalc_ra, precalc_dec)\n\n for idx, name in enumerate(astrometry_list):\n coord_results[idx] = uvutils.transform_icrs_to_app(\n astrometry_args[\"time_array\"],\n astrometry_args[\"icrs_ra\"],\n astrometry_args[\"icrs_dec\"],\n astrometry_args[\"telescope_loc\"],\n epoch=astrometry_args[\"epoch\"],\n pm_ra=astrometry_args[\"pm_ra\"],\n pm_dec=astrometry_args[\"pm_dec\"],\n vrad=astrometry_args[\"vrad\"],\n dist=astrometry_args[\"dist\"],\n astrometry_library=name,\n )\n\n for idx in range(len(coord_results) - 1):\n for jdx in range(idx + 1, len(coord_results)):\n alpha_coord = SkyCoord(\n coord_results[idx][0], coord_results[idx][1], unit=\"rad\"\n )\n beta_coord = SkyCoord(\n coord_results[jdx][0], coord_results[jdx][1], unit=\"rad\"\n )\n assert np.all(alpha_coord.separation(beta_coord).marcsec < 1.0)\n\n\ndef test_astrometry_app_to_icrs(astrometry_args):\n \"\"\"\n Check for consistency beteen astrometry libraries when converting TOPO -> ICRS\n\n This test checks for consistency between the pyERFA and astropy libraries for\n converting apparent coords back to ICRS. Between these two, we expect agreement\n within 100 µas in most instances, although for pyuvdata we tolerate differences of\n up to 1 mas since we don't expect to need astrometry better than this.\n \"\"\"\n astrometry_list = [\"erfa\", \"astropy\"]\n coord_results = [None, None, None]\n\n # These values were indepedently calculated using erfa v1.7.2, which at the\n # time of coding agreed to < 1 mas with astropy v4.2.1. 
\n    # those values here as a sort of history check to make sure that something\n    # hasn't changed in the underlying astrometry libraries without being caught\n    precalc_ra = np.array(\n        [2.4623360300722170, 2.4623407989706756, 2.4623676572008280, 2.4624965192217900]\n    )\n    precalc_dec = np.array(\n        [1.2350407132378372, 1.2350427272595987, 1.2350443204758008, 1.2351412288987034]\n    )\n    coord_results[2] = (precalc_ra, precalc_dec)\n\n    for idx, name in enumerate(astrometry_list):\n        # Note we're using icrs_ra and icrs_dec instead of app_ra and app_dec keys\n        # because the above pre-calculated values were generated using the ICRS\n        # coordinate values\n        coord_results[idx] = uvutils.transform_app_to_icrs(\n            astrometry_args[\"time_array\"],\n            astrometry_args[\"icrs_ra\"],\n            astrometry_args[\"icrs_dec\"],\n            astrometry_args[\"telescope_loc\"],\n            astrometry_library=name,\n        )\n\n    for idx in range(len(coord_results) - 1):\n        for jdx in range(idx + 1, len(coord_results)):\n            alpha_coord = SkyCoord(\n                coord_results[idx][0], coord_results[idx][1], unit=\"rad\"\n            )\n            beta_coord = SkyCoord(\n                coord_results[jdx][0], coord_results[jdx][1], unit=\"rad\"\n            )\n            assert np.all(alpha_coord.separation(beta_coord).marcsec < 1.0)\n\n\ndef test_sidereal_reptime(astrometry_args):\n    \"\"\"\n    Check for equality when supplying a singleton time versus an array of identical\n    values for transform_sidereal_coords\n    \"\"\"\n\n    gcrs_ra, gcrs_dec = uvutils.transform_sidereal_coords(\n        astrometry_args[\"icrs_ra\"] * np.ones(2),\n        astrometry_args[\"icrs_dec\"] * np.ones(2),\n        \"icrs\",\n        \"gcrs\",\n        time_array=Time(astrometry_args[\"time_array\"][0], format=\"jd\"),\n    )\n\n    check_ra, check_dec = uvutils.transform_sidereal_coords(\n        astrometry_args[\"icrs_ra\"] * np.ones(2),\n        astrometry_args[\"icrs_dec\"] * np.ones(2),\n        \"icrs\",\n        \"gcrs\",\n        time_array=Time(astrometry_args[\"time_array\"][0] * np.ones(2), format=\"jd\"),\n    )\n\n    assert np.all(gcrs_ra == check_ra)\n    assert np.all(gcrs_dec == check_dec)\n\n\ndef test_transform_icrs_to_app_time_obj(astrometry_args):\n    \"\"\"\n    Test that we recover identical values when using Time objects instead of floats\n    for the various time-related arguments in transform_icrs_to_app.\n    \"\"\"\n    check_ra, check_dec = uvutils.transform_icrs_to_app(\n        Time(astrometry_args[\"time_array\"], format=\"jd\"),\n        astrometry_args[\"icrs_ra\"],\n        astrometry_args[\"icrs_dec\"],\n        astrometry_args[\"telescope_loc\"],\n        epoch=Time(astrometry_args[\"epoch\"], format=\"jyear\"),\n    )\n\n    assert np.all(check_ra == astrometry_args[\"app_ra\"])\n    assert np.all(check_dec == astrometry_args[\"app_dec\"])\n\n\ndef test_transform_app_to_icrs_objs(astrometry_args):\n    \"\"\"\n    Test that we recover identical values when using Time/EarthLocation objects instead\n    of floats for time_array and telescope_loc, respectively for transform_app_to_icrs.\n    \"\"\"\n    telescope_loc = EarthLocation.from_geodetic(\n        astrometry_args[\"telescope_loc\"][1] * (180.0 / np.pi),\n        astrometry_args[\"telescope_loc\"][0] * (180.0 / np.pi),\n        height=astrometry_args[\"telescope_loc\"][2],\n    )\n\n    icrs_ra, icrs_dec = uvutils.transform_app_to_icrs(\n        astrometry_args[\"time_array\"][0],\n        astrometry_args[\"app_ra\"][0],\n        astrometry_args[\"app_dec\"][0],\n        astrometry_args[\"telescope_loc\"],\n    )\n\n    check_ra, check_dec = uvutils.transform_app_to_icrs(\n        Time(astrometry_args[\"time_array\"][0], format=\"jd\"),\n        astrometry_args[\"app_ra\"][0],\n        astrometry_args[\"app_dec\"][0],\n        telescope_loc,\n    )\n\n    assert np.all(check_ra == icrs_ra)\n    assert np.all(check_dec == icrs_dec)
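\n\n\n# Hypothetical convenience helper (illustration only, not used by the tests):\n# build an EarthLocation from the (lat, lon, alt) tuples used throughout this\n# file. Note that EarthLocation.from_geodetic takes longitude first, which is\n# why the tuple indices appear swapped in the calls above and below.\ndef _telescope_loc_obj(telescope_loc):\n    return EarthLocation.from_geodetic(\n        telescope_loc[1] * (180.0 / np.pi),\n        telescope_loc[0] * (180.0 / np.pi),\n        height=telescope_loc[2],\n    )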
\n\n\ndef test_calc_app_coords_objs(astrometry_args):\n    \"\"\"\n    Test that we recover identical values when using Time/EarthLocation objects instead\n    of floats for time_array and telescope_loc, respectively for calc_app_coords.\n    \"\"\"\n    telescope_loc = EarthLocation.from_geodetic(\n        astrometry_args[\"telescope_loc\"][1] * (180.0 / np.pi),\n        astrometry_args[\"telescope_loc\"][0] * (180.0 / np.pi),\n        height=astrometry_args[\"telescope_loc\"][2],\n    )\n\n    app_ra, app_dec = uvutils.calc_app_coords(\n        astrometry_args[\"icrs_ra\"],\n        astrometry_args[\"icrs_dec\"],\n        time_array=astrometry_args[\"time_array\"][0],\n        telescope_loc=astrometry_args[\"telescope_loc\"],\n    )\n\n    check_ra, check_dec = uvutils.calc_app_coords(\n        astrometry_args[\"icrs_ra\"],\n        astrometry_args[\"icrs_dec\"],\n        time_array=Time(astrometry_args[\"time_array\"][0], format=\"jd\"),\n        telescope_loc=telescope_loc,\n    )\n\n    assert np.all(check_ra == app_ra)\n    assert np.all(check_dec == app_dec)\n\n\ndef test_astrometry_lst(astrometry_args):\n    \"\"\"\n    Check for consistency between astrometry libraries when calculating LAST\n\n    This test evaluates consistency in calculating local apparent sidereal time when\n    using the different astrometry libraries available in pyuvdata, namely: astropy,\n    pyERFA, and python-novas. Between these three, we expect agreement within 6 µs in\n    most instances, although for pyuvdata we tolerate differences of up to ~60 µs\n    (which translates to 1 mas in sky position error) since we don't expect to need\n    astrometry better than this.\n    \"\"\"\n    pytest.importorskip(\"novas\")\n    pytest.importorskip(\"novas_de405\")\n    astrometry_list = [\"erfa\", \"astropy\", \"novas\"]\n    lst_results = [None, None, None, None]\n    # These values were independently calculated using erfa v1.7.2, which at the\n    # time of coding agreed to < 50 µs with astropy v4.2.1 and novas 3.1.1.5. 
We\n # use those values here as a sort of history check to make sure that something\n # hasn't changed in the underlying astrometry libraries without being caught\n lst_results[3] = np.array(\n [0.8506741803481069, 2.442973468758589, 4.1728965710160555, 1.0130589895999587]\n )\n\n for idx, name in enumerate(astrometry_list):\n # Note that the units aren't right here (missing a rad-> deg conversion), but\n # the above values were calculated using the arguments below.\n lst_results[idx] = uvutils.get_lst_for_time(\n astrometry_args[\"time_array\"],\n astrometry_args[\"telescope_loc\"][0],\n astrometry_args[\"telescope_loc\"][1],\n astrometry_args[\"telescope_loc\"][2],\n astrometry_library=name,\n )\n\n for idx in range(len(lst_results) - 1):\n for jdx in range(idx + 1, len(lst_results)):\n alpha_time = lst_results[idx] * units.rad\n beta_time = lst_results[jdx] * units.rad\n assert np.all(np.abs(alpha_time - beta_time).to_value(\"mas\") < 1.0)\n\n\ndef test_lst_for_time_float_vs_array(astrometry_args):\n \"\"\"\n Test for equality when passing a single float vs an ndarray (of length 1) when\n calling get_lst_for_time.\n \"\"\"\n lst_array = uvutils.get_lst_for_time(\n np.array(astrometry_args[\"time_array\"][0]),\n astrometry_args[\"telescope_loc\"][0] * (180.0 / np.pi),\n astrometry_args[\"telescope_loc\"][1] * (180.0 / np.pi),\n astrometry_args[\"telescope_loc\"][2],\n )\n\n check_lst = uvutils.get_lst_for_time(\n astrometry_args[\"time_array\"][0],\n astrometry_args[\"telescope_loc\"][0] * (180.0 / np.pi),\n astrometry_args[\"telescope_loc\"][1] * (180.0 / np.pi),\n astrometry_args[\"telescope_loc\"][2],\n )\n\n assert np.all(lst_array == check_lst)\n\n\ndef test_phasing_funcs():\n # these tests are based on a notebook where I tested against the mwa_tools\n # phasing code\n ra_hrs = 12.1\n dec_degs = -42.3\n mjd = 55780.1\n\n array_center_xyz = np.array([-2559454.08, 5095372.14, -2849057.18])\n lat_lon_alt = uvutils.LatLonAlt_from_XYZ(array_center_xyz)\n\n obs_time = Time(mjd, format=\"mjd\", location=(lat_lon_alt[1], lat_lon_alt[0]))\n\n icrs_coord = SkyCoord(\n ra=Angle(ra_hrs, unit=\"hr\"), dec=Angle(dec_degs, unit=\"deg\"), obstime=obs_time\n )\n gcrs_coord = icrs_coord.transform_to(\"gcrs\")\n\n # in east/north/up frame (relative to array center) in meters: (Nants, 3)\n ants_enu = np.array([-101.94, 156.41, 1.24])\n\n ant_xyz_abs = uvutils.ECEF_from_ENU(\n ants_enu, lat_lon_alt[0], lat_lon_alt[1], lat_lon_alt[2]\n )\n\n array_center_coord = SkyCoord(\n x=array_center_xyz[0] * units.m,\n y=array_center_xyz[1] * units.m,\n z=array_center_xyz[2] * units.m,\n frame=\"itrs\",\n obstime=obs_time,\n )\n\n itrs_coord = SkyCoord(\n x=ant_xyz_abs[0] * units.m,\n y=ant_xyz_abs[1] * units.m,\n z=ant_xyz_abs[2] * units.m,\n frame=\"itrs\",\n obstime=obs_time,\n )\n\n gcrs_array_center = array_center_coord.transform_to(\"gcrs\")\n gcrs_from_itrs_coord = itrs_coord.transform_to(\"gcrs\")\n\n gcrs_rel = (\n (gcrs_from_itrs_coord.cartesian - gcrs_array_center.cartesian).get_xyz().T\n )\n\n gcrs_uvw = uvutils.phase_uvw(gcrs_coord.ra.rad, gcrs_coord.dec.rad, gcrs_rel.value)\n\n mwa_tools_calcuvw_u = -97.122828\n mwa_tools_calcuvw_v = 50.388281\n mwa_tools_calcuvw_w = -151.27976\n\n assert np.allclose(gcrs_uvw[0, 0], mwa_tools_calcuvw_u, atol=1e-3)\n assert np.allclose(gcrs_uvw[0, 1], mwa_tools_calcuvw_v, atol=1e-3)\n assert np.allclose(gcrs_uvw[0, 2], mwa_tools_calcuvw_w, atol=1e-3)\n\n # also test unphasing\n temp2 = uvutils.unphase_uvw(\n gcrs_coord.ra.rad, gcrs_coord.dec.rad, np.squeeze(gcrs_uvw)\n 
)\n assert np.allclose(gcrs_rel.value, temp2)\n\n\ndef test_pol_funcs():\n \"\"\" Test utility functions to convert between polarization strings and numbers \"\"\"\n\n pol_nums = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4]\n pol_str = [\"yx\", \"xy\", \"yy\", \"xx\", \"lr\", \"rl\", \"ll\", \"rr\", \"pI\", \"pQ\", \"pU\", \"pV\"]\n assert pol_nums == uvutils.polstr2num(pol_str)\n assert pol_str == uvutils.polnum2str(pol_nums)\n # Check individuals\n assert -6 == uvutils.polstr2num(\"YY\")\n assert \"pV\" == uvutils.polnum2str(4)\n # Check errors\n pytest.raises(KeyError, uvutils.polstr2num, \"foo\")\n pytest.raises(ValueError, uvutils.polstr2num, 1)\n pytest.raises(ValueError, uvutils.polnum2str, 7.3)\n # Check parse\n assert uvutils.parse_polstr(\"xX\") == \"xx\"\n assert uvutils.parse_polstr(\"XX\") == \"xx\"\n assert uvutils.parse_polstr(\"i\") == \"pI\"\n\n\ndef test_pol_funcs_x_orientation():\n \"\"\"Test functions to convert between pol strings and numbers with x_orientation.\"\"\"\n\n pol_nums = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4]\n\n x_orient1 = \"e\"\n pol_str = [\"ne\", \"en\", \"nn\", \"ee\", \"lr\", \"rl\", \"ll\", \"rr\", \"pI\", \"pQ\", \"pU\", \"pV\"]\n assert pol_nums == uvutils.polstr2num(pol_str, x_orientation=x_orient1)\n assert pol_str == uvutils.polnum2str(pol_nums, x_orientation=x_orient1)\n # Check individuals\n assert -6 == uvutils.polstr2num(\"NN\", x_orientation=x_orient1)\n assert \"pV\" == uvutils.polnum2str(4)\n # Check errors\n pytest.raises(KeyError, uvutils.polstr2num, \"foo\", x_orientation=x_orient1)\n pytest.raises(ValueError, uvutils.polstr2num, 1, x_orientation=x_orient1)\n pytest.raises(ValueError, uvutils.polnum2str, 7.3, x_orientation=x_orient1)\n # Check parse\n assert uvutils.parse_polstr(\"eE\", x_orientation=x_orient1) == \"ee\"\n assert uvutils.parse_polstr(\"xx\", x_orientation=x_orient1) == \"ee\"\n assert uvutils.parse_polstr(\"NN\", x_orientation=x_orient1) == \"nn\"\n assert uvutils.parse_polstr(\"yy\", x_orientation=x_orient1) == \"nn\"\n assert uvutils.parse_polstr(\"i\", x_orientation=x_orient1) == \"pI\"\n\n x_orient2 = \"n\"\n pol_str = [\"en\", \"ne\", \"ee\", \"nn\", \"lr\", \"rl\", \"ll\", \"rr\", \"pI\", \"pQ\", \"pU\", \"pV\"]\n assert pol_nums == uvutils.polstr2num(pol_str, x_orientation=x_orient2)\n assert pol_str == uvutils.polnum2str(pol_nums, x_orientation=x_orient2)\n # Check individuals\n assert -6 == uvutils.polstr2num(\"EE\", x_orientation=x_orient2)\n assert \"pV\" == uvutils.polnum2str(4)\n # Check errors\n pytest.raises(KeyError, uvutils.polstr2num, \"foo\", x_orientation=x_orient2)\n pytest.raises(ValueError, uvutils.polstr2num, 1, x_orientation=x_orient2)\n pytest.raises(ValueError, uvutils.polnum2str, 7.3, x_orientation=x_orient2)\n # Check parse\n assert uvutils.parse_polstr(\"nN\", x_orientation=x_orient2) == \"nn\"\n assert uvutils.parse_polstr(\"xx\", x_orientation=x_orient2) == \"nn\"\n assert uvutils.parse_polstr(\"EE\", x_orientation=x_orient2) == \"ee\"\n assert uvutils.parse_polstr(\"yy\", x_orientation=x_orient2) == \"ee\"\n assert uvutils.parse_polstr(\"i\", x_orientation=x_orient2) == \"pI\"\n\n # check warnings for non-recognized x_orientation\n with uvtest.check_warnings(UserWarning, \"x_orientation not recognized\"):\n assert uvutils.polstr2num(\"xx\", x_orientation=\"foo\") == -5\n\n with uvtest.check_warnings(UserWarning, \"x_orientation not recognized\"):\n assert uvutils.polnum2str(-6, x_orientation=\"foo\") == \"yy\"\n\n\ndef test_jones_num_funcs():\n \"\"\"Test functions to convert 
between jones polarization strings and numbers.\"\"\"\n\n    jnums = [-8, -7, -6, -5, -4, -3, -2, -1]\n    jstr = [\"Jyx\", \"Jxy\", \"Jyy\", \"Jxx\", \"Jlr\", \"Jrl\", \"Jll\", \"Jrr\"]\n    assert jnums == uvutils.jstr2num(jstr)\n    assert jstr == uvutils.jnum2str(jnums)\n    # Check shorthands\n    jstr = [\"yx\", \"xy\", \"yy\", \"y\", \"xx\", \"x\", \"lr\", \"rl\", \"ll\", \"l\", \"rr\", \"r\"]\n    jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1]\n    assert jnums == uvutils.jstr2num(jstr)\n    # Check individuals\n    assert -6 == uvutils.jstr2num(\"jyy\")\n    assert \"Jxy\" == uvutils.jnum2str(-7)\n    # Check errors\n    pytest.raises(KeyError, uvutils.jstr2num, \"foo\")\n    pytest.raises(ValueError, uvutils.jstr2num, 1)\n    pytest.raises(ValueError, uvutils.jnum2str, 7.3)\n\n    # check parse method\n    assert uvutils.parse_jpolstr(\"x\") == \"Jxx\"\n    assert uvutils.parse_jpolstr(\"xy\") == \"Jxy\"\n    assert uvutils.parse_jpolstr(\"XY\") == \"Jxy\"\n\n\ndef test_jones_num_funcs_x_orientation():\n    \"\"\"Test functions to convert jones pol strings and numbers with x_orientation.\"\"\"\n\n    jnums = [-8, -7, -6, -5, -4, -3, -2, -1]\n    x_orient1 = \"east\"\n    jstr = [\"Jne\", \"Jen\", \"Jnn\", \"Jee\", \"Jlr\", \"Jrl\", \"Jll\", \"Jrr\"]\n    assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient1)\n    assert jstr == uvutils.jnum2str(jnums, x_orientation=x_orient1)\n    # Check shorthands\n    jstr = [\"ne\", \"en\", \"nn\", \"n\", \"ee\", \"e\", \"lr\", \"rl\", \"ll\", \"l\", \"rr\", \"r\"]\n    jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1]\n    assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient1)\n    # Check individuals\n    assert -6 == uvutils.jstr2num(\"jnn\", x_orientation=x_orient1)\n    assert \"Jen\" == uvutils.jnum2str(-7, x_orientation=x_orient1)\n    # Check errors\n    pytest.raises(KeyError, uvutils.jstr2num, \"foo\", x_orientation=x_orient1)\n    pytest.raises(ValueError, uvutils.jstr2num, 1, x_orientation=x_orient1)\n    pytest.raises(ValueError, uvutils.jnum2str, 7.3, x_orientation=x_orient1)\n\n    # check parse method\n    assert uvutils.parse_jpolstr(\"e\", x_orientation=x_orient1) == \"Jee\"\n    assert uvutils.parse_jpolstr(\"x\", x_orientation=x_orient1) == \"Jee\"\n    assert uvutils.parse_jpolstr(\"y\", x_orientation=x_orient1) == \"Jnn\"\n    assert uvutils.parse_jpolstr(\"en\", x_orientation=x_orient1) == \"Jen\"\n    assert uvutils.parse_jpolstr(\"NE\", x_orientation=x_orient1) == \"Jne\"\n\n    jnums = [-8, -7, -6, -5, -4, -3, -2, -1]\n    x_orient2 = \"north\"\n    jstr = [\"Jen\", \"Jne\", \"Jee\", \"Jnn\", \"Jlr\", \"Jrl\", \"Jll\", \"Jrr\"]\n    assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient2)\n    assert jstr == uvutils.jnum2str(jnums, x_orientation=x_orient2)\n    # Check shorthands\n    jstr = [\"en\", \"ne\", \"ee\", \"e\", \"nn\", \"n\", \"lr\", \"rl\", \"ll\", \"l\", \"rr\", \"r\"]\n    jnums = [-8, -7, -6, -6, -5, -5, -4, -3, -2, -2, -1, -1]\n    assert jnums == uvutils.jstr2num(jstr, x_orientation=x_orient2)\n    # Check individuals\n    assert -6 == uvutils.jstr2num(\"jee\", x_orientation=x_orient2)\n    assert \"Jne\" == uvutils.jnum2str(-7, x_orientation=x_orient2)\n    # Check errors\n    pytest.raises(KeyError, uvutils.jstr2num, \"foo\", x_orientation=x_orient2)\n    pytest.raises(ValueError, uvutils.jstr2num, 1, x_orientation=x_orient2)\n    pytest.raises(ValueError, uvutils.jnum2str, 7.3, x_orientation=x_orient2)\n\n    # check parse method\n    assert uvutils.parse_jpolstr(\"e\", x_orientation=x_orient2) == \"Jee\"\n    assert uvutils.parse_jpolstr(\"x\", x_orientation=x_orient2) == \"Jnn\"\n    assert uvutils.parse_jpolstr(\"y\", 
x_orientation=x_orient2) == \"Jee\"\n assert uvutils.parse_jpolstr(\"en\", x_orientation=x_orient2) == \"Jen\"\n assert uvutils.parse_jpolstr(\"NE\", x_orientation=x_orient2) == \"Jne\"\n\n # check warnings for non-recognized x_orientation\n with uvtest.check_warnings(UserWarning, \"x_orientation not recognized\"):\n assert uvutils.jstr2num(\"x\", x_orientation=\"foo\") == -5\n\n with uvtest.check_warnings(UserWarning, \"x_orientation not recognized\"):\n assert uvutils.jnum2str(-6, x_orientation=\"foo\") == \"Jyy\"\n\n\ndef test_conj_pol():\n \"\"\" Test function to conjugate pols \"\"\"\n\n pol_nums = [-8, -7, -6, -5, -4, -3, -2, -1, 1, 2, 3, 4]\n cpol_nums = [-7, -8, -6, -5, -3, -4, -2, -1, 1, 2, 3, 4]\n assert pol_nums == uvutils.conj_pol(cpol_nums)\n assert uvutils.conj_pol(pol_nums) == cpol_nums\n # fmt: off\n pol_str = ['yx', 'xy', 'yy', 'xx', 'ee', 'nn', 'en', 'ne', 'lr', 'rl', 'll',\n 'rr', 'pI', 'pQ', 'pU', 'pV']\n cpol_str = ['xy', 'yx', 'yy', 'xx', 'ee', 'nn', 'ne', 'en', 'rl', 'lr', 'll',\n 'rr', 'pI', 'pQ', 'pU', 'pV']\n # fmt: on\n assert pol_str == uvutils.conj_pol(cpol_str)\n assert uvutils.conj_pol(pol_str) == cpol_str\n assert [pol_str, pol_nums] == uvutils.conj_pol([cpol_str, cpol_nums])\n\n # Test error with jones\n cjstr = [\"Jxy\", \"Jyx\", \"Jyy\", \"Jxx\", \"Jrl\", \"Jlr\", \"Jll\", \"Jrr\"]\n assert pytest.raises(KeyError, uvutils.conj_pol, cjstr)\n\n # Test invalid pol\n with pytest.raises(ValueError) as cm:\n uvutils.conj_pol(2.3)\n assert str(cm.value).startswith(\n \"Polarization not recognized, cannot be conjugated.\"\n )\n\n\ndef test_redundancy_finder():\n \"\"\"\n Check that get_baseline_redundancies and get_antenna_redundancies return consistent\n redundant groups for a test file with the HERA19 layout.\n \"\"\"\n uvd = UVData()\n uvd.read_uvfits(\n os.path.join(DATA_PATH, \"fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits\")\n )\n\n uvd.select(times=uvd.time_array[0])\n uvd.unphase_to_drift(use_ant_pos=True)\n # uvw_array is now equivalent to baseline positions\n uvd.conjugate_bls(convention=\"ant1<ant2\", use_enu=True)\n\n tol = 0.05 # meters\n\n bl_positions = uvd.uvw_array\n bl_pos_backup = copy.deepcopy(uvd.uvw_array)\n\n pytest.raises(\n ValueError,\n uvutils.get_baseline_redundancies,\n uvd.baseline_array,\n bl_positions[0:2, 0:1],\n )\n\n baseline_groups, vec_bin_centers, lens = uvutils.get_baseline_redundancies(\n uvd.baseline_array, bl_positions, tol=tol\n )\n\n for gi, gp in enumerate(baseline_groups):\n for bl in gp:\n bl_ind = np.where(uvd.baseline_array == bl)\n bl_vec = bl_positions[bl_ind]\n assert np.allclose(\n np.sqrt(np.dot(bl_vec, vec_bin_centers[gi])), lens[gi], atol=tol\n )\n\n # Shift the baselines around in a circle. Check that the same baselines are\n # recovered to the corresponding tolerance increase.\n # This moves one baseline at a time by a fixed displacement and checks that\n # the redundant groups are the same.\n\n hightol = 0.25 # meters. 
Less than the smallest baseline in the file.\n    Nbls = uvd.Nbls\n    Nshifts = 5\n    shift_angs = np.linspace(0, 2 * np.pi, Nshifts)\n    base_shifts = np.stack(\n        (\n            (hightol - tol) * np.cos(shift_angs),\n            (hightol - tol) * np.sin(shift_angs),\n            np.zeros(Nshifts),\n        )\n    ).T\n    for sh in base_shifts:\n        for bi in range(Nbls):\n            # Shift one baseline at a time.\n            bl_positions_new = uvd.uvw_array\n            bl_positions_new[bi] += sh\n\n            (\n                baseline_groups_new,\n                vec_bin_centers,\n                lens,\n            ) = uvutils.get_baseline_redundancies(\n                uvd.baseline_array, bl_positions_new, tol=hightol\n            )\n\n            for gi, gp in enumerate(baseline_groups_new):\n                for bl in gp:\n                    bl_ind = np.where(uvd.baseline_array == bl)\n                    bl_vec = bl_positions[bl_ind]\n                    assert np.allclose(\n                        np.sqrt(np.abs(np.dot(bl_vec, vec_bin_centers[gi]))),\n                        lens[gi],\n                        atol=hightol,\n                    )\n\n            # Compare baseline groups:\n            a = [tuple(el) for el in baseline_groups]\n            b = [tuple(el) for el in baseline_groups_new]\n            assert set(a) == set(b)\n\n    tol = 0.05\n\n    antpos, antnums = uvd.get_ENU_antpos()\n\n    baseline_groups_ants, vec_bin_centers, lens = uvutils.get_antenna_redundancies(\n        antnums, antpos, tol=tol, include_autos=False\n    )\n    # Under these conditions, should see 19 redundant groups in the file.\n    assert len(baseline_groups_ants) == 19\n\n    # Check with conjugated baseline redundancies returned\n    # Ensure at least one baseline has u==0 and v!=0 (for coverage of this case)\n    bl_positions[16, 0] = 0\n    (\n        baseline_groups,\n        vec_bin_centers,\n        lens,\n        conjugates,\n    ) = uvutils.get_baseline_redundancies(\n        uvd.baseline_array, bl_positions, tol=tol, with_conjugates=True\n    )\n\n    # restore baseline (16,0) and repeat to get correct groups\n    bl_positions = bl_pos_backup\n    (\n        baseline_groups,\n        vec_bin_centers,\n        lens,\n        conjugates,\n    ) = uvutils.get_baseline_redundancies(\n        uvd.baseline_array, bl_positions, tol=tol, with_conjugates=True\n    )\n\n    # Apply flips to compare with get_antenna_redundancies().\n    bl_gps_unconj = copy.deepcopy(baseline_groups)\n    for gi, gp in enumerate(bl_gps_unconj):\n        for bi, bl in enumerate(gp):\n            if bl in conjugates:\n                bl_gps_unconj[gi][bi] = uvutils.baseline_index_flip(bl, len(antnums))\n    bl_gps_unconj = [sorted(bgp) for bgp in bl_gps_unconj]\n    bl_gps_ants = [sorted(bgp) for bgp in baseline_groups_ants]\n    assert np.all(sorted(bl_gps_ants) == sorted(bl_gps_unconj))\n    for gi, gp in enumerate(baseline_groups):\n        for bl in gp:\n            bl_ind = np.where(uvd.baseline_array == bl)\n            bl_vec = bl_positions[bl_ind]\n            if bl in conjugates:\n                bl_vec *= -1\n            assert np.isclose(\n                np.sqrt(np.dot(bl_vec, vec_bin_centers[gi])), lens[gi], atol=tol\n            )\n\n\ndef test_high_tolerance_redundancy_error():\n    \"\"\"\n    Confirm that an error is raised if the redundancy tolerance is set too high,\n    such that baselines end up in multiple redundant groups.\n    \"\"\"\n    uvd = UVData()\n    uvd.read_uvfits(\n        os.path.join(DATA_PATH, \"fewant_randsrc_airybeam_Nsrc100_10MHz.uvfits\")\n    )\n\n    uvd.select(times=uvd.time_array[0])\n    uvd.unphase_to_drift(use_ant_pos=True)\n    # uvw_array is now equivalent to baseline positions\n    uvd.conjugate_bls(convention=\"ant1<ant2\", use_enu=True)\n    bl_positions = uvd.uvw_array\n\n    tol = 20.05  # meters\n\n    with pytest.raises(ValueError) as cm:\n        (\n            baseline_groups,\n            vec_bin_centers,\n            lens,\n            conjugates,\n        ) = uvutils.get_baseline_redundancies(\n            uvd.baseline_array, bl_positions, tol=tol, with_conjugates=True\n        )\n    assert \"Some baselines are falling into\" in str(cm.value)
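\n\n\n# Added sketch (not in the original suite): with with_conjugates=True, a pair\n# of equal-and-opposite baseline vectors should collapse into one redundant\n# group, with the u < 0 member reported as conjugated. This assumes the\n# flipping convention checked in detail in the next test (u < -tol gets\n# flipped).\ndef test_redundancy_simple_conjugate_pair():\n    bl_inds = uvutils.antnums_to_baseline(np.array([0, 0]), np.array([1, 2]), 3)\n    bl_vecs = np.array([[10.0, 0.0, 0.0], [-10.0, 0.0, 0.0]])\n    bl_gps, vec_bin_centers, lens, conjugates = uvutils.get_baseline_redundancies(\n        bl_inds, bl_vecs, tol=0.5, with_conjugates=True\n    )\n    assert len(bl_gps) == 1\n    assert list(conjugates) == [bl_inds[1]]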
\n\n\ndef test_redundancy_conjugates():\n    \"\"\"\n    Check that redundancy finding with conjugation works.\n\n    Check that the correct baselines are flipped.\n    \"\"\"\n    Nants = 10\n    tol = 0.5\n    ant1_arr = np.tile(np.arange(Nants), Nants)\n    ant2_arr = np.repeat(np.arange(Nants), Nants)\n    Nbls = ant1_arr.size\n    bl_inds = uvutils.antnums_to_baseline(ant1_arr, ant2_arr, Nants)\n\n    maxbl = 100.0\n    bl_vecs = np.random.uniform(-maxbl, maxbl, (Nbls, 3))\n    bl_vecs[0, 0] = 0\n    bl_vecs[1, 0:2] = 0\n\n    expected_conjugates = []\n    for i, (u, v, w) in enumerate(bl_vecs):\n        uneg = u < -tol\n        uzer = np.isclose(u, 0.0, atol=tol)\n        vneg = v < -tol\n        vzer = np.isclose(v, 0.0, atol=tol)\n        wneg = w < -tol\n        if uneg or (uzer and vneg) or (uzer and vzer and wneg):\n            expected_conjugates.append(bl_inds[i])\n    bl_gps, vecs, lens, conjugates = uvutils.get_baseline_redundancies(\n        bl_inds, bl_vecs, tol=tol, with_conjugates=True\n    )\n\n    assert sorted(conjugates) == sorted(expected_conjugates)\n\n\ndef test_redundancy_finder_fully_redundant_array():\n    \"\"\"Test the redundancy finder for a fully redundant array.\"\"\"\n    uvd = UVData()\n    uvd.read_uvfits(os.path.join(DATA_PATH, \"test_redundant_array.uvfits\"))\n    uvd.select(times=uvd.time_array[0])\n\n    tol = 1  # meters\n    bl_positions = uvd.uvw_array\n\n    (\n        baseline_groups,\n        vec_bin_centers,\n        lens,\n        conjugates,\n    ) = uvutils.get_baseline_redundancies(\n        uvd.baseline_array, bl_positions, tol=tol, with_conjugates=True\n    )\n\n    # Only 1 set of redundant baselines\n    assert len(baseline_groups) == 1\n    # Should return the input baselines (note: compare sorted sequences here,\n    # since list.sort() returns None and would make the check vacuous)\n    assert sorted(baseline_groups[0]) == sorted(np.unique(uvd.baseline_array))\n\n\n@pytest.mark.parametrize(\"n_blocks\", [1, 10])\ndef test_adjacency_lists(n_blocks):\n    \"\"\"Test the adjacency list method in utils.\"\"\"\n    # n_blocks: in _adj_list, loop over chunks of vectors when computing distances.\n\n    # Make a grid.\n    Nx = 5\n    Lmax = 50\n\n    xbase = np.linspace(0, Lmax, Nx)\n    x, y, z = map(np.ndarray.flatten, np.meshgrid(xbase, xbase, xbase))\n\n    # Make more vectors by shifting by Lmax/Nx/3 in x, y, and z:\n    dx = (Lmax / Nx) / 3  # One third of cell size.\n    x = np.append(x, x + dx)\n    y = np.append(y, y + dx)\n    z = np.append(z, z + dx)\n\n    # Construct vectors\n    vecs = np.vstack((x, y, z)).T\n    Npts = x.size\n\n    # Reorder randomly.\n    np.random.shuffle(vecs)\n\n    # Tolerance = half of cell diagonal.\n    tol = Lmax / Nx * np.sqrt(2) / 2\n\n    adj = uvutils._adj_list(vecs, tol, n_blocks=n_blocks)\n\n    # Confirm that each adjacency set contains all of the vectors that\n    # are within the tolerance distance.\n    for vi in range(Npts):\n        for vj in range(Npts):\n            dist = np.linalg.norm(vecs[vi] - vecs[vj])\n            if dist < tol:\n                assert vj in adj[vi]\n                assert vi in adj[vj]\n            else:\n                assert vj not in adj[vi]\n                assert vi not in adj[vj]\n\n    # The way the grid is set up, every clique should have two elements.\n    assert all(len(vi) == 2 for vi in adj)\n\n\ndef test_strict_cliques():\n    # Adjacency lists comprising only isolated cliques.\n    adj_isol = [\n        {0, 1, 2},\n        {1, 0, 2},\n        {2, 0, 1},\n        {3},\n        {4},\n        {5, 6, 7, 8},\n        {5, 6, 7, 8},\n        {5, 6, 7, 8},\n        {5, 6, 7, 8},\n    ]\n    adj_isol = [frozenset(st) for st in adj_isol]\n    exp_cliques = [[0, 1, 2], [3], [4], [5, 6, 7, 8]]\n\n    res = uvutils._find_cliques(adj_isol, strict=True)\n    assert res == exp_cliques\n\n    # Error if two cliques are not isolated\n    adj_link = adj_isol\n    adj_link[-1] = frozenset({5, 6, 7, 8, 1})\n\n    with pytest.raises(ValueError, match=\"Non-isolated cliques found in graph.\"):\n        uvutils._find_cliques(adj_link, strict=True)\n\n\ndef test_reorder_conj_pols_non_list():\n    pytest.raises(ValueError, 
uvutils.reorder_conj_pols, 4)\n\n\ndef test_reorder_conj_pols_strings():\n    pols = [\"xx\", \"xy\", \"yx\"]\n    corder = uvutils.reorder_conj_pols(pols)\n    assert np.array_equal(corder, [0, 2, 1])\n\n\ndef test_reorder_conj_pols_ints():\n    pols = [-5, -7, -8]  # 'xx', 'xy', 'yx'\n    corder = uvutils.reorder_conj_pols(pols)\n    assert np.array_equal(corder, [0, 2, 1])\n\n\ndef test_reorder_conj_pols_missing_conj():\n    pols = [\"xx\", \"xy\"]  # Missing 'yx'\n    pytest.raises(ValueError, uvutils.reorder_conj_pols, pols)\n\n\ndef test_collapse_mean_no_return_no_weights():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i])\n    out = uvutils.collapse(data, \"mean\", axis=0)\n    out1 = uvutils.mean_collapse(data, axis=0)\n    # Actual values are tested in test_mean_no_weights\n    assert np.array_equal(out, out1)\n\n\ndef test_collapse_mean_returned_no_weights():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i])\n    out, wo = uvutils.collapse(data, \"mean\", axis=0, return_weights=True)\n    out1, wo1 = uvutils.mean_collapse(data, axis=0, return_weights=True)\n    # Actual values are tested in test_mean_no_weights\n    assert np.array_equal(out, out1)\n    assert np.array_equal(wo, wo1)\n\n\ndef test_collapse_mean_returned_with_weights():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i]) + 1\n    w = 1.0 / data\n    out, wo = uvutils.collapse(data, \"mean\", weights=w, axis=0, return_weights=True)\n    out1, wo1 = uvutils.mean_collapse(data, weights=w, axis=0, return_weights=True)\n    # Actual values are tested in test_mean_weights\n    assert np.array_equal(out, out1)\n    assert np.array_equal(wo, wo1)\n\n\ndef test_collapse_mean_returned_with_weights_and_weights_square():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i]) + 1\n    w = 1.0 / data\n    out, wo, wso = uvutils.collapse(\n        data, \"mean\", weights=w, axis=0, return_weights=True, return_weights_square=True\n    )\n    out1, wo1, wso1 = uvutils.mean_collapse(\n        data, weights=w, axis=0, return_weights=True, return_weights_square=True\n    )\n    # Actual values are tested in test_mean_weights\n    assert np.array_equal(out, out1)\n    assert np.array_equal(wo, wo1)\n    assert np.array_equal(wso, wso1)\n\n\ndef test_collapse_mean_returned_with_weights_square_no_return_weights():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i]) + 1\n    w = 1.0 / data\n    out, wso = uvutils.collapse(\n        data,\n        \"mean\",\n        weights=w,\n        axis=0,\n        return_weights=False,\n        return_weights_square=True,\n    )\n    out1, wso1 = uvutils.mean_collapse(\n        data, weights=w, axis=0, return_weights=False, return_weights_square=True\n    )\n    # Actual values are tested in test_mean_weights\n    assert np.array_equal(out, out1)\n    assert np.array_equal(wso, wso1)\n\n\ndef test_collapse_absmean_no_return_no_weights():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = (-1) ** i * np.ones_like(data[:, i])\n    out = uvutils.collapse(data, \"absmean\", axis=0)\n    out1 = uvutils.absmean_collapse(data, axis=0)\n    # Actual values are tested in test_absmean_no_weights\n    assert np.array_equal(out, out1)\n\n\ndef test_collapse_quadmean_no_return_no_weights():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i])\n    out = uvutils.collapse(data, 
\"quadmean\", axis=0)\n    out1 = uvutils.quadmean_collapse(data, axis=0)\n    # Actual values are tested elsewhere?\n    assert np.array_equal(out, out1)\n\n\ndef test_collapse_quadmean_returned_with_weights_and_weights_square():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i]) + 1\n    w = 1.0 / data\n    out, wo, wso = uvutils.collapse(\n        data,\n        \"quadmean\",\n        weights=w,\n        axis=0,\n        return_weights=True,\n        return_weights_square=True,\n    )\n    out1, wo1, wso1 = uvutils.quadmean_collapse(\n        data, weights=w, axis=0, return_weights=True, return_weights_square=True\n    )\n    # Actual values are tested elsewhere?\n    assert np.array_equal(out, out1)\n    assert np.array_equal(wo, wo1)\n    assert np.array_equal(wso, wso1)\n\n\ndef test_collapse_quadmean_returned_with_weights_square_no_return_weights():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i]) + 1\n    w = 1.0 / data\n    out, wso = uvutils.collapse(\n        data,\n        \"quadmean\",\n        weights=w,\n        axis=0,\n        return_weights=False,\n        return_weights_square=True,\n    )\n    out1, wso1 = uvutils.quadmean_collapse(\n        data, weights=w, axis=0, return_weights=False, return_weights_square=True\n    )\n    # Actual values are tested elsewhere?\n    assert np.array_equal(out, out1)\n    assert np.array_equal(wso, wso1)\n\n\ndef test_collapse_quadmean_returned_without_weights_square_with_return_weights():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i]) + 1\n    w = 1.0 / data\n    out, wo = uvutils.collapse(\n        data,\n        \"quadmean\",\n        weights=w,\n        axis=0,\n        return_weights=True,\n        return_weights_square=False,\n    )\n    out1, wo1 = uvutils.quadmean_collapse(\n        data, weights=w, axis=0, return_weights=True, return_weights_square=False\n    )\n    # Actual values are tested elsewhere?\n    assert np.array_equal(out, out1)\n    assert np.array_equal(wo, wo1)\n\n\ndef test_collapse_quadmean_returned_with_weights_square_without_weights():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i]) + 1\n    w = 1.0 / data\n    out, wo = uvutils.collapse(\n        data,\n        \"quadmean\",\n        weights=w,\n        axis=0,\n        return_weights=False,\n        return_weights_square=True,\n    )\n    out1, wo1 = uvutils.quadmean_collapse(\n        data, weights=w, axis=0, return_weights=False, return_weights_square=True\n    )\n    # Actual values are tested elsewhere?\n    assert np.array_equal(out, out1)\n    assert np.array_equal(wo, wo1)\n\n\ndef test_collapse_or_no_return_no_weights():\n    # Fake data\n    data = np.zeros((50, 25), np.bool_)\n    data[0, 8] = True\n    o = uvutils.collapse(data, \"or\", axis=0)\n    o1 = uvutils.or_collapse(data, axis=0)\n    assert np.array_equal(o, o1)\n\n\ndef test_collapse_and_no_return_no_weights():\n    # Fake data\n    data = np.zeros((50, 25), np.bool_)\n    data[0, :] = True\n    o = uvutils.collapse(data, \"and\", axis=0)\n    o1 = uvutils.and_collapse(data, axis=0)\n    assert np.array_equal(o, o1)\n\n\ndef test_collapse_error():\n    pytest.raises(ValueError, uvutils.collapse, np.ones((2, 3)), \"fooboo\")\n\n\ndef test_mean_no_weights():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i])\n    out, wo = uvutils.mean_collapse(data, axis=0, return_weights=True)\n    assert np.array_equal(out, np.arange(data.shape[1]))\n    assert np.array_equal(wo, data.shape[0] * np.ones(data.shape[1]))\n    out, wo = uvutils.mean_collapse(data, axis=1, return_weights=True)\n    assert 
np.all(out == np.mean(np.arange(data.shape[1])))\n    assert len(out) == data.shape[0]\n    assert np.array_equal(wo, data.shape[1] * np.ones(data.shape[0]))\n    out, wo = uvutils.mean_collapse(data, return_weights=True)\n    assert out == np.mean(np.arange(data.shape[1]))\n    assert wo == data.size\n    out = uvutils.mean_collapse(data)\n    assert out == np.mean(np.arange(data.shape[1]))\n\n\ndef test_mean_weights_and_weights_square():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i]) + 1\n    w = 1.0 / data\n    out, wo, wso = uvutils.mean_collapse(\n        data, weights=w, axis=0, return_weights=True, return_weights_square=True\n    )\n    assert np.allclose(out * wo, data.shape[0])\n    assert np.allclose(wo, float(data.shape[0]) / (np.arange(data.shape[1]) + 1))\n    assert np.allclose(wso, float(data.shape[0]) / (np.arange(data.shape[1]) + 1) ** 2)\n    out, wo, wso = uvutils.mean_collapse(\n        data, weights=w, axis=1, return_weights=True, return_weights_square=True\n    )\n    assert np.allclose(out * wo, data.shape[1])\n    assert np.allclose(wo, np.sum(1.0 / (np.arange(data.shape[1]) + 1)))\n    assert np.allclose(wso, np.sum(1.0 / (np.arange(data.shape[1]) + 1) ** 2))\n\n    # Zero weights\n    w = np.ones_like(w)\n    w[0, :] = 0\n    w[:, 0] = 0\n    out, wo = uvutils.mean_collapse(data, weights=w, axis=0, return_weights=True)\n    ans = np.arange(data.shape[1]).astype(np.float64) + 1\n    ans[0] = np.inf\n    assert np.array_equal(out, ans)\n    ans = (data.shape[0] - 1) * np.ones(data.shape[1])\n    ans[0] = 0\n    assert np.all(wo == ans)\n    out, wo = uvutils.mean_collapse(data, weights=w, axis=1, return_weights=True)\n    ans = np.mean(np.arange(data.shape[1])[1:] + 1) * np.ones(data.shape[0])\n    ans[0] = np.inf\n    assert np.all(out == ans)\n    ans = (data.shape[1] - 1) * np.ones(data.shape[0])\n    ans[0] = 0\n    assert np.all(wo == ans)\n\n\ndef test_mean_infs():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i])\n    data[:, 0] = np.inf\n    data[0, :] = np.inf\n    out, wo = uvutils.mean_collapse(data, axis=0, return_weights=True)\n    ans = np.arange(data.shape[1]).astype(np.float64)\n    ans[0] = np.inf\n    assert np.array_equal(out, ans)\n    ans = (data.shape[0] - 1) * np.ones(data.shape[1])\n    ans[0] = 0\n    assert np.all(wo == ans)\n    out, wo = uvutils.mean_collapse(data, axis=1, return_weights=True)\n    ans = np.mean(np.arange(data.shape[1])[1:]) * np.ones(data.shape[0])\n    ans[0] = np.inf\n    assert np.all(out == ans)\n    ans = (data.shape[1] - 1) * np.ones(data.shape[0])\n    ans[0] = 0\n    assert np.all(wo == ans)\n\n\ndef test_absmean():\n    # Fake data\n    data1 = np.zeros((50, 25))\n    for i in range(data1.shape[1]):\n        data1[:, i] = (-1) ** i * np.ones_like(data1[:, i])\n    data2 = np.ones_like(data1)\n    out1 = uvutils.absmean_collapse(data1)\n    out2 = uvutils.absmean_collapse(data2)\n    assert out1 == out2\n\n\ndef test_quadmean():\n    # Fake data\n    data = np.zeros((50, 25))\n    for i in range(data.shape[1]):\n        data[:, i] = i * np.ones_like(data[:, i])\n    o1, w1 = uvutils.quadmean_collapse(data, return_weights=True)\n    o2, w2 = uvutils.mean_collapse(np.abs(data) ** 2, return_weights=True)\n    o3 = uvutils.quadmean_collapse(data)  # without return_weights\n    o2 = np.sqrt(o2)\n    assert o1 == o2\n    assert w1 == w2\n    assert o1 == o3\n\n\ndef test_or_collapse():\n    # Fake data\n    data = np.zeros((50, 25), np.bool_)\n    data[0, 8] = True\n    o = uvutils.or_collapse(data, axis=0)\n    ans = np.zeros(25, np.bool_)\n    ans[8] = True\n    assert np.array_equal(o, ans)\n    o = uvutils.or_collapse(data, 
axis=1)\n    ans = np.zeros(50, np.bool_)\n    ans[0] = True\n    assert np.array_equal(o, ans)\n    o = uvutils.or_collapse(data)\n    assert o\n\n\ndef test_or_collapse_weights():\n    # Fake data\n    data = np.zeros((50, 25), np.bool_)\n    data[0, 8] = True\n    w = np.ones_like(data, np.float64)\n    o, wo = uvutils.or_collapse(data, axis=0, weights=w, return_weights=True)\n    ans = np.zeros(25, np.bool_)\n    ans[8] = True\n    assert np.array_equal(o, ans)\n    assert np.array_equal(wo, np.ones_like(o, dtype=np.float64))\n    w[0, 8] = 0.3\n    with uvtest.check_warnings(UserWarning, \"Currently weights are\"):\n        o = uvutils.or_collapse(data, axis=0, weights=w)\n    assert np.array_equal(o, ans)\n\n\ndef test_or_collapse_errors():\n    data = np.zeros(5)\n    pytest.raises(ValueError, uvutils.or_collapse, data)\n\n\ndef test_and_collapse():\n    # Fake data\n    data = np.zeros((50, 25), np.bool_)\n    data[0, :] = True\n    o = uvutils.and_collapse(data, axis=0)\n    ans = np.zeros(25, np.bool_)\n    assert np.array_equal(o, ans)\n    o = uvutils.and_collapse(data, axis=1)\n    ans = np.zeros(50, np.bool_)\n    ans[0] = True\n    assert np.array_equal(o, ans)\n    o = uvutils.and_collapse(data)\n    assert not o\n\n\ndef test_and_collapse_weights():\n    # Fake data\n    data = np.zeros((50, 25), np.bool_)\n    data[0, :] = True\n    w = np.ones_like(data, np.float64)\n    o, wo = uvutils.and_collapse(data, axis=0, weights=w, return_weights=True)\n    ans = np.zeros(25, np.bool_)\n    assert np.array_equal(o, ans)\n    assert np.array_equal(wo, np.ones_like(o, dtype=np.float64))\n    w[0, 8] = 0.3\n    with uvtest.check_warnings(UserWarning, \"Currently weights are\"):\n        o = uvutils.and_collapse(data, axis=0, weights=w)\n    assert np.array_equal(o, ans)\n\n\ndef test_and_collapse_errors():\n    data = np.zeros(5)\n    pytest.raises(ValueError, uvutils.and_collapse, data)\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_uvcalibrate_apply_gains_oldfiles():\n    # read data\n    uvd = UVData()\n    uvd.read(os.path.join(DATA_PATH, \"zen.2457698.40355.xx.HH.uvcAA.uvh5\"))\n    # give it an x_orientation\n    uvd.x_orientation = \"east\"\n    uvc = UVCal()\n    uvc.read_calfits(os.path.join(DATA_PATH, \"zen.2457698.40355.xx.gain.calfits\"))\n    # downselect to match each other in shape (but not in actual values!)\n    uvd.select(frequencies=uvd.freq_array[0, :10])\n    uvc.select(times=uvc.time_array[:3])\n\n    with pytest.raises(\n        ValueError,\n        match=re.escape(\n            \"All antenna names with data on UVData are missing \"\n            \"on UVCal. 
Since ant_check is False, calibration will \"\n        \"proceed but all data will be flagged.\",\n    ]\n    missing_times = [2457698.4036761867, 2457698.4038004624]\n\n    time_expected = f\"Time {missing_times[0]} exists on UVData but not on UVCal.\"\n\n    freq_expected = (\n        f\"Frequency {uvd.freq_array[0, 0]} exists on UVData but not on UVCal.\"\n    )\n\n    with uvtest.check_warnings(UserWarning, match=ants_expected):\n        with pytest.raises(ValueError, match=time_expected):\n            uvutils.uvcalibrate(\n                uvd, uvc, prop_flags=True, ant_check=False, inplace=False\n            )\n\n    uvc.select(times=uvc.time_array[0])\n\n    with uvtest.check_warnings(UserWarning, match=ants_expected):\n        with pytest.raises(ValueError, match=freq_expected):\n            uvutils.uvcalibrate(\n                uvd, uvc, prop_flags=True, ant_check=False, time_check=False,\n            )\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_uvcalibrate_delay_oldfiles():\n    uvd = UVData()\n    uvd.read(os.path.join(DATA_PATH, \"zen.2457698.40355.xx.HH.uvcAA.uvh5\"))\n\n    uvc = UVCal()\n    uvc.read_calfits(os.path.join(DATA_PATH, \"zen.2457698.40355.xx.delay.calfits\"))\n    # downselect to match\n    uvc.select(times=uvc.time_array[3], frequencies=uvd.freq_array[0, :])\n    uvc.gain_convention = \"multiply\"\n    ant_expected = [\n        \"All antenna names with data on UVData are missing \"\n        \"on UVCal. Since ant_check is False, calibration will \"\n        \"proceed but all data will be flagged.\"\n    ]\n    with uvtest.check_warnings(UserWarning, match=ant_expected):\n        uvdcal = uvutils.uvcalibrate(\n            uvd, uvc, prop_flags=False, ant_check=False, time_check=False, inplace=False\n        )\n\n    uvc.convert_to_gain()\n    with uvtest.check_warnings(UserWarning, match=ant_expected):\n        uvdcal2 = uvutils.uvcalibrate(\n            uvd, uvc, prop_flags=False, ant_check=False, time_check=False, inplace=False\n        )\n\n    assert uvdcal == uvdcal2\n\n\[email protected](\"future_shapes\", [True, False])\[email protected](\"flip_gain_conj\", [False, True])\[email protected](\"gain_convention\", [\"divide\", \"multiply\"])\ndef test_uvcalibrate(uvcalibrate_data, future_shapes, flip_gain_conj, gain_convention):\n    uvd, uvc = uvcalibrate_data\n\n    if future_shapes:\n        uvd.use_future_array_shapes()\n\n    uvc.gain_convention = gain_convention\n\n    if gain_convention == \"divide\":\n        assert uvc.gain_scale is None\n    else:\n        # set the gain_scale to \"Jy\" to test that vis units are set properly\n        uvc.gain_scale = \"Jy\"\n\n    uvdcal = uvutils.uvcalibrate(uvd, uvc, inplace=False, flip_gain_conj=flip_gain_conj)\n    if gain_convention == \"divide\":\n        assert uvdcal.vis_units == \"uncalib\"\n    else:\n        assert uvdcal.vis_units == \"Jy\"\n\n    key = (1, 13, \"xx\")\n    ant1 = (1, \"Jxx\")\n    ant2 = (13, \"Jxx\")\n\n    if flip_gain_conj:\n        gain_product = (uvc.get_gains(ant1).conj() * uvc.get_gains(ant2)).T\n    else:\n        gain_product = (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T\n\n    if gain_convention == \"divide\":\n        np.testing.assert_array_almost_equal(\n            uvdcal.get_data(key), uvd.get_data(key) / gain_product,\n        )\n    else:\n        np.testing.assert_array_almost_equal(\n            uvdcal.get_data(key), uvd.get_data(key) * gain_product,\n        )\n\n    # test undo\n    uvdcal = uvutils.uvcalibrate(\n        uvdcal,\n        uvc,\n        prop_flags=True,\n        ant_check=False,\n        inplace=False,\n        undo=True,\n        flip_gain_conj=flip_gain_conj,\n    )\n\n    np.testing.assert_array_almost_equal(uvd.get_data(key), uvdcal.get_data(key))\n    assert uvdcal.vis_units == \"uncalib\"\n\n\[email protected](\"ignore:Combined frequencies are not contiguous.\")\ndef test_uvcalibrate_dterm_handling(uvcalibrate_data):\n    uvd, uvc = 
uvcalibrate_data\n\n    # test d-term exception\n    with pytest.raises(\n        ValueError, match=\"Cannot apply D-term calibration without -7 or -8\"\n    ):\n        uvutils.uvcalibrate(uvd, uvc, Dterm_cal=True)\n\n    # d-term not implemented error\n    uvcDterm = copy.deepcopy(uvc)\n    uvcDterm.jones_array = np.array([-7, -8])\n    uvcDterm = uvc + uvcDterm\n    with pytest.raises(\n        NotImplementedError, match=\"D-term calibration is not yet implemented.\"\n    ):\n        uvutils.uvcalibrate(uvd, uvcDterm, Dterm_cal=True)\n\n\[email protected](\"ignore:Cannot preserve total_quality_array\")\[email protected](\"future_shapes\", [True, False])\ndef test_uvcalibrate_flag_propagation(uvcalibrate_data, future_shapes):\n    uvd, uvc = uvcalibrate_data\n\n    if future_shapes:\n        uvd.use_future_array_shapes()\n\n    # test flag propagation\n    uvc.flag_array[0] = True\n    uvc.gain_array[1] = 0.0\n    uvdcal = uvutils.uvcalibrate(\n        uvd, uvc, prop_flags=True, ant_check=False, inplace=False\n    )\n\n    assert np.all(uvdcal.get_flags(1, 13, \"xx\"))  # assert completely flagged\n    assert np.all(uvdcal.get_flags(0, 12, \"xx\"))  # assert completely flagged\n    np.testing.assert_array_almost_equal(\n        uvd.get_data(1, 13, \"xx\"), uvdcal.get_data(1, 13, \"xx\")\n    )\n    np.testing.assert_array_almost_equal(\n        uvd.get_data(0, 12, \"xx\"), uvdcal.get_data(0, 12, \"xx\")\n    )\n\n    uvd_ant_dict = dict(zip(uvd.antenna_numbers, uvd.antenna_names))\n    uvc_ant_dict = dict(zip(uvc.antenna_numbers, uvc.antenna_names))\n\n    for key in uvc_ant_dict.keys():\n        if key in uvd_ant_dict.keys():\n            uvc_ant_dict[key] = str(uvd_ant_dict[key])\n        else:\n            uvc_ant_dict[key] = str(uvd_ant_dict[key])\n\n    uvc.antenna_names = np.array(\n        [uvc_ant_dict[ant_number] for ant_number in uvc.antenna_numbers]\n    )\n\n    uvc_sub = uvc.select(antenna_nums=[1, 12], inplace=False)\n\n    uvdata_unique_nums = np.unique(np.append(uvd.ant_1_array, uvd.ant_2_array))\n    uvd.antenna_names = np.array(uvd.antenna_names)\n    missing_ants = uvdata_unique_nums.tolist()\n    missing_ants.remove(1)\n    missing_ants.remove(12)\n    missing_ant_names = [\n        uvd.antenna_names[np.where(uvd.antenna_numbers == antnum)[0][0]]\n        for antnum in missing_ants\n    ]\n\n    exp_err = (\n        f\"Antennas {missing_ant_names} have data on UVData but \"\n        \"are missing on UVCal. To continue calibration and \"\n        \"flag the data from missing antennas, set ant_check=False.\"\n    )\n\n    with pytest.raises(ValueError) as errinfo:\n        uvdcal = uvutils.uvcalibrate(\n            uvd, uvc_sub, prop_flags=True, ant_check=True, inplace=False\n        )\n\n    assert exp_err == str(errinfo.value)\n\n    with pytest.warns(UserWarning) as warninfo:\n        uvdcal = uvutils.uvcalibrate(\n            uvd, uvc_sub, prop_flags=True, ant_check=False, inplace=False\n        )\n    warns = {warn.message.args[0] for warn in warninfo}\n    ant_expected = {\n        f\"Antennas {missing_ant_names} have data on UVData but are missing \"\n        \"on UVCal. Since ant_check is False, calibration will \"\n        \"proceed and the data for these antennas will be flagged.\"\n    }\n\n    assert warns == ant_expected\n    assert np.all(uvdcal.get_flags(13, 24, \"xx\"))  # assert completely flagged\n\n\[email protected](\"ignore:Cannot preserve total_quality_array\")\ndef test_uvcalibrate_flag_propagation_name_mismatch(uvcalibrate_init_data):\n    uvd, uvc = uvcalibrate_init_data\n\n    # test flag propagation\n    uvc.flag_array[0] = True\n    uvc.gain_array[1] = 0.0\n    with pytest.raises(\n        ValueError,\n        match=re.escape(\n            \"All antenna names with data on UVData are missing \"\n            \"on UVCal. 
To continue with calibration \"\n            \"(and flag all the data), set ant_check=False.\"\n        ),\n    ):\n        uvdcal = uvutils.uvcalibrate(\n            uvd, uvc, prop_flags=True, ant_check=True, inplace=False\n        )\n\n    with uvtest.check_warnings(\n        UserWarning,\n        match=\"All antenna names with data on UVData are missing \"\n        \"on UVCal. Since ant_check is False, calibration will \"\n        \"proceed but all data will be flagged.\",\n    ):\n        uvdcal = uvutils.uvcalibrate(\n            uvd, uvc, prop_flags=True, ant_check=False, inplace=False\n        )\n\n    assert np.all(uvdcal.get_flags(1, 13, \"xx\"))  # assert completely flagged\n    assert np.all(uvdcal.get_flags(0, 12, \"xx\"))  # assert completely flagged\n    np.testing.assert_array_almost_equal(\n        uvd.get_data(1, 13, \"xx\"), uvdcal.get_data(1, 13, \"xx\")\n    )\n    np.testing.assert_array_almost_equal(\n        uvd.get_data(0, 12, \"xx\"), uvdcal.get_data(0, 12, \"xx\")\n    )\n\n\ndef test_uvcalibrate_extra_cal_antennas(uvcalibrate_data):\n    uvd, uvc = uvcalibrate_data\n\n    # remove some antennas from the data\n    uvd.select(antenna_nums=[0, 1, 12, 13])\n\n    uvdcal = uvutils.uvcalibrate(uvd, uvc, inplace=False)\n\n    key = (1, 13, \"xx\")\n    ant1 = (1, \"Jxx\")\n    ant2 = (13, \"Jxx\")\n\n    np.testing.assert_array_almost_equal(\n        uvdcal.get_data(key),\n        uvd.get_data(key) / (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T,\n    )\n\n\[email protected](\"future_shapes\", [True, False])\ndef test_uvcalibrate_antenna_names_mismatch(uvcalibrate_init_data, future_shapes):\n    uvd, uvc = uvcalibrate_init_data\n\n    if future_shapes:\n        uvd.use_future_array_shapes()\n\n    with pytest.raises(\n        ValueError,\n        match=re.escape(\n            \"All antenna names with data on UVData are missing \"\n            \"on UVCal. To continue with calibration \"\n            \"(and flag all the data), set ant_check=False.\"\n        ),\n    ):\n        uvutils.uvcalibrate(uvd, uvc, inplace=False)\n\n    # now test that they're all flagged if ant_check is False\n    with uvtest.check_warnings(\n        UserWarning,\n        match=\"All antenna names with data on UVData are missing \"\n        \"on UVCal. 
Since ant_check is False, calibration will \"\n        \"proceed but all data will be flagged.\",\n    ):\n        uvdcal = uvutils.uvcalibrate(uvd, uvc, ant_check=False, inplace=False)\n\n    assert np.all(uvdcal.flag_array)  # assert completely flagged\n\n\ndef test_uvcalibrate_time_mismatch(uvcalibrate_data):\n    uvd, uvc = uvcalibrate_data\n\n    # change times to get warnings\n    uvc.time_array = uvc.time_array + 1\n\n    expected_err = {\n        f\"Time {this_time} exists on UVData but not on UVCal.\"\n        for this_time in np.unique(uvd.time_array)\n    }\n\n    with pytest.raises(ValueError) as errinfo:\n        uvutils.uvcalibrate(uvd, uvc, inplace=False)\n    assert str(errinfo.value) in expected_err\n\n\ndef test_uvcalibrate_time_wrong_size(uvcalibrate_data):\n    uvd, uvc = uvcalibrate_data\n\n    # downselect by one time to get error\n    uvc.select(times=uvc.time_array[1:])\n    with pytest.raises(\n        ValueError,\n        match=\"The uvcal object has more than one time but fewer than the \"\n        \"number of unique times on the uvdata object.\",\n    ):\n        uvutils.uvcalibrate(uvd, uvc, inplace=False)\n\n\[email protected](\"len_time_range\", [0, 1])\ndef test_uvcalibrate_time_types(uvcalibrate_data, len_time_range):\n    uvd, uvc = uvcalibrate_data\n\n    # only one time\n    uvc.select(times=uvc.time_array[0])\n    if len_time_range == 0:\n        uvc.time_range = None\n    else:\n        # check cal runs fine with a good time range\n        uvdcal = uvutils.uvcalibrate(uvd, uvc, inplace=False)\n\n        key = (1, 13, \"xx\")\n        ant1 = (1, \"Jxx\")\n        ant2 = (13, \"Jxx\")\n\n        np.testing.assert_array_almost_equal(\n            uvdcal.get_data(key),\n            uvd.get_data(key) / (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T,\n        )\n\n        # then change time_range to get warnings\n        uvc.time_range = np.array(uvc.time_range) + 1\n\n    with pytest.raises(\n        ValueError,\n        match=(\n            \"Times do not match between UVData and UVCal. 
\"\n            \"Set time_check=False to apply calibration anyway.\"\n        ),\n    ):\n        uvutils.uvcalibrate(uvd, uvc, inplace=False)\n\n    # set time_check=False to test the user warning\n    with uvtest.check_warnings(\n        UserWarning,\n        match=(\n            \"Times do not match between UVData and UVCal \"\n            \"but time_check is False, so calibration \"\n            \"will be applied anyway.\"\n        ),\n    ):\n        uvdcal = uvutils.uvcalibrate(uvd, uvc, inplace=False, time_check=False)\n\n    key = (1, 13, \"xx\")\n    ant1 = (1, \"Jxx\")\n    ant2 = (13, \"Jxx\")\n\n    np.testing.assert_array_almost_equal(\n        uvdcal.get_data(key),\n        uvd.get_data(key) / (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T,\n    )\n\n\[email protected](\"ignore:Combined frequencies are not contiguous.\")\ndef test_uvcalibrate_extra_cal_times(uvcalibrate_data):\n    uvd, uvc = uvcalibrate_data\n\n    uvc2 = copy.deepcopy(uvc)\n    uvc2.time_array = uvc.time_array + 1\n    uvc_use = uvc + uvc2\n\n    uvdcal = uvutils.uvcalibrate(uvd, uvc_use, inplace=False)\n\n    key = (1, 13, \"xx\")\n    ant1 = (1, \"Jxx\")\n    ant2 = (13, \"Jxx\")\n\n    np.testing.assert_array_almost_equal(\n        uvdcal.get_data(key),\n        uvd.get_data(key) / (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T,\n    )\n\n\ndef test_uvcalibrate_freq_mismatch(uvcalibrate_data):\n    uvd, uvc = uvcalibrate_data\n\n    # change some frequencies to get warnings\n    maxf = np.max(uvc.freq_array)\n    uvc.freq_array[0, uvc.Nfreqs // 2 :] = uvc.freq_array[0, uvc.Nfreqs // 2 :] + maxf\n    expected_err = {\n        f\"Frequency {this_freq} exists on UVData but not on UVCal.\"\n        for this_freq in uvd.freq_array[0, uvd.Nfreqs // 2 :]\n    }\n    with pytest.raises(ValueError) as errinfo:\n        uvutils.uvcalibrate(uvd, uvc, inplace=False)\n    assert str(errinfo.value) in expected_err\n\n\[email protected](\"ignore:Combined frequencies are not evenly spaced.\")\ndef test_uvcalibrate_extra_cal_freqs(uvcalibrate_data):\n    uvd, uvc = uvcalibrate_data\n\n    uvc2 = copy.deepcopy(uvc)\n    uvc2.freq_array = uvc.freq_array + np.max(uvc.freq_array)\n    uvc_use = uvc + uvc2\n\n    uvdcal = uvutils.uvcalibrate(uvd, uvc_use, inplace=False)\n\n    key = (1, 13, \"xx\")\n    ant1 = (1, \"Jxx\")\n    ant2 = (13, \"Jxx\")\n\n    np.testing.assert_array_almost_equal(\n        uvdcal.get_data(key),\n        uvd.get_data(key) / (uvc.get_gains(ant1) * uvc.get_gains(ant2).conj()).T,\n    )\n\n\ndef test_uvcalibrate_feedpol_mismatch(uvcalibrate_data):\n    uvd, uvc = uvcalibrate_data\n\n    # downselect the feed polarization to get warnings\n    uvc.select(jones=uvutils.jstr2num(\"Jnn\", x_orientation=uvc.x_orientation))\n    with pytest.raises(\n        ValueError, match=(\"Feed polarization e exists on UVData but not on UVCal.\"),\n    ):\n        uvutils.uvcalibrate(uvd, uvc, inplace=False)\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\[email protected](\"future_shapes\", [True, False])\ndef test_apply_uvflag(future_shapes):\n    # load data and insert some flags\n    uvd = UVData()\n    uvd.read(os.path.join(DATA_PATH, \"zen.2457698.40355.xx.HH.uvcAA.uvh5\"))\n    uvd.flag_array[uvd.antpair2ind(9, 20)] = True\n\n    if future_shapes:\n        uvd.use_future_array_shapes()\n\n    # load a UVFlag into flag type\n    uvf = UVFlag(uvd)\n    uvf.to_flag()\n\n    # insert flags for 2 out of 3 times\n    uvf.flag_array[uvf.antpair2ind(9, 10)[:2]] = True\n\n    # apply flags and check for basic flag propagation\n    uvdf = uvutils.apply_uvflag(uvd, uvf, inplace=False)\n    assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 10)][:2])\n\n    # test inplace\n    uvdf = copy.deepcopy(uvd)\n    uvutils.apply_uvflag(uvdf, uvf, inplace=True)\n    assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 
10)][:2])\n\n    # test flag missing\n    uvf2 = uvf.select(bls=uvf.get_antpairs()[:-1], inplace=False)\n    uvdf = uvutils.apply_uvflag(uvd, uvf2, inplace=False, flag_missing=True)\n    assert np.all(uvdf.flag_array[uvdf.antpair2ind(uvf.get_antpairs()[-1])])\n    uvdf = uvutils.apply_uvflag(uvd, uvf2, inplace=False, flag_missing=False)\n    assert not np.any(uvdf.flag_array[uvdf.antpair2ind(uvf.get_antpairs()[-1])])\n\n    # test force polarization\n    uvdf = copy.deepcopy(uvd)\n    uvdf2 = copy.deepcopy(uvd)\n    uvdf2.polarization_array[0] = -6\n    uvdf += uvdf2\n    uvdf = uvutils.apply_uvflag(uvdf, uvf, inplace=False, force_pol=True)\n    assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 10)][:2])\n    with pytest.raises(ValueError) as cm:\n        uvutils.apply_uvflag(uvdf, uvf, inplace=False, force_pol=False)\n    assert \"Input uvf and uvd polarizations do not match\" in str(cm.value)\n\n    # test unflag first\n    uvdf = uvutils.apply_uvflag(uvd, uvf, inplace=False, unflag_first=True)\n    assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 10)][:2])\n    assert not np.any(uvdf.flag_array[uvdf.antpair2ind(9, 20)])\n\n    # convert uvf to waterfall and test\n    uvfw = copy.deepcopy(uvf)\n    uvfw.to_waterfall(method=\"or\")\n    uvdf = uvutils.apply_uvflag(uvd, uvfw, inplace=False)\n    assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 10)][:2])\n    assert np.all(uvdf.flag_array[uvdf.antpair2ind(9, 20)][:2])\n    assert np.all(uvdf.flag_array[uvdf.antpair2ind(20, 22)][:2])\n\n    # test mode exception\n    uvfm = copy.deepcopy(uvf)\n    uvfm.mode = \"metric\"\n    with pytest.raises(ValueError) as cm:\n        uvutils.apply_uvflag(uvd, uvfm)\n    assert \"UVFlag must be flag mode\" in str(cm.value)\n\n    # test polarization exception\n    uvd2 = copy.deepcopy(uvd)\n    uvd2.polarization_array[0] = -6\n    uvf2 = UVFlag(uvd)\n    uvf2.to_flag()\n    uvd2.polarization_array[0] = -8\n    with pytest.raises(ValueError) as cm:\n        uvutils.apply_uvflag(uvd2, uvf2, force_pol=False)\n    assert \"Input uvf and uvd polarizations do not match\" in str(cm.value)\n\n    # test time and frequency mismatch exceptions\n    uvf2 = uvf.select(frequencies=uvf.freq_array[:, :2], inplace=False)\n    with pytest.raises(ValueError) as cm:\n        uvutils.apply_uvflag(uvd, uvf2)\n    assert \"UVFlag and UVData have mismatched frequency arrays\" in str(cm.value)\n\n    uvf2 = copy.deepcopy(uvf)\n    uvf2.freq_array += 1.0\n    with pytest.raises(ValueError) as cm:\n        uvutils.apply_uvflag(uvd, uvf2)\n    assert \"UVFlag and UVData have mismatched frequency arrays\" in str(cm.value)\n\n    uvf2 = uvf.select(times=np.unique(uvf.time_array)[:2], inplace=False)\n    with pytest.raises(ValueError) as cm:\n        uvutils.apply_uvflag(uvd, uvf2)\n    assert \"UVFlag and UVData have mismatched time arrays\" in str(cm.value)\n\n    uvf2 = copy.deepcopy(uvf)\n    uvf2.time_array += 1.0\n    with pytest.raises(ValueError) as cm:\n        uvutils.apply_uvflag(uvd, uvf2)\n    assert \"UVFlag and UVData have mismatched time arrays\" in str(cm.value)\n\n    # assert implicit broadcasting works\n    uvf2 = uvf.select(frequencies=uvf.freq_array[:, :1], inplace=False)\n    uvd2 = uvutils.apply_uvflag(uvd, uvf2, inplace=False)\n    assert np.all(uvd2.get_flags(9, 10)[:2])\n    uvf2 = uvf.select(times=np.unique(uvf.time_array)[:1], inplace=False)\n    uvd2 = uvutils.apply_uvflag(uvd, uvf2, inplace=False)\n    assert np.all(uvd2.get_flags(9, 10))\n\n\ndef test_upos_tol_reds():\n    # Checks that the u-positive convention in get_antenna_redundancies\n    # is enforced to the specified tolerance.\n\n    # Make a layout with two NS baselines, one with u ~ -2*eps, and another with u == 0\n    # This would previously cause one to be flipped, when they 
should be redundant.\n\n eps = 1e-5\n tol = 3 * eps\n\n ant_pos = np.array(\n [[-eps, 1.0, 0.0], [1.0, 1.0, 0.0], [eps, 0.0, 0.0], [1.0, 0.0, 0.0]]\n )\n\n ant_nums = np.arange(4)\n\n red_grps, _, _ = uvutils.get_antenna_redundancies(ant_nums, ant_pos, tol=tol)\n\n assert len(red_grps) == 4\n\n\nclass FakeClass:\n def __init__(self):\n pass\n\n\ndef test_parse_ants_error():\n test_obj = FakeClass()\n with pytest.raises(\n ValueError,\n match=(\n \"UVBased objects must have all the following attributes in order \"\n \"to call 'parse_ants': \"\n ),\n ):\n uvutils.parse_ants(test_obj, ant_str=\"\")\n\n\[email protected](\n \"filename1,filename2,answer\",\n [\n ([\"foo.uvh5\"], [\"bar.uvh5\"], [\"foo.uvh5\", \"bar.uvh5\"]),\n ([\"foo.uvh5\", \"bar.uvh5\"], [\"foo.uvh5\"], [\"foo.uvh5\", \"bar.uvh5\"]),\n ([\"foo.uvh5\"], None, [\"foo.uvh5\"]),\n (None, [\"bar.uvh5\"], [\"bar.uvh5\"]),\n (None, None, None),\n ],\n)\ndef test_combine_filenames(filename1, filename2, answer):\n combined_filenames = uvutils._combine_filenames(filename1, filename2)\n if answer is None:\n assert combined_filenames is answer\n else:\n # use sets to test equality so that order doesn't matter\n assert set(combined_filenames) == set(answer)\n\n return\n\n\[email protected](\"ignore:The uvw_array does not match the expected values\")\ndef test_read_slicing():\n \"\"\"Test HDF5 slicing helper functions\"\"\"\n # check trivial slice representations\n slices, _ = uvutils._convert_to_slices([])\n assert slices == [slice(0, 0, 0)]\n slices, _ = uvutils._convert_to_slices(10)\n assert slices == [slice(10, 11, 1)]\n\n # dataset shape checking\n # check various kinds of indexing give the right answer\n indices = [slice(0, 10), 0, [0, 1, 2], [0]]\n dset = np.empty((100, 1, 1024, 2), dtype=np.float64)\n shape, _ = uvutils._get_dset_shape(dset, indices)\n assert tuple(shape) == (10, 1, 3, 1)\n\n # dataset indexing\n # check various kinds of indexing give the right answer\n slices = [uvutils._convert_to_slices(ind)[0] for ind in indices]\n slices[1] = 0\n data = uvutils._index_dset(dset, slices)\n assert data.shape == tuple(shape)\n"
] |
[
[
"numpy.dot",
"numpy.sqrt",
"numpy.linspace",
"numpy.squeeze",
"numpy.vstack",
"numpy.all",
"numpy.max",
"numpy.where",
"numpy.ones_like",
"numpy.allclose",
"numpy.unique",
"numpy.arange",
"numpy.stack",
"numpy.sin",
"numpy.load",
"numpy.zeros",
"numpy.isclose",
"numpy.append",
"numpy.equal",
"numpy.array",
"numpy.meshgrid",
"numpy.abs",
"numpy.random.seed",
"numpy.array_equal",
"numpy.linalg.norm",
"numpy.random.shuffle",
"numpy.ones",
"numpy.cos",
"numpy.random.normal",
"numpy.random.uniform",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Smirenost/sqlflow
|
[
"244366196e71834ea2a3a67b90406f7e99e4bcf0",
"244366196e71834ea2a3a67b90406f7e99e4bcf0"
] |
[
"python/runtime/pai/tensorflow/explain.py",
"python/runtime/explainer.py"
] |
[
"# Copyright 2020 The SQLFlow Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License\n\nimport os\nimport sys\n\nimport matplotlib\nimport pandas as pd\nimport tensorflow as tf\nfrom runtime import oss\nfrom runtime.import_model import import_model\nfrom runtime.tensorflow import is_tf_estimator\nfrom runtime.tensorflow.explain import explain_boosted_trees, explain_dnns\nfrom runtime.tensorflow.input_fn import input_fn\nfrom runtime.tensorflow.keras_with_feature_column_input import \\\n    init_model_with_feature_column\n\nif os.environ.get('DISPLAY', '') == '':\n    print('no display found. Using non-interactive Agg backend')\n    matplotlib.use('Agg')\n\n\ndef explain(datasource, select, data_table, result_table, label_column,\n            oss_model_path):\n    try:\n        tf.enable_eager_execution()\n    except Exception as e:\n        sys.stderr.write(\"warning: failed to enable_eager_execution: %s\" % e)\n        pass\n\n    (estimator, feature_column_names, feature_column_names_map, feature_metas,\n     label_meta, model_params,\n     feature_columns_code) = oss.load_metas(oss_model_path,\n                                            \"tensorflow_model_desc\")\n\n    feature_columns = eval(feature_columns_code)\n    # NOTE(typhoonzero): No need to eval model_params[\"optimizer\"] and\n    # model_params[\"loss\"] because predicting do not need these parameters.\n\n    is_estimator = is_tf_estimator(import_model(estimator))\n\n    # Keras single node is using h5 format to save the model, no need to deal\n    # with export model format. 
Keras distributed mode will use estimator, so\n # this is also needed.\n if is_estimator:\n oss.load_file(oss_model_path, \"exported_path\")\n # NOTE(typhoonzero): directory \"model_save\" is hardcoded in\n # codegen/tensorflow/codegen.go\n oss.load_dir(\"%s/model_save\" % oss_model_path)\n else:\n oss.load_file(oss_model_path, \"model_save\")\n\n # (TODO: lhw) use oss to store result image\n _explain(datasource=datasource,\n estimator_string=estimator,\n select=select,\n feature_columns=feature_columns,\n feature_column_names=feature_column_names,\n feature_metas=feature_metas,\n label_meta=label_meta,\n model_params=model_params,\n save=\"model_save\",\n result_table=result_table,\n pai_table=data_table,\n oss_dest=None,\n oss_ak=None,\n oss_sk=None,\n oss_endpoint=None,\n oss_bucket_name=None)\n\n\ndef _explain(datasource,\n estimator_string,\n select,\n feature_columns,\n feature_column_names,\n feature_metas={},\n label_meta={},\n model_params={},\n save=\"\",\n pai_table=\"\",\n plot_type='bar',\n result_table=\"\",\n oss_dest=None,\n oss_ak=None,\n oss_sk=None,\n oss_endpoint=None,\n oss_bucket_name=None):\n estimator_cls = import_model(estimator_string)\n FLAGS = tf.app.flags.FLAGS\n model_params[\"model_dir\"] = FLAGS.checkpointDir\n model_params.update(feature_columns)\n\n def _input_fn():\n dataset = input_fn(\"\",\n datasource,\n feature_column_names,\n feature_metas,\n label_meta,\n is_pai=True,\n pai_table=pai_table)\n return dataset.batch(1).cache()\n\n estimator = init_model_with_feature_column(estimator_cls, model_params)\n driver = \"pai_maxcompute\"\n conn = None\n if estimator_cls in (tf.estimator.BoostedTreesClassifier,\n tf.estimator.BoostedTreesRegressor):\n explain_boosted_trees(datasource, estimator, _input_fn, plot_type,\n result_table, feature_column_names, driver, conn,\n \"\", \"\", \"\", \"\", oss_dest, oss_ak, oss_sk,\n oss_endpoint, oss_bucket_name)\n else:\n shap_dataset = pd.DataFrame(columns=feature_column_names)\n for i, (features, label) in enumerate(_input_fn()):\n shap_dataset.loc[i] = [\n item.numpy()[0][0] for item in features.values()\n ]\n explain_dnns(datasource, estimator, shap_dataset, plot_type,\n result_table, feature_column_names, driver, conn, \"\", \"\",\n \"\", \"\", oss_dest, oss_ak, oss_sk, oss_endpoint,\n oss_bucket_name)\n",
"# Copyright 2020 The SQLFlow Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport matplotlib\n# The default backend\nimport matplotlib.pyplot as plt\nfrom runtime.oss import copyfileobj\n\n# TODO(shendiaomo): extract common code from tensorflow/explain.py\n# and xgboost/explain.py\n# TODO(shendiaomo): add a unit test for this file later\n\n\ndef plot_and_save(plotfunc,\n oss_dest=None,\n oss_ak=None,\n oss_sk=None,\n oss_endpoint=None,\n oss_bucket_name=None,\n filename='summary'):\n '''\n plot_and_save plots and saves matplotlib figures using different backends\n Args:\n plotfunc: A callable that plot the figures\n oss_dest: The oss path to save the figures\n oss_ak: The access key of the oss service\n oss_sk: The security key of the oss service\n oss_endpoint: The endpoint of the oss service\n oss_bucket_name: The bucket name of the oss service\n filename: The prefix of the figure files to be saved\n Return:\n None\n '''\n\n plotfunc()\n plt.savefig(filename, bbox_inches='tight')\n\n if oss_dest:\n copyfileobj(filename + '.png', oss_dest, oss_ak, oss_sk, oss_endpoint,\n oss_bucket_name)\n else:\n # NOTE(weiguoz), I failed test on the PAI platform here.\n # If we plan to support plotille_text_backend on PAI, please test it.\n # The plotille text backend\n matplotlib.use('module://plotille_text_backend')\n import matplotlib.pyplot as plt_text_backend\n sys.stdout.isatty = lambda: True\n plotfunc()\n plt_text_backend.savefig(filename, bbox_inches='tight')\n"
] |
[
[
"matplotlib.use",
"tensorflow.enable_eager_execution",
"pandas.DataFrame"
],
[
"matplotlib.use",
"matplotlib.pyplot.savefig"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.13",
"1.7",
"1.10",
"1.12"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nibydlo/modAL
|
[
"c0fe0200001c8c34e3fabb099fb70cf1e4bfb680",
"c0fe0200001c8c34e3fabb099fb70cf1e4bfb680"
] |
[
"experiments/models/topics_autoencoders.py",
"experiments/topics_uncertainty.py"
] |
[
"import time\n\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom torch.utils.data import TensorDataset, DataLoader\n\n\nIMG_LEN = 1024\nTXT_LEN = 300\nN_CLASSES = 50\nBATCH_SIZE = 2048\ncriterion = nn.MSELoss()\n\n\ndef prepare_data_for_torch(X_train, X_val):\n    x_img_train, x_txt_train = X_train[0], X_train[1]\n    x_img_val, x_txt_val = X_val[0], X_val[1]\n\n    x_img_train_t = torch.tensor(x_img_train).float()\n    x_img_val_t = torch.tensor(x_img_val).float()\n\n    x_txt_train_t = torch.tensor(x_txt_train).float()\n    x_txt_val_t = torch.tensor(x_txt_val).float()\n\n    train_ds = TensorDataset(x_img_train_t, x_txt_train_t)\n    val_ds = TensorDataset(x_img_val_t, x_txt_val_t)\n\n    train_loader = DataLoader(train_ds, batch_size=BATCH_SIZE)\n    val_loader = DataLoader(val_ds, batch_size=BATCH_SIZE)\n    return train_loader, val_loader\n\n\ndef fit_autoencoder(autoencoder, optimizer, epochs, X_train, X_val, verbose=1):\n    train_loader, val_loader = prepare_data_for_torch(X_train, X_val)\n\n    train_img_loss_history = []\n    train_txt_loss_history = []\n\n    val_img_loss_history = []\n    val_txt_loss_history = []\n\n    start_time = time.time()\n\n    for epoch in range(epochs):\n        autoencoder.train()\n\n        loss_img_sum = 0.0\n        loss_txt_sum = 0.0\n        loss_sum = 0.0\n        loss_count = 0\n\n        for x_img_cur, x_txt_cur in train_loader:\n            autoencoder.zero_grad()\n            out_img, out_txt = autoencoder(inp_img=x_img_cur, inp_txt=x_txt_cur)\n            loss_img = criterion(out_img, x_img_cur)\n            loss_txt = criterion(out_txt, x_txt_cur)\n            loss = loss_img + loss_txt\n\n            loss_img_sum += loss_img\n            loss_txt_sum += loss_txt\n            loss_sum += loss\n            loss_count += 1\n\n            loss.backward()\n            optimizer.step()\n\n        if verbose != 0:\n            print(\n                'epoch:', epoch,\n                'train img loss:', \"%.3f\" % (loss_img_sum / loss_count).item(),\n                'txt_loss:', \"%.3f\" % (loss_txt_sum / loss_count).item(),\n                'img + txt loss', \"%.3f\" % (loss_sum / loss_count).item()\n            )\n        train_img_loss_history.append((loss_img_sum / loss_count).item())\n        train_txt_loss_history.append((loss_txt_sum / loss_count).item())\n\n        autoencoder.eval()\n\n        val_loss_img_sum = 0.0\n        val_loss_txt_sum = 0.0\n        val_loss_sum = 0.0\n        val_loss_count = 0\n\n        with torch.no_grad():\n            for x_img_cur, x_txt_cur in val_loader:\n                out_img, out_txt = autoencoder(x_img_cur, x_txt_cur)\n                loss_img = criterion(out_img, x_img_cur)\n                loss_txt = criterion(out_txt, x_txt_cur)\n                loss = loss_img + loss_txt\n\n                val_loss_img_sum += loss_img\n                val_loss_txt_sum += loss_txt\n                val_loss_sum += loss\n                val_loss_count += 1\n\n        if verbose != 0:\n            print(\n                'val img loss:', \"%.3f\" % (val_loss_img_sum / val_loss_count).item(),\n                'val txt_loss:', \"%.3f\" % (val_loss_txt_sum / val_loss_count).item(),\n                'img + txt loss', \"%.3f\" % (val_loss_sum / val_loss_count).item()\n            )\n        val_img_loss_history.append((val_loss_img_sum / val_loss_count).item())\n        val_txt_loss_history.append((val_loss_txt_sum / val_loss_count).item())\n\n    operation_time = time.time() - start_time\n\n    if verbose != 0:\n        print('autoencoder fitting finished for', operation_time, 'seconds')\n\n    return train_img_loss_history, train_txt_loss_history, val_img_loss_history, val_txt_loss_history, operation_time\n\n\nclass Encoder(nn.Module):\n    def __init__(self, d, drop=0.5):\n        super().__init__()\n        self.fc_img_1 = nn.Linear(IMG_LEN, d * 4)\n        self.bn_img_1 = nn.BatchNorm1d(num_features=d * 4)\n        self.fc_img_2 = nn.Linear(d * 4, d * 2)\n        self.bn_img_2 = nn.BatchNorm1d(num_features=d * 2)\n\n        self.fc_txt_1 = nn.Linear(TXT_LEN, d * 2)\n        self.bn_txt_1 = nn.BatchNorm1d(num_features=d * 2)\n 
self.fc_txt_2 = nn.Linear(d * 2, d * 2)\n self.bn_txt_2 = nn.BatchNorm1d(num_features=d * 2)\n\n self.fc = nn.Linear(d * 4, d)\n self.bn = nn.BatchNorm1d(num_features=d)\n\n self.dropout = nn.modules.Dropout(p=drop)\n\n def forward(self, inp_img, inp_txt):\n x_img = self.dropout(self.bn_img_1(F.relu(self.fc_img_1(inp_img))))\n x_img = self.dropout(self.bn_img_2(F.relu(self.fc_img_2(x_img))))\n\n x_txt = self.dropout(self.bn_txt_1(F.relu(self.fc_txt_1(inp_txt))))\n x_txt = self.dropout(self.bn_txt_2(F.relu(self.fc_txt_2(x_txt))))\n\n x = torch.cat((x_img, x_txt), 1)\n x = self.dropout(self.bn(F.relu(self.fc(x))))\n return x\n\n\nclass Decoder(nn.Module):\n def __init__(self, d, drop=0.5):\n super().__init__()\n\n self.fc_img_1 = nn.Linear(d, 4 * d)\n self.fc_img_2 = nn.Linear(4 * d, IMG_LEN)\n\n self.fc_txt_1 = nn.Linear(d, 2 * d)\n self.fc_txt_2 = nn.Linear(2 * d, TXT_LEN)\n\n self.dropout = nn.modules.Dropout(p=drop)\n\n def forward(self, x):\n x_img = self.dropout(F.relu(self.fc_img_1(x)))\n x_img = self.fc_img_2(x_img)\n\n x_txt = self.dropout(F.relu(self.fc_txt_1(x)))\n x_txt = self.fc_txt_2(x_txt)\n\n return x_img, x_txt\n\n\nclass Autoencoder(nn.Module):\n def __init__(self, d):\n super().__init__()\n self.encoder = Encoder(d)\n self.decoder = Decoder(d)\n\n def forward(self, inp_img, inp_txt):\n x = self.encoder(inp_img, inp_txt)\n x_img, x_txt = self.decoder(x)\n return x_img, x_txt\n\n\nclass EncoderTrident(nn.Module):\n def __init__(self, d, drop=0.5):\n super().__init__()\n self.fc_img_1 = nn.Linear(IMG_LEN, d * 4)\n self.bn_img_1 = nn.BatchNorm1d(num_features=d * 4)\n self.fc_img_2 = nn.Linear(d * 4, d * 2)\n self.bn_img_2 = nn.BatchNorm1d(num_features=d * 2)\n\n self.fc_txt_1 = nn.Linear(TXT_LEN, d * 2)\n self.bn_txt_1 = nn.BatchNorm1d(num_features=d * 2)\n self.fc_txt_2 = nn.Linear(d * 2, d * 2)\n self.bn_txt_2 = nn.BatchNorm1d(num_features=d * 2)\n\n self.dropout = nn.modules.Dropout(p=drop)\n\n def forward(self, inp_img, inp_txt):\n x_img = self.dropout(self.bn_img_1(F.relu(self.fc_img_1(inp_img))))\n x_img = self.dropout(self.bn_img_2(F.relu(self.fc_img_2(x_img))))\n\n x_txt = self.dropout(self.bn_txt_1(F.relu(self.fc_txt_1(inp_txt))))\n x_txt = self.dropout(self.bn_txt_2(F.relu(self.fc_txt_2(x_txt))))\n\n return x_img, x_txt\n\n\nclass DecoderTrident(nn.Module):\n def __init__(self, d, drop=0.5):\n super().__init__()\n self.fc = nn.Linear(4 * d, 2 * d)\n\n self.fc_img_1 = nn.Linear(2 * d, 4 * d)\n self.fc_img_2 = nn.Linear(4 * d, IMG_LEN)\n\n self.fc_txt_1 = nn.Linear(2 * d, 2 * d)\n self.fc_txt_2 = nn.Linear(2 * d, TXT_LEN)\n\n self.dropout = nn.modules.Dropout(p=drop)\n\n def forward(self, x_img, x_txt):\n x = self.dropout(F.relu(self.fc(torch.cat((x_img, x_txt), 1))))\n\n x_img = self.dropout(F.relu(self.fc_img_1(x)))\n x_img = self.fc_img_2(x_img)\n\n x_txt = self.dropout(F.relu(self.fc_txt_1(x)))\n x_txt = self.fc_txt_2(x_txt)\n\n return x_img, x_txt\n\n\nclass AutoencoderTrident(nn.Module):\n def __init__(self, d):\n super().__init__()\n self.encoder = EncoderTrident(d)\n self.decoder = DecoderTrident(d)\n\n def forward(self, inp_img, inp_txt):\n x_img, x_txt = self.encoder(inp_img, inp_txt)\n x_img, x_txt = self.decoder(x_img, x_txt)\n return x_img, x_txt\n",
"from functools import partial\n\nimport numpy as np\n\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.callbacks import EarlyStopping\n\nfrom experiments.models.topics_models import get_model_residual_concat_radam\nfrom experiments.datasets.topics_ds import get_unpacked_data\nfrom experiments.al_experiment import Experiment\n\nfrom modAL.uncertainty import uncertainty_sampling, margin_sampling, entropy_sampling, entropy_top_sampling\nfrom modAL import KerasActiveLearner\n\n\nx_img, x_txt, y = get_unpacked_data()\n\nx_img_train, x_img_test, x_txt_train, x_txt_test, y_train, y_test = train_test_split(\n x_img,\n x_txt,\n y,\n test_size=0.2,\n random_state=42,\n stratify=y\n)\n\nx_img_train, x_img_val, x_txt_train, x_txt_val, y_train, y_val = train_test_split(\n x_img_train,\n x_txt_train,\n y_train,\n test_size=0.2,\n random_state=42,\n stratify=y_train\n)\n\nimg_sscaler = StandardScaler()\nimg_sscaler.fit(x_img_train)\n\nx_img_train = img_sscaler.transform(x_img_train)\nx_img_val = img_sscaler.transform(x_img_val)\nx_img_test = img_sscaler.transform(x_img_test)\n\ntxt_sscaler = StandardScaler()\ntxt_sscaler.fit(x_txt_train)\n\nx_txt_train = txt_sscaler.transform(x_txt_train)\nx_txt_val = txt_sscaler.transform(x_txt_val)\nx_txt_test = txt_sscaler.transform(x_txt_test)\n\nn_labeled_examples = x_img_train.shape[0]\n\nPOOL_SIZE = 20000\nINIT_SIZE = 2000\nBATCH_SIZE = 20\nN_QUERIES = 30\nINIT_EPOCHS = 30\n\n# preset_least_confident = partial(uncertainty_sampling, n_instances=BATCH_SIZE, proba=False)\n# preset_margin = partial(margin_sampling, n_instances=BATCH_SIZE, proba=False)\n# preset_entropy = partial(entropy_sampling, n_instances=BATCH_SIZE, proba=False)\npreset_entropy_top_3 = partial(entropy_top_sampling, n_instances=BATCH_SIZE, n_top=3, proba=False)\npreset_entropy_top_4 = partial(entropy_top_sampling, n_instances=BATCH_SIZE, n_top=4, proba=False)\npreset_entropy_top_5 = partial(entropy_top_sampling, n_instances=BATCH_SIZE, n_top=5, proba=False)\n\n\nquery_dict = {\n # 'least_confident' : preset_least_confident,\n # 'margin' : preset_margin,\n # 'entropy' : preset_entropy,\n # 'entropy_top_3': preset_entropy_top_3,\n # 'entropy_top_4': preset_entropy_top_4,\n 'entropy_top_5': preset_entropy_top_5\n}\n\nes = EarlyStopping(monitor='val_accuracy', mode='max', min_delta=0.001, patience=3)\n\nfor query_name in query_dict:\n for i in range(4, 6):\n training_indices = np.random.randint(low=0, high=n_labeled_examples, size=INIT_SIZE)\n model = get_model_residual_concat_radam()\n\n x_init_train = [x_img_train[training_indices], x_txt_train[training_indices]]\n y_init_train = y_train[training_indices]\n\n x_pool = [np.delete(x_img_train, training_indices, axis=0), np.delete(x_txt_train, training_indices, axis=0)]\n y_pool = np.delete(y_train, training_indices, axis=0)\n\n learner = KerasActiveLearner(\n estimator=model,\n X_training=x_init_train,\n y_training=y_init_train,\n query_strategy=query_dict[query_name],\n validation_data=([x_img_val, x_txt_val], y_val),\n epochs=INIT_EPOCHS,\n callbacks=[es]\n )\n\n experiment = Experiment(\n learner=learner,\n X_pool=x_pool,\n y_pool=y_pool,\n X_val=[x_img_val, x_txt_val],\n y_val=y_val,\n n_queries=N_QUERIES,\n random_seed=i,\n pool_size=POOL_SIZE,\n name='topics_' + query_name + '_i2000_b20_' + str(i)\n )\n\n experiment.run()\n experiment.save_state('statistic/topics/keras/' + query_name + '_i2000_b20_' + str(i))"
] |
[
[
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.nn.Linear",
"torch.no_grad",
"torch.nn.modules.Dropout",
"torch.nn.MSELoss"
],
[
"sklearn.preprocessing.StandardScaler",
"numpy.delete",
"sklearn.model_selection.train_test_split",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
thughes-IAS/OpenNMT-tf
|
[
"ecdf430ba82c62e520c75b6a30911cbad31d4a16"
] |
[
"opennmt/tokenizers/tokenizer.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"Define base tokenizers.\"\"\"\n\nimport sys\nimport abc\nimport yaml\n\nimport tensorflow as tf\n\nfrom opennmt.utils import misc\n\n\nclass Tokenizer(abc.ABC):\n  \"\"\"Base class for tokenizers.\"\"\"\n\n  @property\n  def in_graph(self):\n    \"\"\"Returns ``True`` if this tokenizer can be run in graph (i.e. uses TensorFlow ops).\"\"\"\n    return False\n\n  def export_assets(self, asset_dir, asset_prefix=\"\"):  # pylint: disable=unused-argument\n    \"\"\"Exports assets for this tokenizer.\n\n    Args:\n      asset_dir: The directory where assets can be written.\n      asset_prefix: The prefix to attach to assets filename.\n\n    Returns:\n      A dictionary containing additional assets used by the tokenizer.\n    \"\"\"\n    return {}\n\n  def tokenize_stream(self, input_stream=sys.stdin, output_stream=sys.stdout, delimiter=\" \"):\n    \"\"\"Tokenizes a stream of sentences.\n\n    Args:\n      input_stream: The input stream.\n      output_stream: The output stream.\n      delimiter: The token delimiter to use for text serialization.\n    \"\"\"\n    for line in input_stream:\n      line = line.strip()\n      tokens = self.tokenize(line)\n      merged_tokens = delimiter.join(tokens)\n      misc.print_as_bytes(merged_tokens, stream=output_stream)\n\n  def detokenize_stream(self, input_stream=sys.stdin, output_stream=sys.stdout, delimiter=\" \"):\n    \"\"\"Detokenizes a stream of sentences.\n\n    Args:\n      input_stream: The input stream.\n      output_stream: The output stream.\n      delimiter: The token delimiter used for text serialization.\n    \"\"\"\n    for line in input_stream:\n      tokens = line.strip().split(delimiter)\n      string = self.detokenize(tokens)\n      misc.print_as_bytes(string, stream=output_stream)\n\n  def tokenize(self, text):\n    \"\"\"Tokenizes text.\n\n    Args:\n      text: A string or batch of strings to tokenize as a ``tf.Tensor`` or\n        Python values.\n\n    Returns:\n      - If :obj:`text` is a Python string, a list of Python strings.\n      - If :obj:`text` is a list of Python strings, a list of list of Python\n        strings.\n      - If :obj:`text` is a 0-D ``tf.Tensor``, a 1-D ``tf.Tensor``.\n      - If :obj:`text` is a 1-D ``tf.Tensor``, a 2-D ``tf.RaggedTensor``.\n\n    Raises:\n      ValueError: if the rank of :obj:`text` is greater than 1.\n    \"\"\"\n    with tf.device(\"cpu:0\"):\n      return self._tokenize(text)\n\n  def _tokenize(self, text):\n    if tf.is_tensor(text):\n      rank = len(text.shape)\n      if rank == 0:\n        return self._tokenize_tensor(text)\n      elif rank == 1:\n        return self._tokenize_batch_tensor(text)\n      else:\n        raise ValueError(\"Unsupported tensor rank %d for tokenization\" % rank)\n    elif isinstance(text, list):\n      return list(map(self.tokenize, text))\n    else:\n      text = tf.compat.as_text(text)\n      return self._tokenize_string(text)\n\n  def detokenize(self, tokens, sequence_length=None):\n    \"\"\"Detokenizes tokens.\n\n    The Tensor version supports batches of tokens.\n\n    Args:\n      tokens: Tokens or batch of tokens as a ``tf.Tensor``, ``tf.RaggedTensor``,\n        or Python values.\n      sequence_length: The length of each sequence. 
Required if :obj:`tokens`\n        is a dense 2-D ``tf.Tensor``.\n\n    Returns:\n      - If :obj:`tokens` is a list of list of Python strings, a list of Python strings.\n      - If :obj:`tokens` is a list of Python strings, a Python string.\n      - If :obj:`tokens` is a N-D ``tf.Tensor`` (or ``tf.RaggedTensor``), a\n        (N-1)-D ``tf.Tensor``.\n\n    Raises:\n      ValueError: if the rank of :obj:`tokens` is greater than 2.\n      ValueError: if :obj:`tokens` is a 2-D dense ``tf.Tensor`` and\n        :obj:`sequence_length` is not set.\n    \"\"\"\n    with tf.device(\"cpu:0\"):\n      return self._detokenize(tokens, sequence_length)\n\n  def _detokenize(self, tokens, sequence_length):\n    if tf.is_tensor(tokens):\n      rank = len(tokens.shape)\n      if rank == 1:\n        return self._detokenize_tensor(tokens)\n      elif rank == 2:\n        if sequence_length is None:\n          raise ValueError(\"sequence_length is required for Tensor detokenization\")\n        return self._detokenize_batch_tensor(tokens, sequence_length)\n      else:\n        raise ValueError(\"Unsupported tensor rank %d for detokenization\" % rank)\n    elif isinstance(tokens, tf.RaggedTensor):\n      rank = len(tokens.shape)\n      if rank == 1:\n        return self._detokenize_tensor(tokens.values)\n      elif rank == 2:\n        return self._detokenize_ragged_tensor(tokens)\n      else:\n        raise ValueError(\"Unsupported RaggedTensor rank %d for detokenization\" % rank)\n    elif isinstance(tokens, list) and tokens and isinstance(tokens[0], list):\n      return list(map(self.detokenize, tokens))\n    else:\n      tokens = [tf.compat.as_text(token) for token in tokens]\n      return self._detokenize_string(tokens)\n\n  @tf.autograph.experimental.do_not_convert\n  def _tokenize_tensor(self, text):\n    \"\"\"Tokenizes a tensor.\n\n    When not overriden, this default implementation calls the string-based\n    tokenization.\n\n    Args:\n      text: A 1-D string ``tf.Tensor``.\n\n    Returns:\n      A 1-D string ``tf.Tensor``.\n    \"\"\"\n    def _python_wrapper(string_t):\n      string = tf.compat.as_text(string_t.numpy())\n      tokens = self._tokenize_string(string)\n      return tf.constant(tokens, dtype=tf.string)\n    tokens = tf.py_function(_python_wrapper, [text], tf.string)\n    tokens.set_shape([None])\n    return tokens\n\n  def _tokenize_batch_tensor(self, text):\n    \"\"\"Tokenizes a batch of texts.\n\n    When not overriden, this default implementation calls _tokenize_tensor on\n    each tensor within the batch.\n\n    Args:\n      text: A 1-D string ``tf.Tensor``.\n\n    Returns:\n      A 2-D string ``tf.RaggedTensor``.\n    \"\"\"\n    # map_fn expects each output element to have the same shape, so join tokens with\n    # spaces first and then split on spaces with a function returning a RaggedTensor.\n    tokens = tf.map_fn(\n        lambda x: tf.strings.reduce_join(self._tokenize_tensor(x), axis=0, separator=\" \"), text)\n    return tf.strings.split(tokens, sep=\" \")\n\n  @tf.autograph.experimental.do_not_convert\n  def _detokenize_tensor(self, tokens):\n    \"\"\"Detokenizes tokens.\n\n    When not overriden, this default implementation calls the string-based\n    detokenization.\n\n    Args:\n      tokens: A 1-D ``tf.Tensor``.\n\n    Returns:\n      A 0-D string ``tf.Tensor``.\n    \"\"\"\n    def _python_wrapper(tokens_t):\n      tokens = [tf.compat.as_text(s) for s in tokens_t.numpy()]\n      string = self._detokenize_string(tokens)\n      return tf.constant(string)\n    text = tf.py_function(_python_wrapper, [tokens], tf.string)\n    text.set_shape([])\n    return text\n\n  def _detokenize_batch_tensor(self, tokens, sequence_length):\n    \"\"\"Detokenizes a batch of tokens.\n\n    When not overriden, this default implementation calls _detokenize_tensor on\n    each tensor within the batch.\n\n    Args:\n      tokens: A 2-D ``tf.Tensor``.\n\n 
Returns:\n      A 1-D string ``tf.Tensor``.\n    \"\"\"\n    return tf.map_fn(\n        lambda x: self._detokenize_tensor(x[0][:x[1]]),\n        (tokens, sequence_length),\n        dtype=tf.string)\n\n  def _detokenize_ragged_tensor(self, tokens):\n    \"\"\"Detokenizes a batch of tokens as a ``tf.RaggedTensor``\n\n    When not overriden, this default implementation calls _detokenize_batch_tensor\n    on the dense representation.\n\n    Args:\n      tokens: A 2-D ``tf.RaggedTensor``.\n\n    Returns:\n      A 1-D string ``tf.Tensor``.\n    \"\"\"\n    return self._detokenize_batch_tensor(tokens.to_tensor(), tokens.row_lengths())\n\n  @abc.abstractmethod\n  def _tokenize_string(self, text):\n    \"\"\"Tokenizes a Python unicode string.\n\n    This method should be thread-safe.\n\n    Args:\n      text: A Python unicode string.\n\n    Returns:\n      A list of Python unicode strings.\n    \"\"\"\n    raise NotImplementedError()\n\n  @abc.abstractmethod\n  def _detokenize_string(self, tokens):\n    \"\"\"Detokenizes tokens.\n\n    Args:\n      tokens: A list of Python unicode strings.\n\n    Returns:\n      A unicode Python string.\n    \"\"\"\n    raise NotImplementedError()\n\n\n_TOKENIZERS_REGISTRY = misc.ClassRegistry(base_class=Tokenizer)\n\nregister_tokenizer = _TOKENIZERS_REGISTRY.register  # pylint: disable=invalid-name\n\ndef make_tokenizer(config=None):\n  \"\"\"Creates a tokenizer instance from the configuration.\n\n  Args:\n    config: Path to a configuration file or the configuration dictionary.\n\n  Returns:\n    A :class:`opennmt.tokenizers.Tokenizer` instance.\n\n  Raises:\n    ValueError: if :obj:`config` is invalid.\n  \"\"\"\n  if config:\n    if isinstance(config, str) and tf.io.gfile.exists(config):\n      with tf.io.gfile.GFile(config, mode=\"rb\") as config_file:\n        config = yaml.load(config_file, Loader=yaml.UnsafeLoader)\n    if isinstance(config, dict):\n      tokenizer_type = config.get(\"type\")\n      if tokenizer_type is None:\n        tokenizer_type = \"OpenNMTTokenizer\"\n        tokenizer_params = config\n      else:\n        tokenizer_params = config.get(\"params\", {})\n      tokenizer_class = _TOKENIZERS_REGISTRY.get(tokenizer_type)\n      if tokenizer_class is None:\n        raise ValueError(\"%s is not in list of accepted tokenizers: %s\" % (\n            tokenizer_type, \", \".join(sorted(_TOKENIZERS_REGISTRY.class_names))))\n      tokenizer = tokenizer_class(**tokenizer_params)\n    else:\n      raise ValueError(\"Invalid tokenization configuration: %s\" % str(config))\n  else:\n    # If the tokenization was not configured, we assume that an external tokenization\n    # was used and we don't include the tokenizer in the exported graph.\n    tokenizer = SpaceTokenizer(in_graph=False)\n  return tokenizer\n\n@register_tokenizer\nclass SpaceTokenizer(Tokenizer):\n  \"\"\"A tokenizer that splits on spaces.\"\"\"\n\n  def __init__(self, in_graph=True):\n    \"\"\"Initializes the tokenizer.\n\n    Args:\n      in_graph: If ``True``, this tokenizer should be integrated in the exported graph.\n    \"\"\"\n    self._in_graph = in_graph\n\n  @property\n  def in_graph(self):\n    return self._in_graph\n\n  def _tokenize_tensor(self, text):\n    return self._tokenize_batch_tensor(text)\n\n  def _tokenize_batch_tensor(self, text):\n    return tf.strings.split(text)\n\n  def _detokenize_tensor(self, tokens):\n    return self._detokenize_ragged_tensor(tokens)\n\n  def _detokenize_batch_tensor(self, tokens, sequence_length):\n    ragged = tf.RaggedTensor.from_tensor(tokens, lengths=sequence_length)\n    return self._detokenize_ragged_tensor(ragged)\n\n  def _detokenize_ragged_tensor(self, tokens):\n    return tf.strings.reduce_join(tokens, axis=tokens.shape.rank - 1, separator=\" \")\n\n  def _tokenize_string(self, text):\n    return text.split()\n\n 
def _detokenize_string(self, tokens):\n return \" \".join(tokens)\n\n\n@register_tokenizer\nclass CharacterTokenizer(Tokenizer):\n \"\"\"A tokenizer that splits unicode characters.\"\"\"\n\n @property\n def in_graph(self):\n return True\n\n def _tokenize_tensor(self, text):\n return self._tokenize_batch_tensor(text)\n\n def _tokenize_batch_tensor(self, text):\n text = tf.strings.regex_replace(text, \" \", \"▁\")\n return tf.strings.unicode_split(text, \"UTF-8\")\n\n def _detokenize_tensor(self, tokens):\n return self._detokenize_ragged_tensor(tokens)\n\n def _detokenize_batch_tensor(self, tokens, sequence_length):\n _ = sequence_length\n return self._detokenize_ragged_tensor(tokens)\n\n def _detokenize_ragged_tensor(self, tokens):\n text = tf.strings.reduce_join(tokens, axis=tokens.shape.rank - 1)\n return tf.strings.regex_replace(text, \"▁\", \" \")\n\n def _tokenize_string(self, text):\n return list(text.replace(\" \", u\"▁\"))\n\n def _detokenize_string(self, tokens):\n return \"\".join(tokens).replace(u\"▁\", \" \")\n"
] |
[
[
"tensorflow.strings.regex_replace",
"tensorflow.device",
"tensorflow.is_tensor",
"tensorflow.strings.unicode_split",
"tensorflow.constant",
"tensorflow.io.gfile.exists",
"tensorflow.io.gfile.GFile",
"tensorflow.strings.split",
"tensorflow.RaggedTensor.from_tensor",
"tensorflow.strings.reduce_join",
"tensorflow.py_function",
"tensorflow.compat.as_text"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vipermu/bigotis
|
[
"a40cd50eb533d05e26dd71c5ab78076d425e912f",
"a40cd50eb533d05e26dd71c5ab78076d425e912f"
] |
[
"server/models/taming/discriminator.py",
"server/models/taming/vqgan.py"
] |
[
"import functools\nimport torch.nn as nn\n\nfrom models.taming.util_modules import ActNorm\n\n\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n\nclass NLayerDiscriminator(nn.Module):\n \"\"\"Defines a PatchGAN discriminator as in Pix2Pix\n --> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py\n \"\"\"\n def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):\n \"\"\"Construct a PatchGAN discriminator\n Parameters:\n input_nc (int) -- the number of channels in input images\n ndf (int) -- the number of filters in the last conv layer\n n_layers (int) -- the number of conv layers in the discriminator\n norm_layer -- normalization layer\n \"\"\"\n super(NLayerDiscriminator, self).__init__()\n if not use_actnorm:\n norm_layer = nn.BatchNorm2d\n else:\n norm_layer = ActNorm\n if type(\n norm_layer\n ) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters\n use_bias = norm_layer.func != nn.BatchNorm2d\n else:\n use_bias = norm_layer != nn.BatchNorm2d\n\n kw = 4\n padw = 1\n sequence = [\n nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),\n nn.LeakyReLU(0.2, True)\n ]\n nf_mult = 1\n nf_mult_prev = 1\n for n in range(1,\n n_layers): # gradually increase the number of filters\n nf_mult_prev = nf_mult\n nf_mult = min(2**n, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev,\n ndf * nf_mult,\n kernel_size=kw,\n stride=2,\n padding=padw,\n bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n nf_mult_prev = nf_mult\n nf_mult = min(2**n_layers, 8)\n sequence += [\n nn.Conv2d(ndf * nf_mult_prev,\n ndf * nf_mult,\n kernel_size=kw,\n stride=1,\n padding=padw,\n bias=use_bias),\n norm_layer(ndf * nf_mult),\n nn.LeakyReLU(0.2, True)\n ]\n\n sequence += [\n nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)\n ] # output 1 channel prediction map\n self.main = nn.Sequential(*sequence)\n\n def forward(self, input):\n \"\"\"Standard forward.\"\"\"\n return self.main(input)\n",
"import torch\nimport torch.nn.functional as F\nimport pytorch_lightning as pl\n\nfrom models.taming.main import instantiate_from_config\n\nfrom models.taming.difussion_models import Encoder, Decoder, VUNet\nfrom models.taming.quantize import VectorQuantizer\n\n\nclass VQModel(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n n_embed,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25)\n self.quant_conv = torch.nn.Conv2d(ddconfig[\"z_channels\"], embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim,\n ddconfig[\"z_channels\"], 1)\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n self.image_key = image_key\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\",\n torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n quant, emb_loss, info = self.quantize(h)\n return quant, emb_loss, info\n\n def decode(self, quant):\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec\n\n def decode_code(self, code_b):\n quant_b = self.quantize.embed_code(code_b)\n dec = self.decode(quant_b)\n return dec\n\n def forward(self, input):\n quant, diff, _ = self.encode(input)\n dec = self.decode(quant)\n return dec, diff\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)\n return x.float()\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n\n if optimizer_idx == 0:\n # autoencode\n aeloss, log_dict_ae = self.loss(qloss,\n x,\n xrec,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\")\n\n self.log(\"train/aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n self.log_dict(log_dict_ae,\n prog_bar=False,\n logger=True,\n on_step=True,\n on_epoch=True)\n return aeloss\n\n if optimizer_idx == 1:\n # discriminator\n discloss, log_dict_disc = self.loss(\n qloss,\n x,\n xrec,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\")\n self.log(\"train/discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n self.log_dict(log_dict_disc,\n prog_bar=False,\n logger=True,\n on_step=True,\n on_epoch=True)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n aeloss, log_dict_ae = self.loss(qloss,\n x,\n xrec,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\")\n\n discloss, log_dict_disc = self.loss(qloss,\n x,\n xrec,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n 
split=\"val\")\n rec_loss = log_dict_ae[\"val/rec_loss\"]\n self.log(\"val/rec_loss\",\n rec_loss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n sync_dist=True)\n self.log(\"val/aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n sync_dist=True)\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters()) +\n list(self.decoder.parameters()) +\n list(self.quantize.parameters()) +\n list(self.quant_conv.parameters()) +\n list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr,\n betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n def log_images(self, batch, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n xrec, _ = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"inputs\"] = x\n log[\"reconstructions\"] = xrec\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\",\n torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.\n return x\n\n\nclass VQSegmentationModel(VQModel):\n def __init__(self, n_labels, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.register_buffer(\"colorize\", torch.randn(3, n_labels, 1, 1))\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters()) +\n list(self.decoder.parameters()) +\n list(self.quantize.parameters()) +\n list(self.quant_conv.parameters()) +\n list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9))\n return opt_ae\n\n def training_step(self, batch, batch_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n aeloss, log_dict_ae = self.loss(qloss, x, xrec, split=\"train\")\n self.log_dict(log_dict_ae,\n prog_bar=False,\n logger=True,\n on_step=True,\n on_epoch=True)\n return aeloss\n\n def validation_step(self, batch, batch_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n aeloss, log_dict_ae = self.loss(qloss, x, xrec, split=\"val\")\n self.log_dict(log_dict_ae,\n prog_bar=False,\n logger=True,\n on_step=True,\n on_epoch=True)\n total_loss = log_dict_ae[\"val/total_loss\"]\n self.log(\"val/total_loss\",\n total_loss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n sync_dist=True)\n return aeloss\n\n @torch.no_grad()\n def log_images(self, batch, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n xrec, _ = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n # convert logits to indices\n xrec = torch.argmax(xrec, dim=1, keepdim=True)\n xrec = F.one_hot(xrec, num_classes=x.shape[1])\n xrec = xrec.squeeze(1).permute(0, 3, 1, 2).float()\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"inputs\"] = x\n log[\"reconstructions\"] = xrec\n return log\n\n\nclass VQNoDiscModel(VQModel):\n def __init__(self,\n ddconfig,\n lossconfig,\n n_embed,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None):\n 
super().__init__(ddconfig=ddconfig,\n lossconfig=lossconfig,\n n_embed=n_embed,\n embed_dim=embed_dim,\n ckpt_path=ckpt_path,\n ignore_keys=ignore_keys,\n image_key=image_key,\n colorize_nlabels=colorize_nlabels)\n\n def training_step(self, batch, batch_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n # autoencode\n aeloss, log_dict_ae = self.loss(qloss,\n x,\n xrec,\n self.global_step,\n split=\"train\")\n output = pl.TrainResult(minimize=aeloss)\n output.log(\"train/aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n output.log_dict(log_dict_ae,\n prog_bar=False,\n logger=True,\n on_step=True,\n on_epoch=True)\n return output\n\n def validation_step(self, batch, batch_idx):\n x = self.get_input(batch, self.image_key)\n xrec, qloss = self(x)\n aeloss, log_dict_ae = self.loss(qloss,\n x,\n xrec,\n self.global_step,\n split=\"val\")\n rec_loss = log_dict_ae[\"val/rec_loss\"]\n output = pl.EvalResult(checkpoint_on=rec_loss)\n output.log(\"val/rec_loss\",\n rec_loss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n output.log(\"val/aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n output.log_dict(log_dict_ae)\n\n return output\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(list(self.encoder.parameters()) +\n list(self.decoder.parameters()) +\n list(self.quantize.parameters()) +\n list(self.quant_conv.parameters()) +\n list(self.post_quant_conv.parameters()),\n lr=self.learning_rate,\n betas=(0.5, 0.9))\n return optimizer\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.init.normal_",
"torch.nn.LeakyReLU"
],
[
"torch.load",
"torch.randn",
"torch.nn.functional.conv2d",
"torch.nn.Conv2d",
"torch.no_grad",
"torch.nn.functional.one_hot",
"torch.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
seyedrezamirkhani/keras_lstm_vae
|
[
"94774c9838a37ea533585df21aa1f7dcd36476b7"
] |
[
"example.py"
] |
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom lstm_vae import create_lstm_vae\n\ndef get_data():\n # read data from file\n data = np.fromfile('sample_data.dat').reshape(419,13)\n timesteps = 3\n dataX = []\n for i in range(len(data) - timesteps - 1):\n x = data[i:(i+timesteps), :]\n dataX.append(x)\n return np.array(dataX)\n\n\nif __name__ == \"__main__\":\n x = get_data()\n input_dim = x.shape[-1] # 13\n timesteps = x.shape[1] # 3\n batch_size = 1\n\n vae, enc, gen = create_lstm_vae(input_dim, \n timesteps=timesteps, \n batch_size=batch_size, \n intermediate_dim=32,\n latent_dim=100,\n epsilon_std=1.)\n\n vae.fit(x, x, epochs=20)\n\n preds = vae.predict(x, batch_size=batch_size)\n\n # pick a column to plot.\n print(\"[plotting...]\")\n print(\"x: %s, preds: %s\" % (x.shape, preds.shape))\n plt.plot(x[:,0,3], label='data')\n plt.plot(preds[:,0,3], label='predict')\n plt.legend()\n plt.show()\n\n\n"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.fromfile",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ntienvu/TW_NAS
|
[
"72a6d3c933978663c583661eee765bc316f66572"
] |
[
"cyDPP/sample_k.py"
] |
[
"import numpy as np\nfrom cyDPP.elem_sympoly import elem_sympoly\n\n\ndef sample_k(eigenvals, k):\n \"\"\"\n Sample a given number of eigenvalues according to p(S) \\propto prod eigenvals \\in S\n \"\"\"\n E = elem_sympoly(eigenvals, k)\n\n i = len(eigenvals)\n remaining = k\n\n S = np.zeros((k, ), dtype=int)\n while remaining > 0:\n # compute marginal of i given that we choose remaining values from 1:i\n if i == remaining:\n marg = 1\n else:\n marg = eigenvals[i-1] * E[remaining-1, i-1] / E[remaining, i]\n\n # sample marginal\n rand = np.random.rand()\n if rand < marg:\n S[remaining-1] = i-1\n remaining = remaining - 1\n i = i-1\n\n return S\n"
] |
[
[
"numpy.zeros",
"numpy.random.rand"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CongzheUalberta/Deep-learning-based-decoding-of-constrained-sequence-codes
|
[
"1ab3626dc6034455e3324fa0d054ae48e78d13b2"
] |
[
"utils.py"
] |
[
"# authors: Congzhe Cao, email:[email protected]\n# Duanshun Li, email:[email protected]\n# This is the code repo for the paper \"Deep-learning based decoding of constrained sequence codes\",\n# in IEEE Journal on Selected Areas in Communications, https://ieeexplore.ieee.org/document/8792188.\n# Credit is also given to Tobias Gruber et al and their github repo https://github.com/gruberto/DL-ChannelDecoding,\n# where this code repo is initially partly written based on theirs.\n#!/usr/bin/env python\n\nimport numpy as np\nimport random\nfrom keras import backend as K ###\n\ndef modulateBPSK(x):\n return -2 * x + 1\n\ndef addNoise_fixedlengthCode(x, sigma):\n w = K.random_normal(K.shape(x), mean=0.0, stddev=sigma)\n return x + w\n\ndef addNoise(x, sigma, len_test = None):\n if len_test is None:\n w = K.random_normal(K.shape(x), mean=0.0, stddev=sigma)\n positives = K.equal(x, 3)\n positives = K.cast(positives, K.floatx())\n noisy = x + w\n noisy = noisy - noisy*positives + 3*positives\n K.print_tensor(noisy)\n return noisy\n else:\n w = np.random.normal(0.0, sigma, x.shape)\n noisy = x + w\n for noisy_test_i in range(0, noisy.shape[0]):\n if len_test[noisy_test_i][0] < noisy.shape[1]:\n noisy[noisy_test_i][int(len_test[noisy_test_i][0]):] = [3] * (noisy.shape[1] - int(len_test[noisy_test_i][0]))\n return noisy;\n\ndef ber(y_true, y_pred):\n return K.mean(K.not_equal(y_true, K.round(y_pred)))\n\n\ndef return_output_shape(input_shape):\n return input_shape\n\n\ndef log_likelihood_ratio(x, sigma):\n return 2 * x / np.float32(sigma ** 2)\n\n\ndef errors(y_true, y_pred):\n return K.sum(K.cast(K.not_equal(y_true, K.round(y_pred)), dtype='float'))\n\n\ndef half_adder(a, b):\n s = a ^ b\n c = a & b\n return s, c\n\n\ndef full_adder(a, b, c):\n s = (a ^ b) ^ c # for current bit position\n c = (a & b) | (c & (a ^ b)) # for the next bit position\n # print(\"s: \", s,\" c: \", c);\n return s, c\n\n\ndef add_bool(a, b):\n if len(a) != len(b):\n raise ValueError('arrays with different length')\n k = len(a)\n s = np.zeros(k, dtype=bool)\n c = False\n for i in reversed(range(0, k)):\n s[i], c = full_adder(a[i], b[i], c)\n if c:\n warnings.warn(\"Addition overflow!\")\n return s\n\n\ndef inc_bool(a):\n k = len(a)\n increment = np.hstack((np.zeros(k - 1, dtype=bool), np.ones(1, dtype=bool)))\n # print(\"a: \", a,\" increment: \", increment);\n a = add_bool(a, increment)\n return a\n\n\ndef bitrevorder(x):\n m = np.amax(x)\n n = np.ceil(np.log2(m)).astype(int)\n for i in range(0, len(x)):\n x[i] = int('{:0{n}b}'.format(x[i], n=n)[::-1], 2)\n return x\n\n\ndef int2bin(x, N):\n if isinstance(x, list) or isinstance(x, np.ndarray):\n binary = np.zeros((len(x), N), dtype='bool')\n for i in range(0, len(x)):\n binary[i] = np.array([int(j) for j in bin(x[i])[2:].zfill(N)])\n else:\n binary = np.array([int(j) for j in bin(x)[2:].zfill(N)], dtype=bool)\n\n return binary\n\n\ndef bin2int(b):\n if isinstance(b[0], list):\n integer = np.zeros((len(b),), dtype=int)\n for i in range(0, len(b)):\n out = 0\n for bit in b[i]:\n out = (out << 1) | bit\n integer[i] = out\n elif isinstance(b, np.ndarray):\n if len(b.shape) == 1:\n out = 0\n for bit in b:\n out = (out << 1) | bit\n integer = out\n else:\n integer = np.zeros((b.shape[0],), dtype=int)\n for i in range(0, b.shape[0]):\n out = 0\n for bit in b[i]:\n out = (out << 1) | bit\n integer[i] = out\n\n return integer\n\n\ndef polar_design_awgn(N, k, design_snr_dB):\n S = 10 ** (design_snr_dB / 10)\n z0 = np.zeros(N)\n\n z0[0] = np.exp(-S)\n for j in range(1, int(np.log2(N)) + 
1):\n        u = 2 ** j\n        for t in range(0, int(u / 2)):\n            T = z0[t]\n            z0[t] = 2 * T - T ** 2  # upper channel\n            z0[int(u / 2) + t] = T ** 2  # lower channel\n\n    # sort into increasing order\n    idx = np.argsort(z0)\n\n    # select k best channels\n    idx = np.sort(bitrevorder(idx[0:k]))\n\n    A = np.zeros(N, dtype=bool)\n    A[idx] = True\n\n    return A\n\n\ndef polar_transform_iter(u):\n    N = len(u)\n    n = 1\n    x = np.copy(u)\n    stages = np.log2(N).astype(int)\n    for s in range(0, stages):\n        i = 0\n        while i < N:\n            for j in range(0, n):\n                idx = i + j\n                x[idx] = x[idx] ^ x[idx + n]\n            i = i + 2 * n\n        n = 2 * n\n    return x\n\ndef error_correction_hard(clen, received, codebook_decode_array_shuffle = None):\n    if codebook_decode_array_shuffle.size != 0:\n        codebook = codebook_decode_array_shuffle\n    else:\n        codebook = code_word_4b6b\n\n    min_hamming_distance = clen + 1\n    for key in codebook:\n        hamming_distance = 0\n        for bit in range(0, clen):\n            if received[bit] != key[bit]:\n                hamming_distance += 1\n        if hamming_distance < min_hamming_distance:\n            min_hamming_distance = hamming_distance\n            corrected = key\n    return corrected\n\ndef error_correction_soft(clen, received, codebook_decode_array_shuffle = None):\n    if codebook_decode_array_shuffle.size != 0:\n        codebook = codebook_decode_array_shuffle\n    else:\n        codebook = code_word_4b6b\n\n    min_distance = 10.0 ** 10.0\n    for key in codebook:\n        distance = 0.0\n        for bit in range(0, clen):\n            distance += abs(received[bit] - (-2.0 * key[bit] + 1.0)) * abs(received[bit] - (-2.0 * key[bit] + 1.0))\n        if distance < min_distance:\n            # print(distance,\"\\n\")\n            min_distance = distance\n            corrected = key\n    return corrected\n\ndef error_correction_soft_DCfreeN5(clen, received):\n    if clen == 2:\n        codebook = code_word_DCfreeN5_len2\n    elif clen == 4:\n        codebook = code_word_DCfreeN5_len4\n    else:\n        print(\"received word not recognized (length can only be 2 or 4)\")\n        exit(-1)\n\n    min_distance = 10.0 ** 10.0\n    for key in codebook:\n        distance = 0.0\n        for bit in range(0, clen):\n            distance += abs(received[bit] - (-2.0 * key[bit] + 1.0)) * abs(received[bit] - (-2.0 * key[bit] + 1.0))\n        if distance < min_distance:\n            # print(distance,\"\\n\")\n            min_distance = distance\n            corrected = key\n    return corrected\n\ndef bit_err(ber, bits, clen):\n    \"\"\"\n    bit error rate vs S/N ratio\n    :param ber: ber array [sigma, error, bits]\n    :param bits: number of bits\n    :param clen: code length\n    :return:\n    \"\"\"\n    biterr = np.zeros((ber.shape[0], 2))\n    biterr[:, 0] = 10 * np.log10(1 / (2.0 * ber[:, 0] * ber[:, 0])) - 10 * np.log10(float(bits) / clen)\n    biterr[:, 1] = ber[:, 1] / ber[:, 2]\n    return biterr\n\ndef score(biterr0, biterr1):\n    \"\"\"\n    score to evaluate the decoder\n    :param biterr0: bit error rate (optimal) [sigma, biterr]\n    :param biterr1: bit error rate for evaluation [sigma, 
biterr]\n    :return:\n    \"\"\"\n\n    n = biterr1[0:len(biterr0) - 1, 1]/biterr0[0:len(biterr0) - 1, 1]\n    s = np.nansum(n)\n    if biterr1[len(biterr0) - 1, 1] == 0:\n        s += 1\n    else:\n        s += 10\n    s = s / 5.0\n    return s\n\ndef scoreBLER(BLER0, BLER1):\n    s = 0.0\n    for i in range(0, len(BLER0)):\n        if BLER1[i] == 0:\n            if BLER0[i] == 0:\n                s += 0\n            else:\n                s += 10\n        else:\n            s += float(BLER0[i]) / float(BLER1[i])\n\n    s = s / len(BLER0)\n    return s\n\ndef shuffle_code_book(encode_book):\n    \"\"\"\n    shuffle the code book\n    :param encode_book: code book\n    :return: shuffled code book\n    \"\"\"\n    codbok = np.array(list(encode_book.items()))\n    ids0 = np.random.permutation(codbok.shape[0])\n    ids1 = np.random.permutation(codbok.shape[0])\n\n    cod = codbok[ids0, 0]\n    word = codbok[ids1, 1]\n    shuff_encode_book = dict()\n\n    for i in range(len(cod)):\n        shuff_encode_book[cod[i]] = word[i]\n    return shuff_encode_book\n\ndef cartesian(arrays, out=None):\n    \"\"\"\n    Generate a cartesian product of input arrays.\n    Parameters\n    ----------\n    arrays : list of array-like\n        1-D arrays to form the cartesian product of.\n    out : ndarray\n        Array to place the cartesian product in.\n    Returns\n    -------\n    out : ndarray\n        2-D array of shape (M, len(arrays)) containing cartesian products\n        formed of input arrays.\n    Examples\n    --------\n    cartesian(([1, 2, 3], [4, 5], [6, 7]))\n    array([[1, 4, 6],\n           [1, 4, 7],\n           [1, 5, 6],\n           [1, 5, 7],\n           [2, 4, 6],\n           [2, 4, 7],\n           [2, 5, 6],\n           [2, 5, 7],\n           [3, 4, 6],\n           [3, 4, 7],\n           [3, 5, 6],\n           [3, 5, 7]])\n    \"\"\"\n\n    arrays = [np.asarray(x) for x in arrays]\n    dtype = arrays[0].dtype\n\n    n = np.prod([x.size for x in arrays])\n    if out is None:\n        out = np.zeros([n, len(arrays)], dtype=dtype)\n\n    m = int(n / arrays[0].size)\n    out[:,0] = np.repeat(arrays[0], m)\n    if arrays[1:]:\n        cartesian(arrays[1:], out=out[0:m,1:])\n        for j in range(1, arrays[0].size):\n            out[j*m:(j+1)*m,1:] = out[0:m,1:]\n    return out\n\ndef create_codebook_shuffle(nframe = 5):\n    cbs = []\n    np.random.seed(0)\n    for i in range(nframe):\n        cbi = shuffle_code_book(codebook_4b6b)\n        cbs.append(cbi)\n\n    comb_codbok = combine_codes(cbs)\n    return comb_codbok\n\ndef combine_codes(codboks):\n    \"\"\"\n    combine multiple code books to a big code\n    :param codboks: tuple/list of code books\n    :return: code book for the combined code\n    \"\"\"\n    idx = ()\n    for cb in codboks:\n        key = cb.keys()\n        idx = idx + (list(key),)\n    idx = cartesian(idx)\n\n    res = dict()\n\n    cur_index = 0\n    for id in idx:\n        cur_index += 1\n        print(\"processing \", cur_index, \" hash entry in the shuffled codebook\")\n\n        cod = ''\n        word = ''\n        for i in range(len(id)):\n            cod += id[i]\n            word += \" \" + codboks[i][id[i]]\n        cod = np.array(cod.replace('[', ' ').replace(']', ' ').split()).astype(int)\n        word = word.lstrip(\" \")\n\n        res[str(cod)] = word\n\n    return res\n\ndef get_decode_book(codbok):\n    \"\"\"\n    get decode book from code book\n    :param codbok: code book\n    :return: decode book\n    \"\"\"\n    decodbok = dict()\n    decodbok_array = []\n    for key, val in codbok.items():\n        decodbok[str(np.array(val.split()).astype(int))] = key.replace('[', '').replace(']', '')\n        decodbok_array.append(np.array(list(val.split(' ')), dtype = 'int'))\n    decodbok_array = np.array(decodbok_array)\n    return decodbok, decodbok_array\n\n\n# hash table for encoding\ncodebook_4b6b = {str(np.array([0, 0, 0, 0])):'0 0 1 1 1 0',\n                 str(np.array([0, 0, 0, 1])):'0 0 1 1 0 1',\n                 str(np.array([0, 0, 1, 0])):'0 1 0 0 1 1',\n                 str(np.array([0, 0, 1, 1])):'0 1 0 1 1 0',\n                 str(np.array([0, 1, 0, 0])):'0 1 0 1 0 1',\n                 str(np.array([0, 1, 0, 
1])):'1 0 0 0 1 1',\n str(np.array([0, 1, 1, 0])):'1 0 0 1 1 0',\n str(np.array([0, 1, 1, 1])):'1 0 0 1 0 1',\n str(np.array([1, 0, 0, 0])):'0 1 1 0 0 1',\n str(np.array([1, 0, 0, 1])):'0 1 1 0 1 0',\n str(np.array([1, 0, 1, 0])):'0 1 1 1 0 0',\n str(np.array([1, 0, 1, 1])):'1 1 0 0 0 1',\n str(np.array([1, 1, 0, 0])):'1 1 0 0 1 0',\n str(np.array([1, 1, 0, 1])):'1 0 1 0 0 1',\n str(np.array([1, 1, 1, 0])):'1 0 1 0 1 0',\n str(np.array([1, 1, 1, 1])):'1 0 1 1 0 0'}\n\n# hash table for decoding\ndecode_4b6b = {str(np.array([0, 0, 1, 1, 1, 0])): '0 0 0 0',\n str(np.array([0, 0, 1, 1, 0, 1])):'0 0 0 1',\n str(np.array([0, 1, 0, 0, 1, 1])):'0 0 1 0',\n str(np.array([0, 1, 0, 1, 1, 0])):'0 0 1 1',\n str(np.array([0, 1, 0, 1, 0, 1])):'0 1 0 0',\n str(np.array([1, 0, 0, 0, 1, 1])):'0 1 0 1',\n str(np.array([1, 0, 0, 1, 1, 0])):'0 1 1 0',\n str(np.array([1, 0, 0, 1, 0, 1])):'0 1 1 1',\n str(np.array([0, 1, 1, 0, 0, 1])):'1 0 0 0',\n str(np.array([0, 1, 1, 0, 1, 0])):'1 0 0 1',\n str(np.array([0, 1, 1, 1, 0, 0])):'1 0 1 0',\n str(np.array([1, 1, 0, 0, 0, 1])):'1 0 1 1',\n str(np.array([1, 1, 0, 0, 1, 0])):'1 1 0 0',\n str(np.array([1, 0, 1, 0, 0, 1])):'1 1 0 1',\n str(np.array([1, 0, 1, 0, 1, 0])):'1 1 1 0',\n str(np.array([1, 0, 1, 1, 0, 0])):'1 1 1 1'}\n\n# hash table for error correction during decoding, same as codebook_decode but different type,\n# to be compatible with function error_correction_hard() and error_correction_soft()\ncode_word_4b6b = np.array([[0, 0, 1, 1, 1, 0],\n [0, 0, 1, 1, 0, 1],\n [0, 1, 0, 0, 1, 1],\n [0, 1, 0, 1, 1, 0],\n [0, 1, 0, 1, 0, 1],\n [1, 0, 0, 0, 1, 1],\n [1, 0, 0, 1, 1, 0],\n [1, 0, 0, 1, 0, 1],\n [0, 1, 1, 0, 0, 1],\n [0, 1, 1, 0, 1, 0],\n [0, 1, 1, 1, 0, 0],\n [1, 1, 0, 0, 0, 1],\n [1, 1, 0, 0, 1, 0],\n [1, 0, 1, 0, 0, 1],\n [1, 0, 1, 0, 1, 0],\n [1, 0, 1, 1, 0, 0]])\n\n\nvary_length_1_3dk = {\n str(np.array([0])): '0 1',\n str(np.array([1, 0])): '0 0 1',\n str(np.array([1, 1])): '0 0 0 1'\n}\n\nvary_length_DCfreeN5_state_1 = {\n str(np.array([0, 1, 0])): [0, 1, 1, 1],\n str(np.array([0, 1, 1])): [0, 1, 0, 1],\n str(np.array([1, 0, 0])): [0, 1, 1, 0],\n str(np.array([1, 0, 1])): [1, 0, 1, 1],\n str(np.array([1, 1, 0])): [1, 0, 0, 1],\n str(np.array([1, 1, 1])): [1, 0, 1, 0],\n}\n\nvary_length_DCfreeN5_state_2 = {\n str(np.array([0, 1, 0])): [1, 0, 0, 0],\n str(np.array([0, 1, 1])): [0, 1, 0, 1],\n str(np.array([1, 0, 0])): [0, 1, 1, 0],\n str(np.array([1, 0, 1])): [0, 1, 0, 0],\n str(np.array([1, 1, 0])): [1, 0, 0, 1],\n str(np.array([1, 1, 1])): [1, 0, 1, 0],\n}\n\nvary_length_DCfreeN5_for_decode = {\n str(np.array([1, 1])): [0, 0],\n str(np.array([0, 0])): [0, 0],\n str(np.array([0, 1, 1, 1])): [0, 1, 0],\n str(np.array([1, 0, 0, 0])): [0, 1, 0],\n str(np.array([0, 1, 0, 1])): [0, 1, 1],\n str(np.array([0, 1, 1, 0])): [1, 0, 0],\n str(np.array([1, 0, 1, 1])): [1, 0, 1],\n str(np.array([0, 1, 0, 0])): [1, 0, 1],\n str(np.array([1, 0, 0, 1])): [1, 1, 0],\n str(np.array([1, 0, 1, 0])): [1, 1, 1],\n}\n\ncode_word_DCfreeN5_len2 = np.array([[1, 1], [0, 0]])\n\n\ncode_word_DCfreeN5_len4 = np.array([[0, 1, 1, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 1],\n [0, 1, 1, 0],\n [1, 0, 1, 1],\n [0, 1, 0, 0],\n [1, 0, 0, 1],\n [1, 0, 1, 0],])\n\nif __name__ == \"__main__\":\n shuf_code = shuffle_code_book(codebook_4b6b)\n shuf_decode = get_decode_book(shuf_code)\n res = combine_codes([codebook_4b6b, shuf_code])\n"
] |
[
[
"numpy.amax",
"numpy.log2",
"numpy.random.seed",
"numpy.asarray",
"numpy.repeat",
"numpy.ones",
"numpy.copy",
"numpy.nansum",
"numpy.random.permutation",
"numpy.random.normal",
"numpy.prod",
"numpy.float32",
"numpy.log10",
"numpy.argsort",
"numpy.array",
"numpy.exp",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
IBM/NeuronAlignment
|
[
"5b82b60666db1fac72e53db07529a3328ee549c4",
"5b82b60666db1fac72e53db07529a3328ee549c4"
] |
[
"utils/birkhoff.py",
"training/train_curve_pam.py"
] |
[
"# birkhoff.py - decompose a doubly stochastic matrix into permutation matrices\n#\n# Copyright 2015 Jeffrey Finkelstein.\n#\n# This file is part of Birkhoff.\n#\n# Birkhoff is free software: you can redistribute it and/or modify it under the\n# terms of the GNU General Public License as published by the Free Software\n# Foundation, either version 3 of the License, or (at your option) any later\n# version.\n#\n# Birkhoff is distributed in the hope that it will be useful, but WITHOUT ANY\n# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\n# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# Birkhoff. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"Provides a function for computing the Birkhoff--von Neumann decomposition of\na doubly stochastic matrix into a convex combination of permutation matrices.\n\n\"\"\"\n# Imports from built-in libraries.\nfrom __future__ import division\nimport itertools\n\n# Imports from third-party libraries.\nimport numpy as np\nimport scipy\n\n#: The current version of this package.\n__version__ = '0.0.5'\n# THIS ALGORITHM HAS BEEN HEAVILY EDITED FROM THE FILE IT HAS BEEN MODIFIED FROM\n\n#: Any number smaller than this will be rounded down to 0 when computing the\n#: difference between NumPy arrays of floats.\nTOLERANCE = np.finfo(np.float).eps * 10 # was 1E1, -15\n\n\ndef to_permutation_matrix(matches, n):\n \"\"\"Converts a permutation into a permutation matrix.\n\n `matches` is a dictionary whose keys are vertices and whose values are\n partners. For each vertex ``u`` and ``v``, entry (``u``, ``v``) in the\n returned matrix will be a ``1`` if and only if ``matches[u] == v``.\n\n Pre-condition: `matches` must be a permutation on an initial subset of the\n natural numbers.\n\n Returns a permutation matrix as a square NumPy array.\n\n \"\"\"\n # n = len(matches)\n P = np.zeros((n, n))\n # This is a cleverer way of doing\n #\n # for (u, v) in matches.items():\n # P[u, v] = 1\n #\n P[list(zip(*(matches.items())))] = 1\n return P\n\n\ndef zeros(m, n):\n \"\"\"Convenience function for ``numpy.zeros((m, n))``.\"\"\"\n return np.zeros((m, n))\n\n\ndef hstack(left, right):\n \"\"\"Convenience function for ``numpy.hstack((left, right))``.\"\"\"\n return np.hstack((left, right))\n\n\ndef vstack(top, bottom):\n \"\"\"Convenience function for ``numpy.vstack((top, bottom))``.\"\"\"\n return np.vstack((top, bottom))\n\n\ndef four_blocks(topleft, topright, bottomleft, bottomright):\n \"\"\"Convenience function that creates a block matrix with the specified\n blocks.\n\n Each argument must be a NumPy matrix. The two top matrices must have the\n same number of rows, as must the two bottom matrices. 
The two left matrices\n    must have the same number of columns, as must the two right matrices.\n\n    \"\"\"\n    return vstack(hstack(topleft, topright),\n                  hstack(bottomleft, bottomright))\n\n\ndef to_bipartite_matrix(A):\n    \"\"\"Returns the adjacency matrix of a bipartite graph whose biadjacency\n    matrix is `A`.\n\n    `A` must be a NumPy array.\n\n    If `A` has **m** rows and **n** columns, then the returned matrix has **m +\n    n** rows and columns.\n\n    \"\"\"\n    m, n = A.shape\n    return four_blocks(zeros(m, m), A, A.T, zeros(n, n))\n\n\ndef to_pattern_matrix(D):\n    \"\"\"Returns the Boolean matrix in the same shape as `D` with ones exactly\n    where there are nonzero entries in `D`.\n\n    `D` must be a NumPy array.\n\n    \"\"\"\n    result = np.zeros_like(D)\n    # This is a cleverer way of doing\n    #\n    #     for (u, v) in zip(*(D.nonzero())):\n    #         result[u, v] = 1\n    #\n    result[D.nonzero()] = 1\n    return result\n\n\ndef birkhoff_von_neumann_decomposition(D):\n    \"\"\"Returns the Birkhoff--von Neumann decomposition of the doubly\n    stochastic matrix `D`.\n\n    The input `D` must be a square NumPy array representing a doubly\n    stochastic matrix (that is, a matrix whose entries are nonnegative\n    reals and whose row sums and column sums are all 1). Each doubly\n    stochastic matrix is a convex combination of at most ``n ** 2``\n    permutation matrices, where ``n`` is the dimension of the input\n    array.\n\n    The returned value is a list of pairs whose length is at most ``n **\n    2``. In each pair, the first element is a real number in the interval **(0,\n    1]** and the second element is a NumPy array representing a permutation\n    matrix. This represents the doubly stochastic matrix as a convex\n    combination of the permutation matrices.\n\n    The input matrix may also be a scalar multiple of a doubly\n    stochastic matrix, in which case the row sums and column sums must\n    each be *c*, for some positive real number *c*. This may be useful\n    in avoiding precision issues: given a doubly stochastic matrix that\n    will have many entries close to one, multiply it by a large positive\n    integer. 
The returned permutation matrices will be the same\n    regardless of whether the given matrix is a doubly stochastic matrix\n    or a scalar multiple of a doubly stochastic matrix, but in the\n    latter case, the coefficients will all be scaled by the appropriate\n    scalar multiple, and their sum will be that scalar instead of one.\n\n    For example::\n\n        >>> import numpy as np\n        >>> from birkhoff import birkhoff_von_neumann_decomposition as decomp\n        >>> D = np.ones((2, 2))\n        >>> zipped_pairs = decomp(D)\n        >>> coefficients, permutations = zip(*zipped_pairs)\n        >>> coefficients\n        (1.0, 1.0)\n        >>> permutations[0]\n        array([[ 1.,  0.],\n               [ 0.,  1.]])\n        >>> permutations[1]\n        array([[ 0.,  1.],\n               [ 1.,  0.]])\n        >>> zipped_pairs = decomp(D / 2)  # halve each value in the matrix\n        >>> coefficients, permutations = zip(*zipped_pairs)\n        >>> coefficients  # will be half as large as before\n        (0.5, 0.5)\n        >>> permutations[0]  # will be the same as before\n        array([[ 1.,  0.],\n               [ 0.,  1.]])\n        >>> permutations[1]\n        array([[ 0.,  1.],\n               [ 1.,  0.]])\n\n    The returned list of pairs is given in the order computed by the algorithm\n    (so in particular they are not sorted in any way).\n\n    \"\"\"\n    m, n = D.shape\n    if m != n:\n        raise ValueError('Input matrix must be square ({} x {})'.format(m, n))\n    indices = list(itertools.product(range(m), range(n)))\n    # These two lists will store the result as we build it up each iteration.\n    coefficients = []\n    permutations = []\n    # Create a copy of D so that we don't modify it directly. Cast the\n    # entries of the matrix to floating point numbers, regardless of\n    # whether they were integers.\n    S = D.astype('float')\n    k_iter = 0\n    while not np.all(S == 0):\n        # Create an undirected graph whose adjacency matrix contains a 1\n        # exactly where the matrix S has a nonzero entry.\n\n        # W = to_pattern_matrix(S)\n\n        # Construct the bipartite graph whose left and right vertices both\n        # represent the vertex set of the pattern graph (whose adjacency matrix\n        # is ``W``).\n\n        # X = to_bipartite_matrix(W)\n\n        # Convert the matrix of a bipartite graph into a NetworkX graph object.\n\n        # G = from_numpy_matrix(X)\n\n        # Compute a perfect matching for this graph. The dictionary `M` has one\n        # entry for each matched vertex (in both the left and the right vertex\n        # sets), and the corresponding value is its partner.\n        #\n        # The bipartite maximum matching algorithm requires specifying\n        # the left set of nodes in the bipartite graph. By construction,\n        # the left set of nodes is {0, ..., n - 1} and the right set is\n        # {n, ..., 2n - 1}; see `to_bipartite_matrix()`.\n\n        # left_nodes = range(n)\n        # M = maximum_matching(G, left_nodes)\n        #\n        # However, since we have both a left vertex set and a right vertex set,\n        # each representing the original vertex set of the pattern graph\n        # (``W``), we need to convert any vertex greater than ``n`` to its\n        # original vertex number. 
To do this,\n #\n # - ignore any keys greater than ``n``, since they are already\n # covered by earlier key/value pairs,\n # - ensure that all values are less than ``n``.\n #\n # M = {u: v % n for u, v in M.items() if u < n}\n # Convert that perfect matching to a permutation matrix.\n\n\n\n # P = to_permutation_matrix(M, n)\n # Get the smallest entry of S corresponding to the 1 entries in the\n # permutation matrix.\n\n C = -S\n C[np.where(S == 0)] = len(S) ** 2\n row_ind, col_ind = scipy.optimize.linear_sum_assignment(C)\n P = np.zeros([len(row_ind), len(col_ind)])\n for r, c in zip(row_ind, col_ind):\n P[r, c] = 1\n\n q = min(S[i, j] for (i, j) in indices if P[i, j] == 1)\n # Store the coefficient and the permutation matrix for later.\n if q == 0.0:\n # print('k_iter for break', k_iter)\n break\n coefficients.append(q)\n permutations.append(P)\n # Subtract P scaled by q. After this subtraction, S has a zero entry\n # where the value q used to live.\n S -= q * P\n # PRECISION ISSUE: There seems to be a problem with floating point\n # precision here, so we need to round down to 0 any entry that is very\n # small.\n S[np.abs(S) < 5E-4] = 0.0\n k_iter += 1\n return list(zip(coefficients, permutations))\n",
"import argparse\nimport os\nimport sys\nimport time\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom models import curves_pam\nfrom utils import utils, alignment, data\nimport models\nimport definitions\nimport copy\n\n\nparser = argparse.ArgumentParser(description='Trains a curve between two neural networks using PAM.')\n\nparser.add_argument('--dir', type=str, default='model_dicts/curve_pam_models/', metavar='DIR',\n help='directory for saving curve models (default: model_dicts/curve_pam_models/)')\nparser.add_argument('--dir2', type=str, default='model_data/training/curve_pam_models/', metavar='DIR',\n help='directory for saving curve models data (default: model_data/training/curve_pam_models/)')\nparser.add_argument('--data_path', type=str, default='data/', metavar='PATH',\n help='path to datasets location (default: data/)')\nparser.add_argument('--dir_models', type=str, default='model_dicts/basic_models/', metavar='ENDPOINTS',\n help='directory to model dicts for the curve endpoints. (default: model_dicts/basic_models/)')\nparser.add_argument('--dir_alignment', type=str, default='model_dicts/paired_models/', metavar='DIR',\n help='directory to alignments between the endpoint models (default: model_dicts/paired_models/)')\n\n\nparser.add_argument('--dataset', type=str, default='CIFAR10', metavar='DATASET',\n help='dataset name (default: CIFAR10)')\nparser.add_argument('--use_test', action='store_true', default=True,\n help='switches between validation and test set (default: True)')\nparser.add_argument('--transform', type=str, default='TinyTen', metavar='TRANSFORM',\n help='transform name (default: TinyTen)')\nparser.add_argument('--batch_size', type=int, default=128, metavar='N',\n help='input batch size (default: 128)')\nparser.add_argument('--num-workers', type=int, default=4, metavar='N',\n help='number of workers (default: 4)')\n\nparser.add_argument('--model', type=str, default='TinyTen', metavar='MODEL',\n help='model name (default: None)')\nparser.add_argument('--curve', type=str, default='Bezier', metavar='CURVE',\n help='curve type to use (default: Bezier)')\nparser.add_argument('--num_bends', type=int, default=3, metavar='N',\n help='number of curve bends (default: 3)')\n\nparser.add_argument('--fix_start', dest='fix_start', action='store_true', default=True,\n help='fix start point (default: True)')\nparser.add_argument('--fix_end', dest='fix_end', action='store_true', default=True,\n help='fix end point (default: True)')\n\nparser.set_defaults(init_linear=True)\nparser.add_argument('--init_linear_off', dest='init_linear', action='store_false',\n help='turns off linear initialization of intermediate points (default: on)')\nparser.add_argument('--resume', type=str, default=None, metavar='CKPT',\n help='checkpoint to resume training from (default: None)')\n\nparser.add_argument('--outer_iters', type=int, default=1, metavar='N',\n help='number of PAM iterations to train (default: 1)')\nparser.add_argument('--inner_iters_perm', type=int, default=20, metavar='N',\n help='number of epochs to train permutation for each subiteration. (default: 20)')\nparser.add_argument('--inner_iters_phi', type=int, default=250, metavar='N',\n help='number of epochs to train curve parameters for each subiteration. 
(default: 250)')\n\nparser.add_argument('--save_freq', type=int, default=270, metavar='N',\n help='save frequency (default: 270)')\n\nparser.add_argument('--lr', type=float, default=1E-1, metavar='LR',\n help='initial learning rate (default: 0.01)')\nparser.add_argument('--wd', type=float, default=5e-4, metavar='WD',\n help='weight decay (default: 5e-4)')\nparser.add_argument('--lr_decay', type=float, default=0.9996, help='Learning Rate Decay for SGD')\nparser.add_argument('--lr_drop', type=int, default=20, help='Number of epochs required to decay learning rate')\n\nparser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')\nparser.add_argument('--seed_a', type=int, default=None, metavar='S', help='random seed for model 0 (default: None)')\nparser.add_argument('--seed_b', type=int, default=None, metavar='S', help='random seed for model 1(default: None)')\nparser.add_argument('--epochs_model',\n type=int, default=200, metavar='EPOCHS', help='Number of epochs the models were trained for')\nparser.add_argument('--alignment', type=str, default='',\n help='specify an alignment if the models are to be aligned before curve finding (default: None)')\nparser.add_argument('--val_freq', nargs='+', type=int, default=[20, 250],\n help='the rate in epochs at which to evaluate the model on the validation set. (default: [20, 250])')\n\nargs = parser.parse_args()\n\nargs.dir = ('%s%s/%s/' % (args.dir, args.model, args.dataset))\nargs.dir2 = ('%s%s/%s/' % (args.dir2, args.model, args.dataset))\nargs.dir_models = ('%s%s/%s/' % (args.dir_models, args.model, args.dataset))\nargs.dir_alignment = ('%s%s/%s/' % (args.dir_alignment, args.model, args.dataset))\n\nproject_root = definitions.get_project_root()\nos.chdir(project_root)\nos.makedirs(args.dir, exist_ok=True)\nos.makedirs(args.dir2, exist_ok=True)\n\nprint('Arguments')\nfor arg in vars(args):\n print('%s: %s' % (arg, str(getattr(args, arg))))\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\nnp.random.seed(args.seed)\ntorch.backends.cudnn.benchmark = True\ntorch.manual_seed(args.seed)\nif use_cuda:\n torch.cuda.manual_seed(args.seed)\n\nloaders, num_classes = data.loaders(\n args.dataset,\n args.data_path,\n args.batch_size,\n args.num_workers,\n args.transform,\n args.use_test,\n test_batch_size=512\n)\n\nmodel_paths = ['%scheckpoint_seed_%02d-%d.pt' % (args.dir_models, args.seed_a, args.epochs_model),\n '%scheckpoint_seed_%02d-%d.pt' % (args.dir_models, args.seed_b, args.epochs_model)]\nstate_0 = torch.load(model_paths[0], map_location=device)\nstate_1 = torch.load(model_paths[1], map_location=device)\n\narchitecture = getattr(models, args.model)\nmodel_0 = architecture.base(num_classes=num_classes, device=device, **architecture.kwargs)\nmodel_0.load_state_dict(state_0['model_state'])\nmodel_1 = architecture.base(num_classes=num_classes, device=device, **architecture.kwargs)\nmodel_1.load_state_dict(state_1['model_state'])\n\nif args.alignment is not None and args.alignment != '' and args.alignment != 'pam':\n matching = np.load('%smatch_%s_seeds_%02d_%02d.npy' %\n (args.dir_alignment, args.alignment, args.seed_a, args.seed_b), allow_pickle=True)\n if args.model == 'ResNet32':\n model_1, _ = alignment.align_models_resnet(model_1, matching)\n elif args.model == 'GoogLeNet':\n model_1.align_inception(matching)\n else:\n model_1 = alignment.align_models(model_1, matching)\n model_1.to(device)\nelse:\n matching = None\n\nif args.model == 'GoogLeNet':\n matching_ref = 
np.load('%smatch_%s_seeds_%02d_%02d.npy' %\n (args.dir_alignment, 'corr', args.seed_a, args.seed_b), allow_pickle=True)\nelse:\n matching_ref = None\ncurve = getattr(curves_pam, args.curve)\nmodel = curves_pam.CurveNet(\n num_classes,\n device,\n curve,\n architecture.curve,\n args.num_bends,\n args.fix_start,\n args.fix_end,\n architecture_kwargs=architecture.kwargs,\n act_ref=matching_ref\n)\n\nperm_params = nn.ParameterList()\nfor param in model.permutations.parameters():\n if param.requires_grad:\n perm_params.append(param)\n\noptimizer_perm = optim.SGD(\n perm_params,\n lr=(args.lr * 5E-1))\n\noptimizer_phi = optim.SGD(\n filter(lambda param: param.requires_grad, model.curve_learnable_params),\n lr=args.lr,\n momentum=0.9,\n weight_decay=args.wd if args.curve is None else 0.0,\n nesterov=True)\n\nlambda_perm = lambda epoch: 0.5 ** (epoch // 20) * args.lr_decay ** epoch\nlambda_phi = lambda epoch: 0.5 ** (epoch // args.lr_drop) * args.lr_decay ** epoch\n\nscheduler_perm = optim.lr_scheduler.LambdaLR(optimizer_perm, lr_lambda=lambda_perm)\nscheduler_phi = optim.lr_scheduler.LambdaLR(optimizer_phi, lr_lambda=lambda_phi)\n\nif args.resume is None:\n model.import_base_parameters(model_0, 0)\n model.import_base_parameters(model_1, 2)\n if args.init_linear:\n print('Linear initialization.')\n model.init_zero()\n start_epoch = 1\nmodel.to(device)\n\nmodel_turningpt = architecture.base(num_classes=num_classes, device=device)\nmodel.export_base_parameters(model_turningpt, 1)\n\nif args.model == 'GoogLeNet':\n criterion = utils.googlenet_criterion\nelse:\n criterion = nn.CrossEntropyLoss()\nregularizer = None if args.curve is None else curves_pam.l2_regularizer(args.wd)\n\nif args.val_freq is None:\n args.val_freq = np.nan\n\ntotal_iters = args.outer_iters * (args.inner_iters_perm + args.inner_iters_phi)\nacc_train = np.ones(total_iters + 1) * np.nan\nacc_test = np.ones(total_iters + 1) * np.nan\nloss_train = np.ones(total_iters + 1) * np.nan\nloss_test = np.ones(total_iters + 1) * np.nan\nhas_bn = utils.check_bn(model)\nlr = args.lr\n\nchange_P = np.ones(total_iters + 1) * np.nan\n\nnumber_batches = len(loaders['test'])\nloss_time = np.ones([total_iters+1, number_batches]) * np.nan\nacc_time = np.ones([total_iters+1, number_batches]) * np.nan\n\nif args.val_freq[0] is None:\n args.val_freq[0] = np.nan\nif args.val_freq[1] is None:\n args.val_freq[1] = np.nan\n\nprint('Beginning training')\nfor iter in range(start_epoch, args.outer_iters + 1):\n\n params_before = [None] * len(optimizer_perm.param_groups[0]['params'])\n for idx, param in enumerate(optimizer_perm.param_groups[0]['params']):\n params_before[idx] = param.clone().detach()\n\n for epoch in range(1, args.inner_iters_perm + 1):\n for param in optimizer_perm.param_groups[0]['params']:\n param.requires_grad = True\n for param in optimizer_phi.param_groups[0]['params']:\n param.requires_grad = False\n\n test_res = {'loss': np.nan, 'accuracy': np.nan, 'nll': np.nan, 'loss_time': np.nan, 'acc_time': np.nan}\n time_ep = time.time()\n\n if args.curve is None or not has_bn or epoch % args.val_freq[0] == 1 or args.val_freq[0] == 1:\n test_res = utils.test_perm(loaders['test'], model, criterion, regularizer=regularizer,\n train_loader=loaders['train'], bn_eval=False, samp_t=True)\n idx = scheduler_perm.last_epoch + scheduler_phi.last_epoch\n loss_test[idx] = test_res['loss']\n acc_test[idx] = test_res['accuracy']\n loss_time[idx, :] = test_res['loss_time']\n acc_time[idx, :] = test_res['acc_time']\n\n np.set_printoptions(precision=2)\n 
np.set_printoptions(suppress=True)\n train_res = utils.train_perm(loaders['train'], model, optimizer_perm, scheduler_perm, criterion,\n params_old=params_before, regularizer=None, nu=1E3, proj_flag=False,\n pen_flag=True, lp_pen=None, tqdm_summary=False)\n scheduler_perm.step()\n\n time_ep = time.time() - time_ep\n print('Outer Iteration %2d, Permutation Iteration %2d, Training Loss: %.3E, Training Accuracy: %.2f, '\n 'Validation Loss: %.3E, Validation Accuracy: %.2f, Time Elapsed: %.2fs' %\n (iter, scheduler_perm.last_epoch, train_res['loss'], train_res['accuracy'], test_res['nll'],\n test_res['accuracy'], time_ep))\n idx = scheduler_perm.last_epoch + scheduler_phi.last_epoch\n loss_train[idx] = train_res['loss']\n acc_train[idx] = train_res['accuracy']\n print('Doubly Stochastic Matrix', optimizer_perm.param_groups[0]['params'][0].data.cpu().detach().numpy())\n utils.sample_permutation(model, optimizer_perm, loaders['train'], loaders['train'], criterion, params_before, k=32)\n print('Permutation Sampled', optimizer_perm.param_groups[0]['params'][0].data.cpu().detach().numpy())\n with torch.no_grad():\n bb = []\n for param, param_o in zip(optimizer_perm.param_groups[0]['params'], params_before):\n bb.append(torch.sum(param * param_o).item())\n print(bb)\n\n params_before = [None] * len(optimizer_phi.param_groups[0]['params'])\n for idx, param in enumerate(optimizer_phi.param_groups[0]['params']):\n params_before[idx] = param.detach().clone()\n\n for epoch in range(1, args.inner_iters_phi + 1):\n for param in optimizer_perm.param_groups[0]['params']:\n param.requires_grad = False\n for param in optimizer_phi.param_groups[0]['params']:\n param.requires_grad = True\n\n test_res = {'loss': np.nan, 'accuracy': np.nan, 'nll': np.nan}\n time_ep = time.time()\n\n if args.curve is None or not has_bn or epoch % args.val_freq[1] == 1 or args.val_freq[1] == 1:\n test_res = utils.test_perm(loaders['test'], model, criterion, regularizer=regularizer,\n train_loader=loaders['train'], bn_eval=False, samp_t=True)\n idx = scheduler_perm.last_epoch + scheduler_phi.last_epoch\n loss_test[idx] = test_res['loss']\n acc_test[idx] = test_res['accuracy']\n loss_time[idx, :] = test_res['loss_time']\n acc_time[idx, :] = test_res['acc_time']\n\n train_res = utils.train_perm(loaders['train'], model, optimizer_phi, scheduler_phi, criterion,\n params_old=params_before, regularizer=regularizer, nu=1E3)\n scheduler_phi.step()\n\n time_ep = time.time() - time_ep\n\n print('Outer Iteration %2d, Curve Iteration %2d, Training Loss: %.3E, Training Accuracy: %.2f, '\n 'Validation Loss: %.3E, Validation Accuracy: %.2f, Time Elapsed: %.2fs' %\n (iter, scheduler_phi.last_epoch, train_res['loss'], train_res['accuracy'], test_res['nll'],\n test_res['accuracy'], time_ep))\n idx = scheduler_perm.last_epoch + scheduler_phi.last_epoch\n loss_train[idx] = train_res['loss']\n acc_train[idx] = train_res['accuracy']\n\ntest_res = utils.test_perm(loaders['test'], model, criterion, regularizer, train_loader=loaders['train'], bn_eval=False,\n samp_t=True)\nloss_test[idx] = test_res['loss']\nacc_test[idx] = test_res['accuracy']\nloss_time[idx, :] = test_res['loss_time']\nacc_time[idx, :] = test_res['acc_time']\n\nif args.model == 'GoogLeNet':\n pam_perm = []\n for perm in model.permutations[1:-1]:\n if len(perm) == 1:\n pam_perm.append(perm[0].cpu().numpy()) \n else: \n sub_list = [] \n for sub_perm in perm:\n sub_list.append(sub_perm.cpu().numpy())\n pam_perm.append(sub_list)\nelse:\n pam_perm = [torch.nonzero(i)[:, 1].cpu().numpy() for i in 
model.permutations]\n pam_perm = pam_perm[1:-1]\n if matching is not None:\n pam_perm = [match_og[match_perm] for (match_og, match_perm) in zip(matching, pam_perm)]\n\nmodel.export_base_parameters(model_turningpt, 1)\nmodel_turningpt_fin = copy.deepcopy(model_turningpt)\nif args.model == 'GoogLeNet':\n model.weight_permutation(model_1)\nelse:\n model.weight_permutation()\nmodel.init_linear()\nmodel.export_base_parameters(model_turningpt, 1)\nfor param_0, param_1 in zip(model_turningpt_fin.parameters(), model_turningpt.parameters()):\n param_0.data += param_1.data\n\nutils.save_checkpoint(\n args.dir,\n total_iters,\n name='checkpoint_align_pam_%s_seeds_%02d_%02d' % (args.alignment, args.seed_a, args.seed_b),\n model_state=model_turningpt_fin.state_dict(),\n optimizer_state_perm=optimizer_perm.state_dict(),\n optimizer_state_phi=optimizer_phi.state_dict()\n)\n\nnp.save('%smatch_pam_%s_seeds_%02d_%02d.npy' % (args.dir_alignment, args.alignment, args.seed_a, args.seed_b), pam_perm)\n\ncurve_data = {'acc_train': acc_train, 'loss_train': loss_train, 'acc_test': acc_test, 'loss_test': loss_test,\n 'iters_perm': args.inner_iters_perm, 'iters_phi': args.inner_iters_phi,\n 'loss_time': loss_time, 'acc_time': acc_time, 'change_perm': change_P}\nnp.save('%scurve_align_pam_%s_seeds_%02d_%02d.npy' % (args.dir2, args.alignment, args.seed_a, args.seed_b), curve_data)\n"
] |
[
[
"numpy.hstack",
"numpy.abs",
"numpy.finfo",
"numpy.all",
"numpy.zeros_like",
"scipy.optimize.linear_sum_assignment",
"numpy.zeros",
"numpy.where",
"numpy.vstack"
],
[
"torch.nn.CrossEntropyLoss",
"torch.optim.lr_scheduler.LambdaLR",
"numpy.random.seed",
"torch.load",
"torch.cuda.manual_seed",
"torch.manual_seed",
"numpy.set_printoptions",
"torch.sum",
"numpy.save",
"numpy.ones",
"torch.no_grad",
"torch.nn.ParameterList",
"torch.cuda.is_available",
"torch.optim.SGD",
"torch.device",
"numpy.load",
"torch.nonzero"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.4",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qai-research/Efficient_Text_Detection
|
[
"e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b",
"e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b",
"e5cfe51148cc4fbf4c4f3afede040e4ebd624e8b"
] |
[
"akaocr/pipeline/pipeline/util.py",
"akaocr/tools/train_recog.py",
"akaocr/models/detec/efficient_heatmap.py"
] |
[
"import math\r\nfrom utils.data.collates import NormalizePAD, ResizeNormalize\r\nfrom PIL import ImageFont, ImageDraw, Image\r\nfrom pathlib import Path\r\nfrom utils.utility import initial_logger\r\nlogger = initial_logger()\r\nimport numpy as np\r\nimport cv2\r\n\r\nimport uuid\r\n\r\nclass AlignCollate(object):\r\n def __init__(self, img_h=32, img_w=128, keep_ratio_with_pad=False):\r\n self.img_h = img_h\r\n self.img_w = img_w\r\n self.keep_ratio_with_pad = keep_ratio_with_pad\r\n\r\n def __call__(self, image):\r\n if self.keep_ratio_with_pad: # same concept with 'Rosetta' paper\r\n resized_max_w = self.img_w\r\n input_channel = 3 if image.mode == 'RGB' else 1\r\n transform = NormalizePAD((input_channel, self.img_h, resized_max_w))\r\n\r\n w, h = image.size\r\n ratio = w / float(h)\r\n if math.ceil(self.img_h * ratio) > self.img_w:\r\n resized_w = self.img_w\r\n else:\r\n resized_w = math.ceil(self.img_h * ratio)\r\n\r\n resized_image = transform(image.resize((resized_w, self.img_h), Image.BICUBIC))\r\n image_tensors = resized_image.unsqueeze(0)\r\n\r\n else:\r\n transform = ResizeNormalize((self.img_w, self.img_h))\r\n image_tensor = transform(image)\r\n image_tensors = image_tensor.unsqueeze(0)\r\n\r\n return image_tensors # (1, c, h, w) \r\n\r\nclass Visualizer:\r\n \"\"\"\r\n Utility class for visualizing image.\r\n\r\n Attributes\r\n ----------\r\n output_folder : str\r\n the path to output folder\r\n pre : str\r\n the prefix to append to out image's name\r\n suf : str\r\n the suffix to append to out image's name\r\n\r\n Methods\r\n -------\r\n imwrite(image, file_name)\r\n write the image to output_folder\r\n \"\"\"\r\n\r\n def __init__(self, output_folder='./', pre='', suf='random'):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n output_folder : str\r\n the path to output folder\r\n pre : str, optional, default: \"\"\r\n the prefix to be appended before the given image's name\r\n suf : str or \"random\", optional, default: \"random\"\r\n the suffix to be appended after the given image's name\r\n \"\"\"\r\n print(2,output_folder)\r\n self.output_folder = Path(output_folder)\r\n self.pre = pre\r\n self.suf = suf\r\n\r\n def imwrite(self, image, file_name):\r\n \"\"\"\r\n Parameters\r\n ----------\r\n image : numpy array\r\n the image to be written to file\r\n file_name : str\r\n the image's file name\r\n \"\"\"\r\n file_path = Path(file_name)\r\n name_base = file_path.stem\r\n\r\n if self.suf == 'random':\r\n post = str(uuid.uuid4())[:8]\r\n else:\r\n post = self.suf\r\n\r\n new_name_base = self.pre + name_base + post\r\n file_type = file_path.suffix\r\n write_file_path = self.output_folder.joinpath(new_name_base + file_type)\r\n\r\n if not self.output_folder.exists():\r\n self.output_folder.mkdir()\r\n\r\n is_success, im_buf_arr = cv2.imencode(file_type, image)\r\n im_buf_arr.tofile(str(write_file_path))\r\n\r\n @staticmethod\r\n def draw_zone(image, zone, color, thickness):\r\n points = np.array([zone.points[0].to_array(),\r\n zone.points[1].to_array(),\r\n zone.points[2].to_array(),\r\n zone.points[3].to_array()])\r\n points = points.reshape((-1, 1, 2))\r\n\r\n image = cv2.polylines(image, [points], isClosed=True, color=color, thickness=thickness)\r\n return image\r\n\r\n @staticmethod\r\n def draw_text(image, text, x, y, font_size=18, color=(0, 0, 0), font=None, thickness=3):\r\n img = np.copy(image)\r\n\r\n if font is None:\r\n (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_size,\r\n thickness=thickness)[0]\r\n text_offset_x = x\r\n 
text_offset_y = y\r\n\r\n # make the coords of the box with a small padding of two pixels\r\n box_coords = ((text_offset_x, text_offset_y),\r\n (text_offset_x + text_width + 2, text_offset_y - text_height - 2))\r\n\r\n cv2.rectangle(img, box_coords[0], box_coords[1], (255, 255, 255), cv2.FILLED)\r\n cv2.putText(img, text, (text_offset_x, text_offset_y), cv2.FONT_HERSHEY_SIMPLEX,\r\n font_size, color, thickness)\r\n else:\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n im_pil = Image.fromarray(img)\r\n draw = ImageDraw.Draw(im_pil)\r\n u_font = ImageFont.truetype(font, font_size)\r\n draw.text((x, y), text, font=u_font, fill=color)\r\n\r\n img = cv2.cvtColor(np.array(im_pil), cv2.COLOR_RGB2BGR)\r\n return img\r\n\r\n def visualizer(self, image_ori, contours=None, boxes=None, lines=None, bcolor=(0, 255, 0), texts=None,\r\n font=None, font_size=30, thick=2, windows=None, show=False, name='demo', tcolor=(255, 0, 0), \r\n gt_text=None, gt_color=(0, 0, 255)):\r\n image = image_ori.copy()\r\n imshape = image.shape\r\n if len(imshape) == 2:\r\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\r\n if windows is None:\r\n windows = imshape[:2]\r\n\r\n if contours is not None:\r\n image = cv2.drawContours(image, contours.astype(int), -1, bcolor, thick)\r\n if texts is not None:\r\n if gt_text is None:\r\n for con, tex in zip(contours, texts):\r\n image = self.draw_text(image, tex, con[3][0], con[3][1], font=font, font_size=font_size,\r\n color=tcolor)\r\n else:\r\n for con, tex, gt in zip(contours, texts, gt_text):\r\n if tex == gt:\r\n image = self.draw_text(image, tex, con[3][0], con[3][1], font=font, font_size=font_size,\r\n color=gt_color)\r\n else:\r\n image = self.draw_text(image, tex, con[3][0], con[3][1], font=font, font_size=font_size,\r\n color=tcolor)\r\n\r\n elif boxes is not None:\r\n for b in boxes:\r\n image = cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), bcolor, thick)\r\n if texts is not None:\r\n if gt_text is None:\r\n for box, tex in zip(boxes, texts):\r\n image = self.draw_text(image, tex, box[0], box[3], font=font, font_size=font_size, color=tcolor)\r\n else:\r\n for box, tex, gt in zip(boxes, texts, gt_text):\r\n if tex == gt:\r\n image = self.draw_text(image, tex, box[0], box[3], font=font, font_size=font_size,\r\n color=gt_color)\r\n else:\r\n image = self.draw_text(image, tex, box[0], box[3], font=font, font_size=font_size,\r\n color=tcolor)\r\n\r\n if lines is not None:\r\n for li in lines:\r\n li = li[0]\r\n image = cv2.line(image, (li[0], li[1]), (li[2], li[3]), (255, 0, 0), thick, cv2.LINE_AA)\r\n\r\n if show:\r\n cv2.namedWindow(name, cv2.WINDOW_NORMAL)\r\n cv2.resizeWindow(name, windows[0], windows[1])\r\n cv2.imshow(name, image)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n return image\r\n\r\ndef experiment_loader(name='best_accuracy.pth', type='detec', data_path=\"../\"):\r\n data_path = Path(data_path)\r\n if type == 'detec':\r\n saved_model_path = 'data/exp_detec/test'\r\n elif type == 'recog':\r\n saved_model_path = 'data/exp_recog/test'\r\n saved_model = data_path.joinpath(saved_model_path, name)\r\n if not saved_model.exists():\r\n logger.warning(f\"No saved model name {name} in {saved_model_path}\")\r\n logger.warning(f\"Load latest saved model\")\r\n saved_model_list = sorted(data_path.joinpath(saved_model_path).glob('*.pth'))\r\n if len(saved_model_list)<1:\r\n raise Exception(\"No model for experiment \", name, \" in \", data_path.joinpath(saved_model_path))\r\n saved_model = str(saved_model_list[-1])\r\n return saved_model\r\n",
"# -*- coding: utf-8 -*-\n\"\"\"\n_____________________________________________________________________________\nCreated By : Nguyen Huu Kim - Kimnh3\nCreated Date: Mar 31, 2021 6:31pm GMT+0700\nProject : AkaOCR core\n_____________________________________________________________________________\n\nThis file contain code for train recog\n_____________________________________________________________________________\n\"\"\"\nimport sys\nimport torch\n\nsys.path.append(\"../\")\nfrom models.recog.atten import Atten\nfrom engine import Trainer\nfrom engine.config import setup, parse_base\nfrom engine.trainer.loop import CustomLoopHeat, CustomLoopAtten\nfrom engine.build import build_dataloader\nfrom engine.metric.accuracy import RecogAccuracy, DetecAccuracy\nfrom engine.metric.evaluation import DetecEvaluation, RecogEvaluation\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\ndef test_recog(args):\n cfg = setup(\"recog\", args)\n cfg.SOLVER.DATA_SOURCE = args.data_recog\n model = Atten(cfg)\n model.to(device=cfg.SOLVER.DEVICE)\n\n evaluate = RecogEvaluation(cfg)\n acc = RecogAccuracy(cfg)\n lossc = CustomLoopAtten(cfg)\n train_loader = build_dataloader(cfg, args.data_recog)\n test_loader = build_dataloader(cfg, args.data_test_recog)\n trainer = Trainer(cfg, model, train_loader=train_loader, test_loader=test_loader, custom_loop=lossc, accuracy=acc,\n evaluation=evaluate, resume=True)\n trainer.do_train()\n\ndef main():\n parser = parse_base()\n parser.add_argument('--data_recog', type=str, default=\"../data/data_recog/train\", help='path to recog data')\n parser.add_argument('--data_test_recog', type=str, default=\"../data/data_recog/val\", help='path to test recog data')\n args = parser.parse_args()\n test_recog(args)\n\nif __name__ == '__main__':\n main()",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n_____________________________________________________________________________\nCreated By : Nguyen Ngoc Nghia - Nghiann3\nCreated Date: Fri March 12 13:00:00 VNT 2021\nProject : AkaOCR core\n_____________________________________________________________________________\n\nThis file contains heatmap model with efficientnet backbone for text detection\n_____________________________________________________________________________\n\"\"\"\nimport torch\nfrom torch import nn\nfrom models.modules.biFPN import BiFPN, SeparableConvBlock\nfrom models.modules.backbones.EfficientNet import EfficientNet\nfrom models.modules.utils import MemoryEfficientSwish\n\nclass HEAT_EFFICIENT(nn.Module):\n def __init__(self, num_classes=2, compound_coef=0, **kwargs):\n super(HEAT_EFFICIENT, self).__init__()\n self.compound_coef = compound_coef\n self.backbone_compound_coef = [0, 1, 2, 3, 4, 5, 6, 6, 7]\n self.fpn_num_filters = [64, 88, 112, 160, 224, 288, 384, 384, 384]\n self.fpn_cell_repeats = [3, 4, 5, 6, 7, 7, 8, 8, 8]\n # self.input_sizes = [512, 640, 768, 896, 1024, 1280, 1280, 1536, 1536]\n self.box_class_repeats = [3, 3, 3, 4, 4, 4, 5, 5, 5]\n self.pyramid_levels = [7, 7, 7, 7, 7, 7, 7, 7, 8]\n self.num_classes = num_classes\n \n conv_channel_coef = {\n # the channels of P3/P4/P5.\n 0: [16, 24, 40, 112, 320],\n 1: [16, 24, 40, 112, 320],\n 2: [16, 24, 48, 120, 352],\n 3: [24, 32, 48, 136, 384],\n 4: [24, 32, 56, 160, 448],\n 5: [24, 40, 64, 176, 512],\n 6: [32, 40, 72, 200, 576],\n 7: [32, 40, 72, 200, 576],\n 8: [32, 48, 80, 224, 640],\n }\n\n self.bifpn = nn.Sequential(\n *[BiFPN(self.fpn_num_filters[self.compound_coef],\n conv_channel_coef[compound_coef],\n True if _ == 0 else False,\n attention=True if compound_coef < 6 else False,\n use_p8=compound_coef > 7)\n for _ in range(self.fpn_cell_repeats[compound_coef])])\n \n self.header = Header(in_channels=self.fpn_num_filters[self.compound_coef],\n num_classes=self.num_classes,\n num_layers=self.box_class_repeats[self.compound_coef],\n )\n\n self.backbone_net = EfficientNet(self.backbone_compound_coef[compound_coef])\n\n def freeze_bn(self):\n for m in self.modules():\n if isinstance(m, nn.BatchNorm2d):\n m.eval()\n\n def forward(self, inputs):\n p1, p2, p3, p4, p5 = self.backbone_net(inputs)\n features = (p1, p2, p3, p4, p5)\n\n features = self.bifpn(features)[0]\n feat = self.header(features)\n return feat.permute(0, 2, 3, 1), features\n\nclass Header(nn.Module):\n def __init__(self, in_channels, num_classes, num_layers, onnx_export=False):\n super(Header, self).__init__()\n self.num_classes = num_classes\n self.num_layers = num_layers\n self.conv_list = nn.ModuleList(\n [SeparableConvBlock(in_channels, in_channels, norm=False, activation=False) for i in range(num_layers)])\n self.bn_list = nn.ModuleList([nn.BatchNorm2d(in_channels, momentum=0.01, eps=1e-3) for i in range(num_layers)])\n self.header = SeparableConvBlock(in_channels, num_classes, norm=False, activation=False)\n self.swish = MemoryEfficientSwish() if not onnx_export else Swish()\n\n def forward(self, inputs):\n feat = inputs\n for i, bn, conv in zip(range(self.num_layers), self.bn_list, self.conv_list):\n feat = conv(feat) #conv with the same size\n feat = bn(feat) #apply batch normalization\n feat = self.swish(feat) #appply sigmoid()\n feat = self.header(feat) \n return feat"
] |
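Editor's note: `AlignCollate.__call__` in the first file of this row picks the resized width by scaling the image's aspect ratio to the target height and capping the result at `img_w`. A minimal sketch of just that sizing rule, with the repo's `NormalizePAD`/`ResizeNormalize` transforms left out; the helper name `resized_width` is illustrative, not from the repo:

```python
# Minimal sketch of the width-sizing rule in AlignCollate above.
import math

def resized_width(w: int, h: int, img_h: int = 32, img_w: int = 128) -> int:
    """Scale width to preserve aspect ratio at img_h, capped at img_w."""
    ratio = w / float(h)
    return min(img_w, math.ceil(img_h * ratio))

if __name__ == "__main__":
    # A very wide crop saturates at img_w; a narrow one keeps its ratio.
    print(resized_width(300, 50))  # -> 128 (capped)
    print(resized_width(60, 40))   # -> 48
```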
[
[
"numpy.copy",
"numpy.array"
],
[
"torch.cuda.is_available"
],
[
"torch.nn.BatchNorm2d"
]
] |
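Editor's note: among the APIs extracted for this row, `torch.nn.BatchNorm2d` comes from `HEAT_EFFICIENT.freeze_bn`, which switches batch-norm layers to inference mode so their running statistics stay fixed. A small self-contained sketch of that pattern (the toy `nn.Sequential` model is illustrative):

```python
# Sketch of the freeze_bn pattern from HEAT_EFFICIENT above.
import torch
from torch import nn

def freeze_bn(module: nn.Module) -> None:
    # Put every BatchNorm2d submodule into eval mode, leaving the rest untouched.
    for m in module.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.eval()

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
net.train()
freeze_bn(net)
print(net[1].training)  # False: BN stays frozen while the rest trains
```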
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gangiman/pytorch-lightning
|
[
"9b31272cf0f3079a244944096b4a81eec20fe555",
"9b31272cf0f3079a244944096b4a81eec20fe555"
] |
[
"tests/base/utils.py",
"tests/base/debug.py"
] |
[
"import os\nfrom argparse import Namespace\n\nimport numpy as np\nimport torch\n\n# from pl_examples import LightningTemplateModel\nfrom pytorch_lightning import Trainer\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.loggers import TestTubeLogger, TensorBoardLogger\nfrom tests.base import LightningTestModel, EvalModelTemplate\nfrom tests.base.datasets import PATH_DATASETS\n\n# generate a list of random seeds for each test\nRANDOM_PORTS = list(np.random.randint(12000, 19000, 1000))\nROOT_SEED = 1234\ntorch.manual_seed(ROOT_SEED)\nnp.random.seed(ROOT_SEED)\nRANDOM_SEEDS = list(np.random.randint(0, 10000, 1000))\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n\ndef assert_speed_parity(pl_times, pt_times, num_epochs):\n\n # assert speeds\n max_diff_per_epoch = 0.9\n pl_times = np.asarray(pl_times)\n pt_times = np.asarray(pt_times)\n diffs = pl_times - pt_times\n diffs = diffs / num_epochs\n\n assert np.alltrue(diffs < max_diff_per_epoch), \\\n f\"lightning was slower than PT (threshold {max_diff_per_epoch})\"\n\n\ndef run_model_test_no_loggers(trainer_options, model, min_acc=0.50):\n # save_dir = trainer_options['default_root_dir']\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n\n # correct result and ok accuracy\n assert result == 1, 'amp + ddp model failed to complete'\n\n # test model loading\n pretrained_model = load_model(trainer.logger,\n trainer.checkpoint_callback.dirpath,\n path_expt=trainer_options.get('default_root_dir'))\n\n # test new model accuracy\n test_loaders = model.test_dataloader()\n if not isinstance(test_loaders, list):\n test_loaders = [test_loaders]\n\n for dataloader in test_loaders:\n run_prediction(dataloader, pretrained_model, min_acc=min_acc)\n\n if trainer.use_ddp:\n # on hpc this would work fine... but need to hack it for the purpose of the test\n trainer.model = pretrained_model\n trainer.optimizers, trainer.lr_schedulers = pretrained_model.configure_optimizers()\n\n\ndef run_model_test(trainer_options, model, on_gpu=True):\n save_dir = trainer_options['default_root_dir']\n\n # logger file to get meta\n logger = get_default_testtube_logger(save_dir, False)\n\n # logger file to get weights\n checkpoint = init_checkpoint_callback(logger)\n\n # add these to the trainer options\n trainer_options['checkpoint_callback'] = checkpoint\n trainer_options['logger'] = logger\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n\n # correct result and ok accuracy\n assert result == 1, 'amp + ddp model failed to complete'\n\n # test model loading\n pretrained_model = load_model(logger, trainer.checkpoint_callback.dirpath)\n\n # test new model accuracy\n test_loaders = model.test_dataloader()\n if not isinstance(test_loaders, list):\n test_loaders = [test_loaders]\n\n [run_prediction(dataloader, pretrained_model) for dataloader in test_loaders]\n\n if trainer.use_ddp or trainer.use_ddp2:\n # on hpc this would work fine... 
but need to hack it for the purpose of the test\n trainer.model = pretrained_model\n trainer.optimizers, trainer.lr_schedulers, trainer.optimizer_frequencies = \\\n trainer.init_optimizers(pretrained_model)\n\n # test HPC loading / saving\n trainer.hpc_save(save_dir, logger)\n trainer.hpc_load(save_dir, on_gpu=on_gpu)\n\n\ndef get_default_hparams(continue_training=False, hpc_exp_number=0):\n _ = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n args = {\n 'drop_prob': 0.2,\n 'batch_size': 32,\n 'in_features': 28 * 28,\n 'learning_rate': 0.001 * 8,\n 'optimizer_name': 'adam',\n 'data_root': PATH_DATASETS,\n 'out_features': 10,\n 'hidden_dim': 1000,\n }\n\n if continue_training:\n args['test_tube_do_checkpoint_load'] = True\n args['hpc_exp_number'] = hpc_exp_number\n\n hparams = Namespace(**args)\n return hparams\n\n\ndef get_default_model(lbfgs=False):\n # set up model with these hyperparams\n hparams = get_default_hparams()\n if lbfgs:\n setattr(hparams, 'optimizer_name', 'lbfgs')\n setattr(hparams, 'learning_rate', 0.002)\n\n model = LightningTestModel(hparams)\n\n return model, hparams\n\n\ndef get_default_testtube_logger(save_dir, debug=True, version=None):\n # set up logger object without actually saving logs\n logger = TestTubeLogger(save_dir, name='lightning_logs', debug=debug, version=version)\n return logger\n\n\ndef get_data_path(expt_logger, path_dir=None):\n # some calls contain only experiment not complete logger\n expt = expt_logger.experiment if hasattr(expt_logger, 'experiment') else expt_logger\n # each logger has to have these attributes\n name, version = expt_logger.name, expt_logger.version\n # only the test-tube experiment has such attribute\n if hasattr(expt, 'get_data_path'):\n return expt.get_data_path(name, version)\n # the other experiments...\n if not path_dir:\n path_dir = ROOT_PATH\n path_expt = os.path.join(path_dir, name, 'version_%s' % version)\n # try if the new sub-folder exists, typical case for test-tube\n if not os.path.isdir(path_expt):\n path_expt = path_dir\n return path_expt\n\n\ndef load_model(exp, root_weights_dir, module_class=LightningTestModel, path_expt=None):\n # load trained model\n path_expt_dir = get_data_path(exp, path_dir=path_expt)\n tags_path = os.path.join(path_expt_dir, TensorBoardLogger.NAME_CSV_TAGS)\n\n checkpoints = [x for x in os.listdir(root_weights_dir) if '.ckpt' in x]\n weights_dir = os.path.join(root_weights_dir, checkpoints[0])\n\n trained_model = module_class.load_from_checkpoint(\n checkpoint_path=weights_dir,\n tags_csv=tags_path\n )\n\n assert trained_model is not None, 'loading model failed'\n\n return trained_model\n\n\ndef load_model_from_checkpoint(root_weights_dir, module_class=LightningTestModel):\n # load trained model\n checkpoints = [x for x in os.listdir(root_weights_dir) if '.ckpt' in x]\n weights_dir = os.path.join(root_weights_dir, checkpoints[0])\n\n trained_model = module_class.load_from_checkpoint(\n checkpoint_path=weights_dir,\n )\n\n assert trained_model is not None, 'loading model failed'\n\n return trained_model\n\n\ndef run_prediction(dataloader, trained_model, dp=False, min_acc=0.5):\n # run prediction on 1 batch\n for batch in dataloader:\n break\n\n x, y = batch\n x = x.view(x.size(0), -1)\n\n if dp:\n output = trained_model(batch, 0)\n acc = output['val_acc']\n acc = torch.mean(acc).item()\n\n else:\n y_hat = trained_model(x)\n\n # acc\n labels_hat = torch.argmax(y_hat, dim=1)\n acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)\n acc = torch.tensor(acc)\n acc = 
acc.item()\n\n assert acc >= min_acc, f\"This model is expected to get > {min_acc} in test set (it got {acc})\"\n\n\ndef assert_ok_model_acc(trainer, key='test_acc', thr=0.5):\n # this model should get 0.80+ acc\n acc = trainer.training_tqdm_dict[key]\n assert acc > thr, f\"Model failed to get expected {thr} accuracy. {key} = {acc}\"\n\n\ndef reset_seed():\n seed = RANDOM_SEEDS.pop()\n torch.manual_seed(seed)\n np.random.seed(seed)\n\n\ndef set_random_master_port():\n port = RANDOM_PORTS.pop()\n os.environ['MASTER_PORT'] = str(port)\n\n\ndef init_checkpoint_callback(logger, path_dir=None):\n exp_path = get_data_path(logger, path_dir=path_dir)\n ckpt_dir = os.path.join(exp_path, 'checkpoints')\n os.mkdir(ckpt_dir)\n checkpoint = ModelCheckpoint(ckpt_dir)\n return checkpoint\n",
"import torch\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\n\nimport pytorch_lightning as pl\nfrom tests.base.datasets import TrialMNIST\n\n\n# from test_models import assert_ok_test_acc, load_model, \\\n# clear_save_dir, get_default_testtube_logger, get_default_hparams, init_save_dir, \\\n# init_checkpoint_callback, reset_seed, set_random_master_port\n\n\nclass CoolModel(pl.LightningModule):\n\n def __init(self):\n super().__init__()\n # not the best model...\n self.l1 = torch.nn.Linear(28 * 28, 10)\n\n def forward(self, x):\n return torch.relu(self.l1(x))\n\n def my_loss(self, y_hat, y):\n return F.cross_entropy(y_hat, y)\n\n def training_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n return {'training_loss': self.my_loss(y_hat, y)}\n\n def validation_step(self, batch, batch_idx):\n x, y = batch\n y_hat = self(x)\n return {'val_loss': self.my_loss(y_hat, y)}\n\n def validation_epoch_end(self, outputs):\n avg_loss = torch.stack([x for x in outputs['val_loss']]).mean()\n return avg_loss\n\n def configure_optimizers(self):\n return [torch.optim.Adam(self.parameters(), lr=0.02)]\n\n def train_dataloader(self):\n return DataLoader(TrialMNIST(train=True, num_samples=100), batch_size=16)\n\n def val_dataloader(self):\n return DataLoader(TrialMNIST(train=False, num_samples=50), batch_size=16)\n\n def test_dataloader(self):\n return DataLoader(TrialMNIST(train=False, num_samples=50), batch_size=16)\n"
] |
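Editor's note: `run_prediction` in `tests/base/utils.py` above reduces logits to class labels with `torch.argmax` and scores them with `torch.sum`. A standalone sketch of that accuracy check, assuming `(batch, classes)` logits and integer labels (`batch_accuracy` is an illustrative name, not from the repo):

```python
# Standalone sketch of the accuracy computation inside run_prediction above.
import torch

def batch_accuracy(y_hat: torch.Tensor, y: torch.Tensor) -> float:
    labels_hat = torch.argmax(y_hat, dim=1)          # predicted class per row
    return torch.sum(y == labels_hat).item() / (len(y) * 1.0)

logits = torch.tensor([[2.0, 0.1], [0.2, 1.5], [3.0, 0.0]])
labels = torch.tensor([0, 1, 1])
print(batch_accuracy(logits, labels))  # 2 of 3 correct -> ~0.667
```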
[
[
"torch.mean",
"numpy.random.seed",
"numpy.asarray",
"torch.manual_seed",
"torch.argmax",
"torch.sum",
"torch.tensor",
"numpy.alltrue",
"numpy.random.randint"
],
[
"torch.stack",
"torch.nn.Linear",
"torch.nn.functional.cross_entropy"
]
] |
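Editor's note: several of the calls extracted here (`torch.manual_seed`, `numpy.random.seed`, `numpy.random.randint`) come from the seed-pool pattern at the top of `tests/base/utils.py`: a root seed generates a pool of seeds once, and each test pops a fresh one. A minimal sketch of that pattern:

```python
# Sketch of the seed-pool pattern from tests/base/utils.py above.
import numpy as np
import torch

ROOT_SEED = 1234
torch.manual_seed(ROOT_SEED)
np.random.seed(ROOT_SEED)
RANDOM_SEEDS = list(np.random.randint(0, 10000, 1000))  # drawn once, deterministically

def reset_seed() -> None:
    # Each call consumes one pool entry, re-seeding torch and numpy together.
    seed = int(RANDOM_SEEDS.pop())
    torch.manual_seed(seed)
    np.random.seed(seed)

reset_seed()
```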
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shagunuppal/Riemannian_Geometry_of_Deep_Generative_Models
|
[
"98ebd17119d6065d4d89e93d6e0c11d82d49eb33",
"98ebd17119d6065d4d89e93d6e0c11d82d49eb33",
"98ebd17119d6065d4d89e93d6e0c11d82d49eb33"
] |
[
"CelebA/algo2.py",
"CelebA/preprocess.py",
"CelebA/algo1_without_cuda.py"
] |
[
"# Algorithm 2 : Parallel Translation\r\n\r\nimport torch\r\nimport torchvision\r\nfrom torch import nn\r\nfrom torch import optim\r\nimport torch.nn.functional as F\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision import transforms\r\nfrom torchvision.utils import save_image\r\n#from torchvision import datasets\r\n#from torchvision.datasets import MNIST\r\nfrom torch.autograd.gradcheck import zero_gradients\r\nimport random\r\nimport numpy as np\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nimport sys, os\r\nimport math\r\n\r\nfrom algo1 import *\r\n#from PCA import *\r\n\r\n#model = VAE(784,450,200,20)\r\n#load_model()\r\n\r\ndef find_v0(z):\r\n\tb = z[1]\r\n\ta = z[0]\r\n\tv0 = ((b - a)*1.0) / dt\r\n\treturn v0\r\n\r\ndef compute_SVD(matrix):\r\n\tu, sigma, vh = torch.svd(matrix, some=False)\r\n\treturn (u, sigma, vh)\r\n\r\ndef rad2deg(rad):\r\n\treturn (rad*180/math.pi)\r\n\r\ndef make_sigma(sig):\r\n\tsigma = torch.zeros(3*64*64,32)\r\n\tfor i in range(32):\r\n\t\tsigma[i][i] = sig[i]\r\n\treturn sigma\r\n\r\ndef mod(x):\r\n\t#x1 = x.numpy()\r\n\tx1 = x\r\n\tp = 0\r\n\tfor i in range(3*64*64):\r\n\t\tq = x1[i]\r\n\t\tp = p + q*q\r\n\tp = math.sqrt(p)\r\n\treturn p\r\n\r\n\r\ndef chhota_mod(x):\r\n\t#x1 = x.numpy()\r\n\tx1 = x\r\n\tp = 0\r\n\tfor i in range(32):\r\n\t\tq = x1[i]\r\n\t\tp = p + q*q\r\n\tp = math.sqrt(p)\r\n\treturn p\r\n\r\ndef find_angle(v1,v2):\r\n\tv1 = v1.view(32)\r\n\tv2 = v2.view(32)\r\n\tv = v1*v2\r\n\tv1_mod = chhota_mod(v1)\r\n\t#print (\"v1\",v1_mod)\r\n\tv2_mod = chhota_mod(v2)\r\n\t#print (\"v2\",v2_mod)\r\n\tnum = sum(v)\r\n\t#print (\"sum\",num)\r\n\treturn (num/(v1_mod*v2_mod)) \r\n\r\ndef main2(model,z_collection):\r\n\tu = []\r\n\tv = []\r\n\tv0 = (find_v0(z_collection).data.cuda()).view(32)\r\n\tprint(\"z0\",z_collection[0])\r\n\tprint(\"first_wala\",find_jacobian_1(model, Variable(z_collection[0].data.cuda(), requires_grad=True)))\r\n\tu0 = torch.matmul(find_jacobian_1(model, Variable(z_collection[0].data.cuda(), requires_grad=True)), v0)\r\n\tu.append(u0)\r\n\tprint(\"initial\",u0)\r\n\tv.append(v0)\r\n\tT = len(z_collection) - 1\r\n\t\r\n\tfor i in range (T):\r\n\t\tprint(z_collection[i].size())\r\n\t\txi = model.decode(Variable(z_collection[i].data.cuda().view(1,32),requires_grad=True))\r\n\t\tx1 = find_jacobian_1(model, Variable(z_collection[i+1].data.cuda().view(1,32), requires_grad=True))\r\n\t\tU, sigma, vh = compute_SVD(x1)\r\n\t\tU = torch.FloatTensor(U.cpu()).cuda()\r\n\t\thh = torch.matmul(U, U.t())\r\n\t\tui = torch.matmul(torch.matmul(U, U.t()),u[len(u) - 1].view(3*64*64,1))\r\n\t\tui = u[len(u) - 1]\r\n\t\t#print(\"tensor\",ui)\r\n\t\t#print(\"here\",mod(ui))\r\n\t\tui = (mod( u[len(u) - 1].view(3*64*64) ) / mod(ui)) * ui.view(3*64*64)\r\n\t\tvt_ = find_jacobian(model, Variable(z_collection[i].data.cuda().view(1,32),requires_grad=True))\r\n\t\tvt = torch.matmul(vt_, ui.view(3*64*64,1))\r\n\t\tv.append(vt)\r\n\t\tu.append(ui.view(3*64*64))\r\n\r\n\tut = u[len(u) - 1]\r\n\tvt_ = find_jacobian(model, Variable(z_collection[len(z_collection) - 1].data.cuda().view(1,32),requires_grad=True))\r\n\tvt = torch.mm(vt_, ut.view(3*64*64,1))\r\n\tfor i in range(len(z_collection)):\r\n\t\tmake_image(model,Variable(z_collection[i].data.cuda().view(1,32)), \"algo2_latent\"+(str)(i))\t\r\n\tfor i in range(len(v)):\r\n\t\tmake_image(model,Variable(v[i].view(1,32)),\"algo2_tangent\"+(str)(i))\r\n\t\t#if(i!=0):\r\n\t\t\t#angle = find_angle(v[i-1],v[i])\r\n\t\t\t#angle 
= angle.numpy()\r\n\t\t\t#print(angle)\r\n\treturn vt\r\n\r\nz0 = Variable(torch.FloatTensor(1,32).normal_().cuda(), requires_grad=True)\r\nz1 = Variable(torch.FloatTensor(1,32).normal_().cuda(), requires_grad=True)\r\n\r\nmodel = load_model()\r\nmodel.eval().cuda()\r\nz_ = main1(model,z0,z1)\r\nmain2(model,z_collection=z_)\r\n\r\n\r\n\r\n\r\n\r\n",
"import os\nimport matplotlib.pyplot as plt\nfrom scipy.misc import imresize\n\n# root path depends on your computer\nroot = 'data/CelebA_Images/'\nsave_root = 'data/resized_celebA/'\nresize_size = 64\n\nif not os.path.isdir(save_root):\n os.mkdir(save_root)\nif not os.path.isdir(save_root + 'celebA'):\n os.mkdir(save_root + 'celebA')\n\nimg_list = os.listdir(root)\nprint(img_list)\n# ten_percent = len(img_list) // 10\n\nfor i in range(len(img_list)):\n img = plt.imread(root + img_list[i])\n img = imresize(img, (resize_size, resize_size))\n plt.imsave(fname=save_root + 'celebA/' + img_list[i], arr=img)\n\n if (i % 1000) == 0:\n print('%d images complete' % i)",
"# Algorithm 1 : Geodesic Path\n\nimport torch\nimport torchvision\nfrom torch import nn\nfrom torch import optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.utils import save_image\nfrom torchvision import datasets\nfrom torchvision.datasets import MNIST\nfrom torch.autograd.gradcheck import zero_gradients\nimport random\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport sys, os\nimport math\nfrom vae import *\n\nimport torch._utils\ntry:\n\ttorch._utils._rebuild_tensor_v2\nexcept AttributeError:\n\tdef _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):\n\t\ttensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)\n\t\ttensor.requires_grad = requires_grad\n\t\ttensor._backward_hooks = backward_hooks\n\t\treturn tensor\n\ttorch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2\n\n\nmodel = load_model()\n\nT = 4\ndt = 1.0 / T\nepsilon = 500\nz_collection = []\ndelta_e = torch.FloatTensor(32,64*64*3).zero_()\n\ndef find_mod1(x):\n # x is float tensor\n p = 0\n x = x.view(32).data\n x1 = x.cpu().numpy()\n for i in range(32):\n q = x1[i]\n p += q*q\n return math.sqrt(p)\n\ndef linear_distance(z1,z2):\n x = z2 - z1\n return find_mod1(x)\n\ndef linear_interpolation(model,z0, zt):\n\tz_collection.append(z0)\n\tfor i in range(T-2):\n\t\tz0n = z_collection[len(z_collection)-1] + (zt-z0)*dt\n\t\tz_collection.append(z0n)\n\t\t#print(\"distance_\"+(str)(i+1),linear_distance(z_collection[len(z_collection)-2],z_collection[len(z_collection)-1])) \n\t\t#print(\"arclength_\"+(str)(i+1),arc_length(model, z_collection[len(z_collection)-2],z_collection[len(z_collection)-1])) \n\tz_collection.append(zt) \n\t#print(\"distance_\"+(str)(T-1),linear_distance(z_collection[len(z_collection)-2],z_collection[len(z_collection)-1])) \n\t#print(\"arc_length\"+(str)(T-1),arc_length(model, z_collection[len(z_collection)-2],z_collection[len(z_collection)-1])) \n\ndef find_jacobian(model, z1): #Jh\n\t#print(\"bvfkngklmhjml\",z1)\n\tz = Variable(z1.view(1,32).data, requires_grad=True)\n\tdec = Variable(model.decode(z).data, requires_grad=True)\n\tenc1, enc2 = model.encode(dec)\n\tenc1 = enc1.view(32)\n\t#print(\"enc1\",dec.size())\n\tjacobian = torch.FloatTensor(32,3*64*64).zero_()\n\tfor j in range(32):\n\t\tf = torch.FloatTensor(32).zero_()\n\t\tf[j] = 1\t\n\t\tenc1.backward(f, retain_graph=True)\n\t\tjacobian[j,:] = dec.grad.data\n\t\t#print(jacobian[j,:])\n\t\tdec.grad.data.zero_()\n\t#print(\"jaco\",jacobian)\n\treturn jacobian\n\ndef find_jacobian_1(model, z1): #Jg\n\tz = Variable(z1.view(1,32).data, requires_grad=True)\n\tdec = model.decode(z)\n\tdec =dec.view(64*64*3)\n\tjacobian = torch.FloatTensor(64*64*3,32).zero_()\n\tfor j in range(64*64*3):\n\t\tf = torch.FloatTensor(64*64*3).zero_()\t\n\t\tf[j] = 1\t\n\t\tdec.backward(f, retain_graph=True)\n\t\tjacobian[j,:] = z.grad.data\n\t\tz.grad.data.zero_()\n\tprint(\"jacobian\",jacobian)\n\treturn jacobian\n\n\ndef find_energy(model,z0, z1, z2):\n\t#find_jacobian(model, z1)\n\ta11 = find_jacobian_1(model, z1)\n\ta1 = torch.transpose(find_jacobian_1(model,Variable(z1.data.view(1,32), requires_grad=True)),0,1)\n\ta2 = ((model.decode(Variable(z2.data.view(1,32))) - 2*model.decode(Variable(z1.data.view(1,32)))+model.decode(Variable(z0.data.view(1,32)))).data).view(64*64*3,1)\n\te = -(1 / dt)*(torch.mm(a1,a2))\n\treturn e\n\ndef 
find_etta_i(model,z0,z1,z2):\n\tdt = 1/T\n\tz0 = z0.view(32)\n\tz1 = z1.view(32)\n\tz2 = z2.view(32)\n\ta1 = find_jacobian(model,Variable(z1))\n\tx1 = model.decode(Variable(z2))\n\tx2 = 2*model.decode(Variable(z1))\n\tx3 = model.decode(Variable(z0))\n\ta21 = (x1-x2+x3).data\n\ta2 = a21.view(3*64*64,1)\n\te = -(1 / dt)*torch.mm(a1,a2)\n\treturn e\n\ndef find_mod2(x):\n\t# x is float tensor\n\tp = 0\n\tx = x.view(3*64*64).data\n\tx1 = x.numpy()\n\tfor i in range(64*64*3):\n\t\tq = x1[i]\n\t\tp += q*q\n\treturn p\n\ndef find_mod(x):\n\t# x is float tensor\n\tp = 0\n\tx = x.data\n\tx1 = x.numpy()\n\tfor i in range(784):\n\t\tq = x1[i]\n\t\tp += q*q\n\treturn math.sqrt(p)\n\ndef sum_energy(model):\n\tdelta_e = torch.FloatTensor(20,784).zero_()\n\tfor i in range(1,T-2):\n\t\tdelta_e += find_etta_i(model,z_collection[i-1],z_collection[i],z_collection[i+1])\n\tmulti = (torch.mm((delta_e),torch.transpose(delta_e,0,1)))\n\treturn multi\n\ndef sum_energy_1(model):\n\tdelta_e = torch.FloatTensor(32,1).zero_()\n\tfor i in range(1,T-2):\n\t\tdelta_e += find_energy(model,z_collection[i-1].view(32),z_collection[i].view(32),z_collection[i+1].view(32))\n\treturn find_mod1(Variable(delta_e))\n\n'''def make_image(model,z,name):\n\tx = model.decode(Variable(z.data))\n\tprint(\"decoded\",x)\n\tx = x.view(3,64,64)\n\timg = x.data.cpu().numpy()\n\tplt.imshow(img, interpolation = 'nearest')\n\tplt.savefig('./' + name + '.jpg')'''\n\ndef arc_length(model, z1, z2):\n\txx = 0 \n\txx = model.decode(z2) - model.decode(z1)\n\txx1 = find_mod2(xx)\n\treturn xx1 * T\n\ndef geodesic_length(model, z_collection):\n\txx = 0\n\tfor i in range(1,T):\n\t\txx1 = model.decode(z_collection[i]) - model.decode(z_collection[i-1]) \n\t\txx += find_mod2(xx1)*T\n\treturn xx\n\ndef main1(model,z0,zt):\n\tstep_size = 0.1\n\ty = linear_distance(z0,zt)\n\t#print(\"distance_ends:\",y)\n\tlinear_interpolation(model,z0,zt)\n\t#print(\"geodesic_ends:\",geodesic_length(model, z_collection))\n\t#print(sum_energy_1(model))\n\twhile (sum_energy_1(model) > epsilon):\n\t \t#print(sum_energy_1(model))\n\t\tfor i in range(1,T-1):\n\t\t\tetta_i = find_etta_i(model, z_collection[i-1], z_collection[i], z_collection[i+1])\n\t\t\te1 = step_size*etta_i\n\t\t\tz_collection[i] = z_collection[i].view(32,1)\n\t\t\tz_collection[i] = z_collection[i] - e1\n\tfor p in range(T):\n\t \tmake_image(model,z=z_collection[p].view(1,32),name=str(p))\n\treturn z_collection\n\n\nz0 = Variable(torch.FloatTensor(1,32).normal_(), requires_grad=True)\nzt = Variable(torch.FloatTensor(1,32).normal_(), requires_grad=True)\n\nmain1(model=model,z0=z0, zt=zt)\n\n\n\t\n\n\t\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] |
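Editor's note: the geodesic loop in `algo1_without_cuda.py` above repeatedly moves each interior latent point against the discrete path-energy gradient, `z_i ← z_i − step · η_i`, where `η_i` combines a decoder Jacobian with a second difference of decoded points. A toy sketch of one such update, assuming a stand-in linear `decode` instead of the repo's VAE (all dimensions are toy-sized):

```python
# Toy sketch of the discrete geodesic update from algo1 above.
import torch

T, dt, dim_z, dim_x = 4, 1.0 / 4, 8, 32
decode = torch.nn.Linear(dim_z, dim_x)  # stand-in for the VAE decoder g

def eta(z_prev, z, z_next):
    # -(1/dt) * J_g(z)^T (g(z_next) - 2 g(z) + g(z_prev)), mirroring find_energy
    J = torch.autograd.functional.jacobian(decode, z)  # shape (dim_x, dim_z)
    second_diff = (decode(z_next) - 2 * decode(z) + decode(z_prev)).detach()
    return -(1.0 / dt) * (J.t() @ second_diff)

zs = [torch.randn(dim_z) for _ in range(T + 1)]  # linear init of the path
for i in range(1, T):  # endpoints stay fixed; interior points descend the energy
    zs[i] = zs[i] - 0.1 * eta(zs[i - 1], zs[i], zs[i + 1])
```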
[
[
"matplotlib.use",
"torch.svd",
"torch.FloatTensor",
"torch.zeros"
],
[
"matplotlib.pyplot.imread",
"scipy.misc.imresize",
"matplotlib.pyplot.imsave"
],
[
"torch.mm",
"torch.transpose",
"matplotlib.use",
"torch.FloatTensor",
"torch._utils._rebuild_tensor",
"torch.autograd.Variable"
]
] |
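Editor's note: `torch.svd` in this row comes from the parallel-translation step of `algo2.py`, which projects the transported vector onto the Jacobian's column space via `U Uᵀ` and rescales it to preserve its norm. A toy sketch of that step; note the repo passes `some=False`, whereas the reduced SVD (`some=True`) used here makes the projector explicit:

```python
# Toy sketch of the SVD projection step in algo2's main2 above.
import torch

J = torch.randn(32, 8)                 # stand-in Jacobian (image_dim x latent_dim)
u_prev = torch.randn(32)               # vector being parallel-transported

U, sigma, V = torch.svd(J, some=True)  # reduced SVD: U has shape (32, 8)
u_proj = U @ (U.t() @ u_prev)          # orthogonal projection onto range(J)
u_next = (u_prev.norm() / u_proj.norm()) * u_proj  # restore the original norm
print(u_next.norm().item(), u_prev.norm().item())  # norms agree after rescaling
```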
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"1.0",
"0.19",
"0.18",
"1.2",
"0.12",
"0.10",
"0.17",
"0.16"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kurusugawa-computer/annofab-cli
|
[
"8edad492d439bc8fe64e9471464f545d07aba8b7",
"8edad492d439bc8fe64e9471464f545d07aba8b7",
"8edad492d439bc8fe64e9471464f545d07aba8b7"
] |
[
"annofabcli/job/list_generated_task_history.py",
"annofabcli/task/put_tasks.py",
"annofabcli/filesystem/mask_user_info.py"
] |
[
"import argparse\nimport logging\nfrom typing import Any, Dict, List, Optional\n\nimport annofabapi\nimport pandas\n\nimport annofabcli\nfrom annofabcli import AnnofabApiFacade\nfrom annofabcli.common.cli import AbstractCommandLineInterface, ArgumentParser, build_annofabapi_resource_and_login\nfrom annofabcli.common.enums import FormatArgument\n\nlogger = logging.getLogger(__name__)\n\n\nclass ListTaskCreationHistoryMain:\n def __init__(self, service: annofabapi.Resource):\n self.service = service\n self.facade = AnnofabApiFacade(service)\n\n def get_data_list(self, project_id: str) -> List[Dict[str, Any]]:\n def create_elm(job: Dict[str, Any]) -> Dict[str, Any]:\n job_detail = job[\"job_detail\"]\n return {\n \"project_id\": job[\"project_id\"],\n \"job_id\": job[\"job_id\"],\n \"job_status\": job[\"job_status\"],\n \"generated_task_count\": job_detail[\"generated_task_count\"],\n \"created_datetime\": job[\"created_datetime\"],\n \"updated_datetime\": job[\"updated_datetime\"],\n \"task_generated_rule\": job_detail[\"request\"][\"task_generate_rule\"],\n }\n\n query_params = {\"type\": \"gen-tasks\"}\n job_list = self.service.wrapper.get_all_project_job(project_id, query_params=query_params)\n return [create_elm(job) for job in job_list]\n\n\nclass ListTaskCreationHistory(AbstractCommandLineInterface):\n def main(self):\n args = self.args\n main_obj = ListTaskCreationHistoryMain(self.service)\n data_list = main_obj.get_data_list(args.project_id)\n\n if args.format == FormatArgument.CSV.value:\n data_list = self.search_with_jmespath_expression(data_list)\n df = pandas.DataFrame(data_list)\n self.print_csv(df)\n else:\n self.print_according_to_format(data_list)\n\n\ndef main(args):\n service = build_annofabapi_resource_and_login(args)\n facade = AnnofabApiFacade(service)\n ListTaskCreationHistory(service, facade, args).main()\n\n\ndef parse_args(parser: argparse.ArgumentParser):\n argument_parser = ArgumentParser(parser)\n\n argument_parser.add_project_id()\n\n argument_parser.add_format(\n choices=[FormatArgument.CSV, FormatArgument.JSON, FormatArgument.PRETTY_JSON], default=FormatArgument.CSV\n )\n argument_parser.add_output()\n argument_parser.add_csv_format()\n\n argument_parser.add_query()\n parser.set_defaults(subcommand_func=main)\n\n\ndef add_parser(subparsers: Optional[argparse._SubParsersAction] = None):\n subcommand_name = \"list_task_creation_history\"\n subcommand_help = \"タスクの作成履歴一覧を出力します。\"\n description = \"タスクの作成履歴一覧を出力します。\"\n\n parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description)\n parse_args(parser)\n return parser\n",
"import argparse\nimport copy\nimport json\nimport logging\nimport tempfile\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional\n\nimport pandas\nfrom annofabapi.models import ProjectJobType, ProjectMemberRole\n\nimport annofabcli\nfrom annofabcli import AnnofabApiFacade\nfrom annofabcli.common.cli import (\n AbstractCommandLineInterface,\n ArgumentParser,\n build_annofabapi_resource_and_login,\n get_json_from_args,\n get_wait_options_from_args,\n)\nfrom annofabcli.common.dataclasses import WaitOptions\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_WAIT_OPTIONS = WaitOptions(interval=60, max_tries=360)\n\nTaskInputRelation = Dict[str, List[str]]\n\"\"\"task_idとinput_data_idの構造を表現する型\"\"\"\n\n\nclass PutTask(AbstractCommandLineInterface):\n \"\"\"\n CSVからタスクを登録する。\n \"\"\"\n\n DEFAULT_BY_COUNT = {\"allow_duplicate_input_data\": False, \"input_data_order\": \"name_asc\"}\n\n TASK_THRESHOLD_FOR_JSON = 10\n \"\"\"'--json'が指定されたとき、この値以下ならば`put_task`APIでタスクを登録する。\n この値を超えているならば、`initiate_tasks_generation`APIでタスクを登録する。\"\"\"\n\n def put_task_by_count(self, project_id: str, task_generate_rule: Dict[str, Any]):\n project_last_updated_datetime = self.service.api.get_project(project_id)[0][\"updated_datetime\"]\n task_generate_rule.update({\"_type\": \"ByCount\"})\n request_body = {\n \"task_generate_rule\": task_generate_rule,\n \"project_last_updated_datetime\": project_last_updated_datetime,\n }\n self.service.api.initiate_tasks_generation(project_id, request_body=request_body)\n\n def put_task_from_csv_file(self, project_id: str, csv_file: Path) -> None:\n \"\"\"\n CSVファイルからタスクを登録する。\n\n Args:\n project_id:\n csv_file: タスク登録に関する情報が記載されたCSV\n \"\"\"\n project_title = self.facade.get_project_title(project_id)\n logger.info(f\"{project_title} に対して、{str(csv_file)} からタスクを登録します。\")\n self.service.wrapper.initiate_tasks_generation_by_csv(project_id, csvfile_path=str(csv_file))\n\n def wait_for_completion(\n self,\n project_id: str,\n wait_options: WaitOptions,\n wait: bool = False,\n ) -> None:\n \"\"\"\n CSVファイルからタスクを登録する。\n\n Args:\n project_id:\n wait_options: タスク登録の完了を待つ処理\n wait: タスク登録が完了するまで待つかどうか\n \"\"\"\n logger.info(f\"タスクの登録中です(サーバ側の処理)。\")\n\n if wait:\n MAX_WAIT_MINUTE = wait_options.max_tries * wait_options.interval / 60\n logger.info(f\"最大{MAX_WAIT_MINUTE}分間、タスク登録処理が終了するまで待ちます。\")\n\n result = self.service.wrapper.wait_for_completion(\n project_id,\n job_type=ProjectJobType.GEN_TASKS,\n job_access_interval=wait_options.interval,\n max_job_access=wait_options.max_tries,\n )\n if result:\n logger.info(f\"タスクの登録が完了しました。\")\n else:\n logger.warning(f\"タスクの登録に失敗しました。または、{MAX_WAIT_MINUTE}分間待っても、タスクの登録が完了しませんでした。\")\n\n @staticmethod\n def create_task_relation_dataframe(task_relation_dict: TaskInputRelation) -> pandas.DataFrame:\n tmp_list = []\n for task_id, input_data_id_list in task_relation_dict.items():\n for input_data_id in input_data_id_list:\n tmp_list.append({\"task_id\": task_id, \"input_data_id\": input_data_id})\n df = pandas.DataFrame(tmp_list)\n df[\"input_data_name\"] = \"\"\n return df[[\"task_id\", \"input_data_name\", \"input_data_id\"]]\n\n def main(self):\n args = self.args\n project_id = args.project_id\n super().validate_project(project_id, [ProjectMemberRole.OWNER])\n\n if args.csv is not None:\n csv_file = Path(args.csv)\n self.put_task_from_csv_file(project_id, csv_file)\n elif args.json is not None:\n # CSVファイルに変換する\n task_relation_dict = get_json_from_args(args.json)\n if len(task_relation_dict) > self.TASK_THRESHOLD_FOR_JSON:\n df = 
self.create_task_relation_dataframe(task_relation_dict)\n with tempfile.NamedTemporaryFile() as f:\n df.to_csv(f, index=False, header=None)\n self.put_task_from_csv_file(project_id, Path(f.name))\n else:\n # 登録件数が少ない場合は、put_taskの方が早いのでこちらで登録する。\n task_count = 0\n for task_id, input_data_id_list in task_relation_dict.items():\n task = self.service.wrapper.get_task_or_none(project_id, task_id)\n if task is None:\n logger.debug(f\"タスク'{task_id}'を登録します。\")\n self.service.api.put_task(\n project_id, task_id, request_body={\"input_data_id_list\": input_data_id_list}\n )\n task_count += 1\n else:\n logger.warning(f\"タスク'{task_id}'はすでに存在するため、登録をスキップします。\")\n self.service.api.put_task(\n project_id, task_id, request_body={\"input_data_id_list\": input_data_id_list}\n )\n\n logger.info(f\"{task_count} 件のタスクを登録しました。\")\n return\n\n elif args.by_count is not None:\n by_count = copy.deepcopy(PutTask.DEFAULT_BY_COUNT)\n by_count.update(get_json_from_args(args.by_count))\n self.put_task_by_count(project_id, by_count)\n else:\n raise RuntimeError(\"--csv or --by_count が指定されていません。\")\n\n wait_options = get_wait_options_from_args(get_json_from_args(args.wait_options), DEFAULT_WAIT_OPTIONS)\n self.wait_for_completion(\n project_id,\n wait=args.wait,\n wait_options=wait_options,\n )\n\n\ndef main(args):\n service = build_annofabapi_resource_and_login(args)\n facade = AnnofabApiFacade(service)\n PutTask(service, facade, args).main()\n\n\ndef parse_args(parser: argparse.ArgumentParser):\n argument_parser = ArgumentParser(parser)\n\n argument_parser.add_project_id()\n\n file_group = parser.add_mutually_exclusive_group(required=True)\n file_group.add_argument(\n \"--csv\",\n type=str,\n help=(\n \"タスクに割り当てる入力データが記載されたCSVファイルのパスを指定してください。\"\n \"CSVのフォーマットは、以下の通りです。\"\n \"タスク作成画面でアップロードするCSVと同じフォーマットです。\"\n \"\\n\"\n \" * ヘッダ行なし, カンマ区切り\\n\"\n \" * 1列目: task_id\\n\"\n \" * 2列目: Any(無視される)\\n\"\n \" * 3列目: input_data_id\\n\"\n ),\n )\n\n JSON_SAMPLE = '{\"task1\":[\"input1\",\"input2\"]}'\n file_group.add_argument(\n \"--json\",\n type=str,\n help=(\n \"タスクに割り当てる入力データをJSON形式で指定してください。\"\n \"keyがtask_id, valueがinput_data_idのlistです。\\n\"\n f\"(ex) ``{JSON_SAMPLE}`` \"\n \"``file://`` を先頭に付けるとjsonファイルを指定できます。\"\n ),\n )\n\n file_group.add_argument(\n \"--by_count\",\n type=str,\n help=f\"1つのタスクに割り当てる入力データの個数などの情報を、JSON形式で指定してください。\\n\"\n \"JSONフォーマットは https://annofab.com/docs/api/#operation/initiateTasksGeneration\"\n \" APIのリクエストボディ ``task_generate_rule`` と同じです。\"\n f\"デフォルトは ``{json.dumps(PutTask.DEFAULT_BY_COUNT)}`` です。\\n\"\n \"``file://`` を先頭に付けるとjsonファイルを指定できます。\",\n )\n\n parser.add_argument(\"--wait\", action=\"store_true\", help=(\"タスク登録が完了するまで待ちます。\"))\n\n parser.add_argument(\n \"--wait_options\",\n type=str,\n help=\"タスクの登録が完了するまで待つ際のオプションを、JSON形式で指定してください。\"\n \" ``file://`` を先頭に付けるとjsonファイルを指定できます。\"\n 'デフォルは ``{\"interval\":60, \"max_tries\":360}`` です。'\n \"``interval`` :完了したかを問い合わせる間隔[秒], \"\n \"``max_tires`` :完了したかの問い合わせを最大何回行うか。\",\n )\n\n parser.set_defaults(subcommand_func=main)\n\n\ndef add_parser(subparsers: Optional[argparse._SubParsersAction] = None):\n subcommand_name = \"put\"\n subcommand_help = \"タスクを作成します。\"\n description = \"タスクを作成します。\"\n epilog = \"オーナロールを持つユーザで実行してください。\"\n\n parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description, epilog=epilog)\n parse_args(parser)\n return parser\n",
"import argparse\nimport logging\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Set, Tuple, Union\n\nimport numpy\nimport pandas\n\nimport annofabcli\nfrom annofabcli.common.cli import AbstractCommandLineWithoutWebapiInterface, ArgumentParser, get_list_from_args\nfrom annofabcli.common.exceptions import AnnofabCliException\nfrom annofabcli.common.utils import read_multiheader_csv\n\nlogger = logging.getLogger(__name__)\n\n\nALPHABET_SIZE = 26\nDIGIT = 2\n\n\ndef _create_unique_masked_name(masked_name_set: Set[str], masked_name: str) -> str:\n \"\"\"\n マスクされたユニークな名前を返す。\n `masked_name_set` に含まれている名前なら、末尾に数字をつけて、ユニークにする。\n \"\"\"\n if masked_name not in masked_name_set:\n masked_name_set.add(masked_name)\n return masked_name\n else:\n # 末尾に数字を付ける\n base_masked_name = masked_name[0:DIGIT]\n try:\n # 末尾が数字の場合(末尾の数字が2桁になると処理がおかしくなるけど、許容する)\n now_index = int(masked_name[-1])\n except ValueError:\n # 末尾が数字でない場合\n now_index = 0\n\n new_masked_name = base_masked_name + str(now_index + 1)\n return _create_unique_masked_name(masked_name_set, new_masked_name)\n\n\ndef _create_replaced_dict(name_set: Set[str]) -> Dict[str, str]:\n \"\"\"\n keyがマスク対象の名前で、valueがマスクしたあとの名前であるdictを返します。\n\n Args:\n name_set:\n\n Returns:\n\n \"\"\"\n replaced_dict = {}\n masked_name_set: Set[str] = set()\n for name in name_set:\n masked_name = create_masked_name(name)\n unique_masked_name = _create_unique_masked_name(masked_name_set, masked_name)\n replaced_dict[name] = unique_masked_name\n return replaced_dict\n\n\ndef create_replaced_biography_dict(name_set: Set[str]) -> Dict[str, str]:\n replaced_dict = {}\n masked_name_set: Set[str] = set()\n for name in name_set:\n masked_name = create_masked_name(name)\n unique_masked_name = _create_unique_masked_name(masked_name_set, masked_name)\n replaced_dict[name] = unique_masked_name\n return replaced_dict\n\n\ndef create_masked_name(name: str) -> str:\n \"\"\"\n マスクされた名前を返す。\n AA,ABのように、26*26 パターンを返す\n \"\"\"\n\n def _hash_str(value: str) -> int:\n hash_value = 7\n for c in value:\n # 64bit integer\n hash_value = (31 * hash_value + ord(c)) & 18446744073709551615\n return hash_value\n\n def _num2alpha(num):\n \"\"\"\n 1以上の整数を大文字アルファベットに変換する\n \"\"\"\n if num <= ALPHABET_SIZE:\n return chr(64 + num)\n elif num % 26 == 0:\n return _num2alpha(num // ALPHABET_SIZE - 1) + chr(90)\n else:\n return _num2alpha(num // ALPHABET_SIZE) + chr(64 + num % ALPHABET_SIZE)\n\n SIZE = pow(ALPHABET_SIZE, DIGIT)\n hash_value = (_hash_str(name) % SIZE) + 1\n return _num2alpha(hash_value)\n\n\ndef get_replaced_user_id_set_from_biography(\n df: pandas.DataFrame, not_masked_location_set: Optional[Set[str]] = None\n) -> Set[str]:\n if not_masked_location_set is None:\n filtered_df = df\n else:\n filtered_df = df[df[\"biography\"].map(lambda e: e not in not_masked_location_set)]\n\n return set(filtered_df[\"user_id\"])\n\n\ndef _get_header_row_count(df: pandas.DataFrame) -> int:\n if isinstance(df.columns, pandas.MultiIndex):\n return len(df.columns.levels)\n else:\n return 1\n\n\ndef _get_tuple_column(df: pandas.DataFrame, column: str) -> Union[str, Tuple]:\n \"\"\"\n 列名を返します。ヘッダ行が複数行の場合は、タプルで返します。\n\n Args:\n df:\n column:\n\n Returns:\n\n \"\"\"\n size = _get_header_row_count(df)\n if size >= 2:\n return tuple([column] + [\"\"] * (size - 1))\n else:\n return column\n\n\ndef replace_by_columns(df, replacement_dict: Dict[str, str], main_column: Any, sub_columns: Optional[List[Any]] = None):\n def _get_username(row, main_column: Any, sub_column: 
Any) -> str:\n if row[main_column] in replacement_dict:\n return replacement_dict[row[main_column]]\n else:\n return row[sub_column]\n\n if sub_columns is not None:\n for sub_column in sub_columns:\n get_username_func = partial(_get_username, main_column=main_column, sub_column=sub_column)\n df[sub_column] = df.apply(get_username_func, axis=1)\n\n # 列の型を合わせないとreplaceに失敗するため, dtype を確認する\n if df[main_column].dtype == numpy.dtype(\"object\"):\n df[main_column] = df[main_column].replace(replacement_dict)\n\n\ndef get_masked_username_series(df: pandas.DataFrame, replace_dict_by_user_id: Dict[str, str]) -> pandas.Series:\n \"\"\"\n マスク後のusernameのSeriesを返す\n \"\"\"\n user_id_column = _get_tuple_column(df, \"user_id\")\n username_column = _get_tuple_column(df, \"username\")\n\n def _get_username(row) -> str:\n if row[user_id_column] in replace_dict_by_user_id:\n return replace_dict_by_user_id[row[user_id_column]]\n else:\n return row[username_column]\n\n return df.apply(_get_username, axis=1)\n\n\ndef get_masked_account_id(df: pandas.DataFrame, replace_dict_by_user_id: Dict[str, str]) -> pandas.Series:\n \"\"\"\n マスク後のaccount_idのSeriesを返す\n \"\"\"\n user_id_column = _get_tuple_column(df, \"user_id\")\n account_id_column = _get_tuple_column(df, \"account_id\")\n\n def _get_account_id(row) -> str:\n if row[user_id_column] in replace_dict_by_user_id:\n return replace_dict_by_user_id[row[user_id_column]]\n else:\n return row[account_id_column]\n\n return df.apply(_get_account_id, axis=1)\n\n\ndef get_replaced_biography_set(df: pandas.DataFrame, not_masked_location_set: Optional[Set[str]] = None) -> Set[str]:\n biography_set = set(df[\"biography\"])\n if numpy.nan in biography_set:\n biography_set.remove(numpy.nan)\n\n if not_masked_location_set is None:\n return biography_set\n else:\n for not_masked_location in not_masked_location_set:\n if not_masked_location in biography_set:\n biography_set.remove(not_masked_location)\n\n return biography_set\n\n\ndef create_replacement_dict_by_user_id(\n df: pandas.DataFrame,\n not_masked_biography_set: Optional[Set[str]] = None,\n not_masked_user_id_set: Optional[Set[str]] = None,\n) -> Dict[str, str]:\n \"\"\"\n keyが置換対象のuser_id、valueが置換後のマスクされたuser_idであるdictを作成する。\n \"\"\"\n if \"biography\" in df:\n replaced_user_id_set = get_replaced_user_id_set_from_biography(\n df, not_masked_location_set=not_masked_biography_set\n )\n else:\n replaced_user_id_set = set()\n if not_masked_user_id_set is not None:\n replaced_user_id_set = replaced_user_id_set - not_masked_user_id_set\n\n return _create_replaced_dict(replaced_user_id_set)\n\n\ndef create_replacement_dict_by_biography(\n df: pandas.DataFrame,\n not_masked_biography_set: Optional[Set[str]] = None,\n) -> Dict[str, str]:\n \"\"\"\n keyが置換対象のbiography、valueが置換後のマスクされた biography であるdictを作成する。\n \"\"\"\n replaced_biography_set = get_replaced_biography_set(df, not_masked_location_set=not_masked_biography_set)\n tmp_replace_dict_by_biography = _create_replaced_dict(replaced_biography_set)\n return {key: f\"category-{value}\" for key, value in tmp_replace_dict_by_biography.items()}\n\n\ndef replace_user_info_by_user_id(df: pandas.DataFrame, replacement_dict_by_user_id: Dict[str, str]):\n \"\"\"\n user_id, username, account_id 列を, マスクする。\n\n Args:\n df:\n replacement_dict_by_user_id: user_idの置換前と置換後を示したdict\n\n \"\"\"\n sub_columns = []\n user_id_column = _get_tuple_column(df, \"user_id\")\n\n if \"username\" in df:\n username_column = _get_tuple_column(df, \"username\")\n sub_columns.append(username_column)\n if 
\"account_id\" in df:\n account_id_column = _get_tuple_column(df, \"account_id\")\n sub_columns.append(account_id_column)\n replace_by_columns(df, replacement_dict_by_user_id, main_column=user_id_column, sub_columns=sub_columns)\n\n\ndef replace_biography(\n df: pandas.DataFrame, replacement_dict_by_user_id: Dict[str, str], replacement_dict_by_biography: Dict[str, str]\n):\n \"\"\"\n biography 列を, マスクする。\n\n Args:\n df:\n replacement_dict_by_user_id: user_idの置換前と置換後を示したdict\n\n \"\"\"\n user_id_column = _get_tuple_column(df, \"user_id\")\n biography_column = _get_tuple_column(df, \"biography\")\n\n def _get_biography(row, user_id_column: Any, biography_column: Any) -> str:\n if row[user_id_column] in replacement_dict_by_user_id:\n # マスク対象のユーザなら biographyをマスクする\n biography = row[biography_column]\n if biography in replacement_dict_by_biography:\n return replacement_dict_by_biography[biography]\n else:\n return biography\n else:\n return row[biography_column]\n\n get_biography_func = partial(_get_biography, user_id_column=user_id_column, biography_column=biography_column)\n df[biography_column] = df.apply(get_biography_func, axis=1)\n\n\ndef create_masked_user_info_df(\n df: pandas.DataFrame,\n not_masked_biography_set: Optional[Set[str]] = None,\n not_masked_user_id_set: Optional[Set[str]] = None,\n) -> pandas.DataFrame:\n if \"user_id\" not in df:\n raise AnnofabCliException(f\"`user_id`列が存在しないため、ユーザ情報をマスクできません。\")\n\n replacement_dict_by_user_id = create_replacement_dict_by_user_id(\n df, not_masked_biography_set=not_masked_biography_set, not_masked_user_id_set=not_masked_user_id_set\n )\n\n if \"biography\" in df:\n replacement_dict_by_biography = create_replacement_dict_by_biography(\n df, not_masked_biography_set=not_masked_biography_set\n )\n replace_biography(\n df,\n replacement_dict_by_biography=replacement_dict_by_biography,\n replacement_dict_by_user_id=replacement_dict_by_user_id,\n )\n\n replace_user_info_by_user_id(df, replacement_dict_by_user_id)\n\n return df\n\n\nclass MaskUserInfo(AbstractCommandLineWithoutWebapiInterface):\n def main(self):\n args = self.args\n\n not_masked_biography_set = (\n set(get_list_from_args(args.not_masked_biography)) if args.not_masked_biography is not None else None\n )\n not_masked_user_id_set = (\n set(get_list_from_args(args.not_masked_user_id)) if args.not_masked_user_id is not None else None\n )\n\n csv_header: int = args.csv_header\n csv_path: Path = args.csv\n if csv_header == 1:\n original_df = pandas.read_csv(str(csv_path))\n else:\n original_df = read_multiheader_csv(str(csv_path), header_row_count=csv_header)\n\n df = create_masked_user_info_df(\n df=original_df,\n not_masked_biography_set=not_masked_biography_set,\n not_masked_user_id_set=not_masked_user_id_set,\n )\n self.print_csv(df)\n\n\ndef main(args):\n MaskUserInfo(args).main()\n\n\ndef parse_args(parser: argparse.ArgumentParser):\n argument_parser = ArgumentParser(parser)\n\n parser.add_argument(\"--csv\", type=Path, required=True, help=\"ユーザ情報が記載されたCSVファイルを指定してください。CSVには`user_id`列が必要です。\")\n parser.add_argument(\n \"--not_masked_biography\",\n type=str,\n nargs=\"+\",\n help=\"マスクしないユーザの`biography`を指定してください。\",\n )\n parser.add_argument(\n \"--not_masked_user_id\",\n type=str,\n nargs=\"+\",\n help=\"マスクしないユーザの`user_id`を指定してください。\",\n )\n parser.add_argument(\"--csv_header\", type=int, help=\"CSVのヘッダ行数\", default=1)\n\n argument_parser.add_output()\n argument_parser.add_csv_format()\n\n parser.set_defaults(subcommand_func=main)\n\n\ndef add_parser(subparsers: 
Optional[argparse._SubParsersAction] = None):\n subcommand_name = \"mask_user_info\"\n subcommand_help = \"CSVに記載されたユーザ情報をマスクします。\"\n description = \"CSVに記載されたユーザ情報をマスクします。CSVの`user_id`,`username`,`biography`,`account_id` 列をマスクします。\"\n parser = annofabcli.common.cli.add_parser(subparsers, subcommand_name, subcommand_help, description)\n parse_args(parser)\n return parser\n"
] |
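Editor's note: `create_masked_name` in `mask_user_info.py` above hashes a name into one of 26² uppercase letter codes. A self-contained sketch mirroring that scheme (the example input string is illustrative):

```python
# Sketch of the name-masking scheme in mask_user_info.py above.
ALPHABET_SIZE, DIGIT = 26, 2

def _hash_str(value: str) -> int:
    h = 7
    for c in value:
        h = (31 * h + ord(c)) & 18446744073709551615  # wrap to 64 bits
    return h

def _num2alpha(num: int) -> str:
    # Map an integer >= 1 onto uppercase letters (1 -> 'A', 27 -> 'AA', ...).
    if num <= ALPHABET_SIZE:
        return chr(64 + num)
    elif num % 26 == 0:
        return _num2alpha(num // ALPHABET_SIZE - 1) + chr(90)
    else:
        return _num2alpha(num // ALPHABET_SIZE) + chr(64 + num % ALPHABET_SIZE)

def create_masked_name(name: str) -> str:
    size = ALPHABET_SIZE ** DIGIT  # 676 possible codes
    return _num2alpha(_hash_str(name) % size + 1)

print(create_masked_name("alice"))  # a deterministic one- or two-letter code
```

Because 676 codes can collide, the repo's `_create_unique_masked_name` additionally appends a digit when a code has already been handed out.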
[
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
],
[
"numpy.dtype"
]
] |
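Editor's note: the `numpy.dtype` call extracted for this row is the guard in `replace_by_columns`: `.replace` is applied only when the column holds objects (strings), since replacing on a mismatched dtype would silently do nothing. A minimal sketch (the sample frame and mapping are illustrative):

```python
# Sketch of the dtype guard used in replace_by_columns above.
import numpy
import pandas

df = pandas.DataFrame({"user_id": ["u1", "u2"], "count": [3, 5]})
replacement = {"u1": "AB"}  # masked value, as produced by create_masked_name

if df["user_id"].dtype == numpy.dtype("object"):  # only string-like columns
    df["user_id"] = df["user_id"].replace(replacement)
print(df)
```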
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GReguig/kymatio
|
[
"e0fc10057f5f8bb947068bc40afff8d3d3729052",
"e0fc10057f5f8bb947068bc40afff8d3d3729052"
] |
[
"tests/scattering3d/test_torch_scattering3d.py",
"kymatio/scattering2d/backend/tensorflow_backend.py"
] |
[
"\"\"\" This script will test the submodules used by the scattering module\"\"\"\nimport torch\nimport os\nimport io\nimport numpy as np\nimport pytest\nfrom kymatio import HarmonicScattering3D\nfrom kymatio.scattering3d.utils import generate_weighted_sum_of_gaussians\n\n\nbackends = []\n\nskcuda_available = False\ntry:\n if torch.cuda.is_available():\n from skcuda import cublas\n import cupy\n skcuda_available = True\nexcept:\n Warning('torch_skcuda backend not available.')\n\nif skcuda_available:\n from kymatio.scattering3d.backend.torch_skcuda_backend import backend\n backends.append(backend)\n\n\nfrom kymatio.scattering3d.backend.torch_backend import backend\nbackends.append(backend)\n\nif torch.cuda.is_available():\n devices = ['cuda', 'cpu']\nelse:\n devices = ['cpu']\n\n\ndef relative_difference(a, b):\n return np.sum(np.abs(a - b)) / max(np.sum(np.abs(a)), np.sum(np.abs(b)))\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_FFT3d_central_freq_batch(device, backend):\n # Checked the 0 frequency for the 3D FFT\n for device in devices:\n x = torch.zeros(1, 32, 32, 32, 2).float()\n if device == 'gpu':\n x = x.cuda()\n a = x.sum()\n y = backend.fft(x)\n c = y[:, 0, 0, 0].sum()\n assert (c - a).abs().sum() < 1e-6\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_fft3d_error(backend, device):\n x = torch.zeros(8, 1)\n with pytest.raises(TypeError) as record:\n backend.fft(x)\n assert \"should be complex\" in record.value.args[0]\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\[email protected](\"inplace\", [False, True])\ndef test_cdgmm3d(device, backend, inplace):\n if backend.name == 'torch' or device != 'cpu':\n # Not all backends currently implement the inplace variant\n x = torch.zeros(2, 3, 4, 2).to(device)\n x[..., 0] = 2\n x[..., 1] = 3\n\n y = torch.zeros_like(x)\n y[..., 0] = 4\n y[..., 1] = 5\n\n prod = torch.zeros_like(x)\n prod[..., 0] = x[..., 0] * y[..., 0] - x[..., 1] * y[..., 1]\n prod[..., 1] = x[..., 0] * y[..., 1] + x[..., 1] * y[..., 0]\n\n z = backend.cdgmm3d(x, y, inplace=inplace)\n\n assert (z - prod).norm().cpu().item() < 1e-7\n\n if inplace:\n assert (x - z).norm().cpu().item() < 1e-7\n\n with pytest.warns(UserWarning) as record:\n x = torch.randn((3, 4, 3, 2), device=device)\n x = x[:, 0:3, ...]\n y = torch.randn((3, 3, 3, 2), device=device)\n backend.cdgmm3d(x, y)\n assert \"A is converted\" in record[0].message.args[0]\n\n with pytest.warns(UserWarning) as record:\n x = torch.randn((3, 3, 3, 2), device=device)\n y = torch.randn((3, 4, 3, 2), device=device)\n y = y[:, 0:3, ...]\n backend.cdgmm3d(x, y)\n assert \"B is converted\" in record[0].message.args[0]\n\n with pytest.raises(RuntimeError) as record:\n x = torch.randn((3, 3, 3, 2), device=device)\n y = torch.randn((4, 4, 4, 2), device=device)\n backend.cdgmm3d(x, y)\n assert \"not compatible\" in record.value.args[0]\n\n x = torch.randn((2, 3, 3, 3, 2), device=device)\n y = torch.randn((3, 3, 3, 2), device=device)\n backend.cdgmm3d(x, y)\n\n with pytest.raises(TypeError) as record:\n x = torch.randn((3, 3, 3, 1), device=device)\n y = torch.randn((3, 3, 3, 1), device=device)\n backend.cdgmm3d(x, y)\n assert \"should be complex\" in record.value.args[0]\n\n # This one is a little tricky. 
We can't have the number of dimensions be\n # greater than 4 since that triggers the \"not compatible\" error.\n with pytest.raises(RuntimeError) as record:\n x = torch.randn((3, 3, 2), device=device)\n y = torch.randn((3, 3, 2), device=device)\n backend.cdgmm3d(x, y)\n assert \"must be simply a complex\" in record.value.args[0]\n\n # Create a tensor that behaves like `torch.Tensor` but is technically a\n # different type.\n class FakeTensor(torch.Tensor):\n pass\n\n with pytest.raises(RuntimeError) as record:\n x = FakeTensor(3, 3, 3, 2)\n y = torch.randn(3, 3, 3, 2)\n backend.cdgmm3d(x, y)\n assert \"should be same type\" in record.value.args[0]\n\n if backend.name == 'torch_skcuda':\n x = torch.randn((3, 3, 3, 2), device=torch.device('cpu'))\n y = torch.randn((3, 3, 3, 2), device=torch.device('cpu'))\n with pytest.raises(RuntimeError) as record:\n backend.cdgmm3d(x, y)\n assert \"for CPU tensors\" in record.value.args[0]\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_complex_modulus(backend, device):\n x = torch.randn(4, 3, 2).to(device)\n xm = torch.sqrt(x[..., 0] ** 2 + x[..., 1] ** 2)\n y = backend.modulus(x)\n assert (y[..., 0] - xm).norm() < 1e-7\n assert (y[..., 1]).norm() < 1e-7\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_against_standard_computations(device, backend):\n if backend.name == \"torch_skcuda\" and device == \"cpu\":\n pytest.skip(\"The skcuda backend does not support CPU tensors.\")\n\n file_path = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(file_path, 'test_data_3d.npz'), 'rb') as f:\n buffer = io.BytesIO(f.read())\n data = np.load(buffer)\n x = torch.from_numpy(data['x'])\n scattering_ref = torch.from_numpy(data['Sx'])\n J = data['J']\n L = data['L']\n integral_powers = data['integral_powers']\n\n M = x.shape[1]\n\n batch_size = x.shape[0]\n\n N, O = M, M\n sigma = 1\n\n scattering = HarmonicScattering3D(J=J, shape=(M, N, O), L=L,\n sigma_0=sigma, method='integral',\n integral_powers=integral_powers, max_order=2, backend=backend, frontend='torch')\n\n scattering.to(device)\n x = x.to(device)\n\n order_0 = backend.compute_integrals(x, integral_powers)\n scattering.max_order = 2\n scattering.method = 'integral'\n scattering.integral_powers = integral_powers\n orders_1_and_2 = scattering(x)\n\n order_0 = order_0.cpu().numpy().reshape((batch_size, -1))\n start = 0\n end = order_0.shape[1]\n order_0_ref = scattering_ref[:, start:end].cpu().numpy()\n\n orders_1_and_2 = orders_1_and_2.cpu().numpy().reshape((batch_size, -1))\n start = end\n end += orders_1_and_2.shape[1]\n orders_1_and_2_ref = scattering_ref[:, start:end].cpu().numpy()\n\n order_0_diff_cpu = relative_difference(order_0_ref, order_0)\n orders_1_and_2_diff_cpu = relative_difference(\n orders_1_and_2_ref, orders_1_and_2)\n\n assert order_0_diff_cpu < 1e-6, \"CPU : order 0 do not match, diff={}\".format(order_0_diff_cpu)\n assert orders_1_and_2_diff_cpu < 1e-6, \"CPU : orders 1 and 2 do not match, diff={}\".format(orders_1_and_2_diff_cpu)\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_solid_harmonic_scattering(device, backend):\n if backend.name == \"torch_skcuda\" and device == \"cpu\":\n pytest.skip(\"The skcuda backend does not support CPU tensors.\")\n\n # Compare value to analytical formula in the case of a single Gaussian\n centers = np.zeros((1, 1, 3))\n weights = np.ones((1, 1))\n sigma_gaussian = 3.\n sigma_0_wavelet = 3.\n M, N, O, J, 
L = 128, 128, 128, 1, 3\n grid = np.mgrid[-M // 2:-M // 2 + M, -N // 2:-N // 2 + N, -O // 2:-O // 2 + O]\n grid = grid.astype('float32')\n grid = np.fft.ifftshift(grid, axes=(1, 2, 3))\n x = torch.from_numpy(generate_weighted_sum_of_gaussians(grid, centers,\n weights, sigma_gaussian)).to(device).float()\n scattering = HarmonicScattering3D(J=J, shape=(M, N, O), L=L,\n sigma_0=sigma_0_wavelet,max_order=1, method='integral',\n integral_powers=[1], frontend='torch',backend=backend).to(device)\n\n scattering.max_order = 1\n scattering.method = 'integral'\n scattering.integral_powers = [1]\n\n s = scattering(x)\n\n for j in range(J+1):\n sigma_wavelet = sigma_0_wavelet*2**j\n k = sigma_wavelet / np.sqrt(sigma_wavelet**2 + sigma_gaussian**2)\n for l in range(1, L+1):\n err = torch.abs(s[0, j, l, 0] - k ** l).sum()/(1e-6+s[0, j, l, 0].abs().sum())\n assert err<1e-4\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_larger_scales(device, backend):\n if backend.name == \"torch_skcuda\" and device == \"cpu\":\n pytest.skip(\"The skcuda backend does not support CPU tensors.\")\n\n shape = (32, 32, 32)\n L = 3\n sigma_0 = 1\n\n x = torch.randn((1,) + shape).to(device)\n\n for J in range(3, 4+1):\n scattering = HarmonicScattering3D(J=J, shape=shape, L=L, sigma_0=sigma_0, frontend='torch', backend=backend).to(device)\n scattering.method = 'integral'\n Sx = scattering(x)\n\n\[email protected](\"device\", devices)\[email protected](\"backend\", backends)\ndef test_scattering_batch_shape_agnostic(device, backend):\n if backend.name == \"torch_skcuda\" and device == \"cpu\":\n pytest.skip(\"The skcuda backend does not support CPU tensors.\")\n\n J = 2\n shape = (16, 16, 16)\n\n S = HarmonicScattering3D(J=J, shape=shape)\n\n for k in range(3):\n with pytest.raises(RuntimeError) as ve:\n S(torch.zeros(shape[:k]))\n assert 'at least three' in ve.value.args[0]\n\n x = torch.zeros(shape)\n\n x = x.to(device)\n S.to(device)\n\n Sx = S(x)\n\n assert len(Sx.shape) == 3\n\n coeffs_shape = Sx.shape[-3:]\n\n test_shapes = ((1,) + shape, (2,) + shape, (2, 2) + shape,\n (2, 2, 2) + shape)\n\n for test_shape in test_shapes:\n x = torch.zeros(test_shape)\n\n x = x.to(device)\n\n Sx = S(x)\n\n assert len(Sx.shape) == len(test_shape)\n assert Sx.shape[-3:] == coeffs_shape\n assert Sx.shape[:-3] == test_shape[:-3]\n",
"# Authors: Edouard Oyallon, Sergey Zagoruyko, Muawiz Chaudhary\n\nimport tensorflow as tf\nfrom collections import namedtuple\n\nBACKEND_NAME = 'tensorflow'\n\n\nfrom ...backend.tensorflow_backend import Modulus, cdgmm, concatenate\nfrom ...backend.base_backend import FFT\n\nclass Pad(object):\n def __init__(self, pad_size, input_size, pre_pad=False):\n \"\"\"\n Padding which allows to simultaneously pad in a reflection fashion\n and map to complex.\n Parameters\n ----------\n pad_size : list of 4 integers\n size of padding to apply.\n input_size : list of 2 integers\n size of the original signal\n pre_pad : boolean\n if set to true, then there is no padding, one simply adds the imaginarty part.\n \"\"\"\n self.pre_pad = pre_pad\n self.pad_size = pad_size\n\n def __call__(self, x):\n if self.pre_pad:\n return x\n else:\n paddings = [[0, 0]] * len(x.shape[:-2])\n paddings += [[self.pad_size[0], self.pad_size[1]], [self.pad_size[2], self.pad_size[3]]]\n return tf.cast(tf.pad(x, paddings, mode=\"REFLECT\"), tf.complex64)\n\ndef unpad(in_):\n \"\"\"\n Slices the input tensor at indices between 1::-1\n Parameters\n ----------\n in_ : tensor_like\n input tensor\n Returns\n -------\n in_[..., 1:-1, 1:-1]\n \"\"\"\n return in_[..., 1:-1, 1:-1]\n\nclass SubsampleFourier(object):\n \"\"\" Subsampling of a 2D image performed in the Fourier domain.\n\n Subsampling in the spatial domain amounts to periodization\n in the Fourier domain, hence the formula.\n\n Parameters\n ----------\n x : tensor_like\n input tensor with at least three dimensions.\n k : int\n integer such that x is subsampled by k along the spatial variables.\n\n Returns\n -------\n out : tensor_like\n Tensor such that its Fourier transform is the Fourier\n transform of a subsampled version of x, i.e. in\n F^{-1}(out)[u1, u2] = F^{-1}(x)[u1 * k, u2 * k]\n\n \"\"\"\n def __call__(self, x, k):\n y = tf.reshape(x, (-1, k, x.shape[1] // k, k, x.shape[2] // k))\n\n out = tf.reduce_mean(y, axis=(1, 3))\n return out\n\n\n\nbackend = namedtuple('backend', ['name', 'cdgmm', 'modulus', 'subsample_fourier', 'fft', 'Pad', 'unpad', 'concatenate'])\n\nbackend.name = 'tensorflow'\nbackend.cdgmm = cdgmm\nbackend.modulus = Modulus()\nbackend.subsample_fourier = SubsampleFourier()\nbackend.fft = FFT(lambda x: tf.signal.fft2d(x, name='fft2d'),\n lambda x: tf.signal.ifft2d(x, name='ifft2d'),\n lambda x: tf.math.real(tf.signal.ifft2d(x, name='irfft2d')),\n lambda x: None)\nbackend.Pad = Pad\nbackend.unpad = unpad\nbackend.concatenate = lambda x: concatenate(x, -3)\n"
] |
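The backend tests in the code column above store complex tensors as real tensors with a trailing dimension of size 2 holding the real and imaginary parts. A minimal sketch of that convention, assuming only that PyTorch is installed (this mirrors what test_complex_modulus checks; it is not the library's own API):

    import torch

    x = torch.randn(4, 3, 2)                        # last axis: (real, imag)
    xm = torch.sqrt(x[..., 0] ** 2 + x[..., 1] ** 2)

    # A modulus in this layout keeps the complex shape: the magnitude goes in
    # the real channel and the imaginary channel is zero.
    y = torch.stack([xm, torch.zeros_like(xm)], dim=-1)
    assert (y[..., 0] - xm).norm() < 1e-7
    assert y[..., 1].norm() == 0.0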
[
[
"torch.abs",
"numpy.abs",
"numpy.sqrt",
"torch.zeros",
"torch.sqrt",
"torch.randn",
"torch.zeros_like",
"torch.from_numpy",
"numpy.ones",
"numpy.fft.ifftshift",
"torch.cuda.is_available",
"torch.device",
"numpy.load",
"numpy.zeros"
],
[
"tensorflow.signal.ifft2d",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.pad",
"tensorflow.signal.fft2d"
]
] |
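SubsampleFourier in the TensorFlow backend above subsamples in the Fourier domain by periodization: reshaping the spectrum into k-by-k blocks and averaging them is equivalent to keeping every k-th spatial sample. A small numerical check of that identity, assuming TensorFlow 2.x and NumPy are available:

    import numpy as np
    import tensorflow as tf

    k, n = 2, 8
    x = np.random.randn(1, n, n) + 1j * np.random.randn(1, n, n)
    X = tf.signal.fft2d(tf.constant(x, dtype=tf.complex128))

    # Same reshape/mean as SubsampleFourier.__call__ above.
    y = tf.reshape(X, (-1, k, n // k, k, n // k))
    X_sub = tf.reduce_mean(y, axis=(1, 3))

    # The inverse FFT of the periodized spectrum is the subsampled signal.
    x_sub = tf.signal.ifft2d(X_sub).numpy()
    np.testing.assert_allclose(x_sub, x[:, ::k, ::k], atol=1e-10)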
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
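The Pad/unpad pair in the same backend reflection-pads the two trailing spatial axes and maps the result to complex64, while unpad drops exactly one sample per border, so unpad undoes a padding of size 1. A short sketch, assuming TensorFlow is installed:

    import tensorflow as tf

    x = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4))

    # Pad(pad_size=[1, 1, 1, 1], ...) builds paddings of this shape and casts:
    paddings = [[0, 0], [1, 1], [1, 1]]
    xp = tf.cast(tf.pad(x, paddings, mode="REFLECT"), tf.complex64)
    assert xp.shape == (1, 6, 6)

    # unpad(in_) returns in_[..., 1:-1, 1:-1], recovering the original extent.
    assert tuple(xp[..., 1:-1, 1:-1].shape) == (1, 4, 4)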
adaptivemgmt/datamonster-api
|
[
"1c56440d1e9c48380d5cca54bb195ef7ee9d9472",
"1c56440d1e9c48380d5cca54bb195ef7ee9d9472",
"1c56440d1e9c48380d5cca54bb195ef7ee9d9472"
] |
[
"datamonster_api/lib/datamonster.py",
"datamonster_api/tests/lib/test_datamonster.py",
"datamonster_api/tests/lib/regression.py"
] |
[
"import datetime\nimport fastavro\nimport json\nimport pandas\nimport six\n\nfrom .client import Client\nfrom .company import Company\nfrom .data_group import DataGroup, DataGroupColumn\nfrom .datasource import Datasource\nfrom .errors import DataMonsterError\n\n__all__ = [\"DataMonster\", \"DimensionSet\"]\n\n\nclass DataMonster(object):\n \"\"\"DataMonster object. Main entry point to the library\n\n :param key_id: (str) a user's public key\n :param secret: (str) a user's secret key\n :param server: (optional, str) default to dm.adaptivemgmt.com\n :param verify: (optional, bool) whether to verify the server's TLS certificate\n \"\"\"\n\n company_path = \"/rest/v1/company\"\n datasource_path = \"/rest/v1/datasource\"\n dimensions_path = \"/rest/v1/datasource/{}/dimensions\"\n data_group_path = '/rest/v1/data_group'\n rawdata_path = \"/rest/v2/datasource/{}/rawdata\"\n\n DATAMONSTER_SCHEMA_FIELDS = {\n \"lower_date\": \"start_date\",\n \"upper_date\": \"end_date\",\n \"value\": \"value\",\n }\n\n def __init__(self, key_id, secret, server=None, verify=True):\n self.client = Client(key_id, secret, server, verify)\n self.key_id = key_id\n self.secret = secret\n\n def _get_paginated_results(self, url):\n \"\"\"Get the paginated results starting with this url\"\"\"\n\n next_page = url\n while next_page is not None:\n resp = self.client.get(next_page)\n for result in resp[\"results\"]:\n yield result\n next_page = resp[\"pagination\"][\"nextPageURI\"]\n\n @staticmethod\n def _check_param(company=None, datasource=None):\n if company is not None and not isinstance(company, Company):\n raise DataMonsterError(\"company argument must be a Company object\")\n\n if datasource is not None and not isinstance(datasource, Datasource):\n raise DataMonsterError(\"datasource argument must be a Datasource object\")\n\n ##############################################\n # Company methods\n ##############################################\n\n def get_company_by_ticker(self, ticker):\n \"\"\"Get a single company by ticker\n\n :param ticker: Ticker to search for\n\n :return: Single ``Company`` object if any companies exactly match the ticker (case insensitive)\n\n :raises: ``DataMonsterError`` if no companies match ticker\n \"\"\"\n ticker = ticker.lower()\n companies = self.get_companies(ticker)\n for company in companies:\n if company.ticker is not None and company.ticker.lower() == ticker:\n return company\n\n raise DataMonsterError(\"Could not find company with ticker {}\".format(ticker))\n\n def get_company_by_id(self, company_id):\n \"\"\"Get a single company by id\n\n :param company_id: (str or int) unique internal identifier for the desired company.\n Can take str form e.g. '718', or int form, e.g. 
707.\n In order to find the id of a frequently used company,\n find the company by ticker and call ``.pk`` on the resulting ``Company`` object\n\n :return: Single ``Company`` object if any company matches the id\n\n :raises: ``DataMonsterError`` if no company matches id\n \"\"\"\n company = self.get_company_details(company_id)\n company[\"uri\"] = self._get_company_path(company_id)\n return self._company_result_to_object(company, has_details=True)\n\n def get_companies(self, query=None, datasource=None):\n \"\"\"Get available companies\n\n :param query: Optional query that will restrict companies by ticker or name\n :param datasource: Optional ``Datasource`` object that restricts companies to those\n covered by the given data source\n\n :return: Iterator of ``Company`` objects\n \"\"\"\n params = {}\n if query:\n params[\"q\"] = query\n if datasource:\n self._check_param(datasource=datasource)\n params[\"datasourceId\"] = datasource.id\n\n url = self.company_path\n if params:\n url = \"\".join([url, \"?\", six.moves.urllib.parse.urlencode(params)])\n\n companies = self._get_paginated_results(url)\n return six.moves.map(self._company_result_to_object, companies)\n\n def get_company_details(self, company_id):\n \"\"\"Get details for the given company\n\n :param company_id: (str or int) unique internal identifier for company.\n See the method |br|\n `get_company_by_id <api.html#datamonster_api.DataMonster.get_company_by_id>`__\n for more info on ``company_id``.\n :return: (dict) details (metadata) for this company, providing basic information.\n \"\"\"\n path = self._get_company_path(company_id)\n return self.client.get(path)\n\n def _get_company_path(self, company_id):\n return \"{}/{}\".format(self.company_path, company_id)\n\n def _company_result_to_object(self, company, has_details=False):\n company_inst = Company(\n company[\"id\"], company[\"ticker\"], company[\"name\"], company[\"uri\"], self\n )\n\n if has_details:\n company_inst.set_details(company)\n return company_inst\n\n ##############################################\n # Datasource methods\n ##############################################\n\n def get_datasources(self, query=None, company=None):\n \"\"\"Get available datasources\n\n :param query: (str) Optional query that will restrict data sources by name or provider name\n :param company: Optional ``Company`` object that restricts data sources to those that cover\n the given company\n\n :return: Iterator of ``Datasource`` objects\n \"\"\"\n params = {}\n if query:\n params[\"q\"] = query\n if company:\n self._check_param(company=company)\n params[\"companyId\"] = company.id\n\n url = self.datasource_path\n if params:\n url = \"\".join([url, \"?\", six.moves.urllib.parse.urlencode(params)])\n\n datasources = self._get_paginated_results(url)\n return six.moves.map(self._datasource_result_to_object, datasources)\n\n def get_datasource_by_name(self, name):\n \"\"\"Given a name, try to find a data source of that name\n\n :param name: (str)\n\n :return: Single ``Datasource`` object with the given name\n\n :raises: ``DataMonsterError`` if no data source matches the given name\n \"\"\"\n for ds in self.get_datasources(query=name):\n if ds.name.lower() == name.lower():\n return ds\n raise DataMonsterError(\n \"Did not find a data source matching the name {!r}\".format(name)\n )\n\n def get_datasource_by_id(self, datasource_id):\n \"\"\"Given a data source UUID (universal unique identifier),\n return the corresponding ``Datasource`` object.\n To find the UUID for a data source, first find 
it by name, then call ``.id``\n on the resulting ``Datasource`` object.\n\n :param datasource_id: (str)\n\n :return: Single ``Datasource`` object with the given id\n\n :raises: ``DataMonsterError`` if no data source matches the given id\n \"\"\"\n datasource = self.get_datasource_details(datasource_id)\n datasource[\"uri\"] = self._get_datasource_path(datasource_id)\n return self._datasource_result_to_object(datasource, has_details=True)\n\n def get_datasource_details(self, datasource_id):\n \"\"\"Get details (metadata) for the data source corresponding to the given UUID\n\n :param datasource_id: (str) See the method |br|\n `get_datasource_by_id\n <api.html#datamonster_api.DataMonster.get_datasource_by_id>`__\n for more info on ``datasource_id``\n\n :return: (dict) details (metadata) for this data source,\n providing basic information.\n \"\"\"\n path = self._get_datasource_path(datasource_id)\n return self.client.get(path)\n\n def _get_datasource_path(self, datasource_id):\n return \"{}/{}\".format(self.datasource_path, datasource_id)\n\n def _get_data_group_path(self, data_group_id):\n return '{}/{}'.format(self.data_group_path, data_group_id)\n\n def _get_dimensions_path(self, uuid):\n return self.dimensions_path.format(uuid)\n\n def _datasource_result_to_object(self, datasource, has_details=False):\n ds_inst = Datasource(\n datasource[\"id\"],\n datasource[\"name\"],\n datasource[\"category\"],\n datasource[\"uri\"],\n self,\n )\n if has_details:\n ds_inst.set_details(datasource)\n\n return ds_inst\n\n def get_data(\n self, datasource, company, aggregation=None, start_date=None, end_date=None\n ):\n \"\"\"Get data for data source\n\n :param datasource: ``Datasource`` object to get the data for\n :param company: ``Company`` object to filter the data source on\n :param aggregation: Optional ``Aggregation`` object to specify the aggregation of the data\n :param start_date: Optional filter for the start date of the data\n :param end_date: Optional filter for the end date of the data\n\n See `here <quickstart.html#>`__ for example usage.\n\n :return: pandas.DataFrame\n \"\"\"\n # todo: support multiple companies\n self._check_param(company=company, datasource=datasource)\n\n filters = {\"section_pk\": [int(company.id)]}\n\n if start_date is not None:\n if not datasource.upperDateField:\n raise DataMonsterError(\"This data source does not support date queries\")\n\n if end_date is not None:\n if not datasource.lowerDateField:\n raise DataMonsterError(\"This data source does not support date queries\")\n\n if aggregation is not None and aggregation.period == 'fiscalQuarter' and aggregation.company != company:\n raise DataMonsterError(\"Aggregating by the fiscal quarter of a different company not yet supported\")\n\n schema, df = self.get_data_raw(datasource, filters, aggregation)\n\n if datasource.type == \"datasource\":\n df = self._datamonster_data_mapper(\n self.DATAMONSTER_SCHEMA_FIELDS, schema, df\n )\n\n # Trim the dates on the client side. 
This would be more efficient on the server, but we don't support\n # greater than or less than right now\n if start_date is not None and 'end_date' in df:\n df = df[df.end_date >= pandas.Timestamp(start_date)]\n\n if end_date is not None and 'start_date' in df:\n df = df[df.start_date <= pandas.Timestamp(end_date)]\n\n if \"end_date\" in df:\n df.sort_values(by=\"end_date\", inplace=True)\n return df\n\n def get_data_raw(self, datasource, filters=None, aggregation=None):\n \"\"\"Get raw data for all companies available in the data source.\n\n :param datasource: ``Datasource`` object to get the data for\n :param aggregation: ``Aggregation`` object to specify requested aggregation\n :param filters: dictionary of requested filters\n\n :return: (schema, pandas.DataFrame)\n\n See `here <examples.html#get-data-raw>`__ for example usage.\n \"\"\"\n post_data = {\n 'forecast': False,\n 'valueAggregation': None,\n 'timeAggregation': None,\n }\n\n if filters is not None:\n post_data['filters'] = filters\n if aggregation is not None:\n post_data['timeAggregation'] = aggregation.to_time_aggregation_dictionary(\n datasource.aggregationType\n )\n\n headers = {\"Accept\": \"avro/binary\", 'Content-Type': 'application/json'}\n url = self.rawdata_path.format(datasource.id)\n resp = self.client.post(url, post_data, headers, stream=True)\n return self._avro_to_df(resp.content, datasource.fields)\n\n def get_raw_data(self, *args, **kwargs):\n \"\"\"This function is deprecated. Please use the get_data_raw function instead\"\"\"\n raise DataMonsterError(\"This function has been deprecated. Please use get_data_raw\")\n\n def _avro_to_df(self, avro_buffer, data_types):\n \"\"\"Read an avro structure into a dataframe and minimially parse it\n\n returns: (schema, pandas.Dataframe)\n \"\"\"\n\n def parse_row(row):\n return {\n col[\"name\"]: pandas.to_datetime(row[col[\"name\"]])\n if col[\"data_type\"] == \"date\"\n else row[col[\"name\"]]\n for col in data_types\n }\n\n reader = fastavro.reader(six.BytesIO(avro_buffer))\n metadata = reader.writer_schema.get(\"structure\", ())\n\n if not metadata:\n raise DataMonsterError(\n \"DataMonster does not currently support this request\"\n )\n\n records = [parse_row(r) for r in reader]\n return metadata, pandas.DataFrame.from_records(records)\n\n @staticmethod\n def _datamonster_data_mapper(mapping_fields, schema, df):\n \"\"\"mapping function applied to a ``DataMonster`` data source to format the data\n\n :param mapping_fields (dict): mapping of column names to rename from in the schema\n :param schema (dict): avro schema of the data\n :param df (pandas.DataFrame): data to manipulate\n\n :return: pandas.DataFrame\n \"\"\"\n if df.empty:\n return df\n\n if not set(schema.keys()).issuperset(mapping_fields.keys()):\n raise DataMonsterError(\n \"DataMonster does not currently support this request\"\n )\n\n split_columns = schema.get(\"split\", [])\n rename_columns = {}\n for key, val in mapping_fields.items():\n if len(schema[key]) != 1:\n raise DataMonsterError(\n \"Expected a single defined column for {!r}. 
Got {!r}\".format(\n key, schema[key]\n )\n )\n rename_columns[schema[key][0]] = val\n\n df.rename(columns=rename_columns, inplace=True)\n df[\"dimensions\"] = df.apply(\n lambda row, *splits: {split: row[split] for split in splits},\n args=(split_columns),\n axis=1,\n )\n\n df[\"time_span\"] = df[\"end_date\"] - df[\"start_date\"]\n df[\"end_date\"] -= datetime.timedelta(\n days=1\n ) # Change the format of the end_date\n drop_columns = [col for col in split_columns + [\"section_pk\"] if col in df]\n df.drop(columns=drop_columns, inplace=True)\n return df\n\n def get_dimensions_for_datasource(\n self, datasource, filters=None, add_company_info_from_pks=False\n ):\n \"\"\"Get dimensions (\"splits\") for the data source\n from the DataMonster REST endpoint ``/datasource/<uuid>/dimensions?filters=...``\n where the ``filters`` string is optional.\n\n :param datasource: ``Datasource`` object\n :param filters: (dict): a dict of key/value pairs to filter\n dimensions by\n :param add_company_info_from_pks: (bool): Determines whether return value will include tickers for\n the returned companies. If ``False``, only ``section_pk`` s will be returned.\n\n See `here <examples.html#get-dimensions-for-datasource>`__\n for example usage.\n\n :return: a ``DimensionSet`` object - an iterable through a collection\n of dimension dicts, filtered as requested. See `this documentation <api.html#datamonster_api.DimensionSet>`_\n for more info on ``DimensionSet`` objects.\n\n :raises: ``DataMonsterError`` if ``filters`` is not a dict or is not JSON-serializable.\n Re-raises ``DataMonsterError`` if ``self.client.get()`` raises that.\n \"\"\"\n self._check_param(datasource=datasource)\n\n params = {}\n if filters:\n params[\"filters\"] = self.to_json_checked(filters)\n\n url = self._get_dimensions_path(uuid=datasource.id)\n if params:\n url = \"\".join([url, \"?\", six.moves.urllib.parse.urlencode(params)])\n\n # Let any DataMonsterError from self.client.get() happen -- we don't occlude them\n return DimensionSet(\n url, self, add_company_info_from_pks=add_company_info_from_pks\n )\n\n @staticmethod\n def to_json_checked(filters):\n \"\"\"\n Not \"private\" because `Datasource.get_dimensions()` uses it too\n\n :param filters: dict\n :return: JSON string encoding `filters`. 
Normal exit if `filters` is\n JSON-serializable.\n\n :raises: DataMonsterError if `filters` isn't a dict or can't be JSON-encoded.\n \"\"\"\n if not isinstance(filters, dict):\n raise DataMonsterError(\n \"`filters` must be a dict, got {} instead\".format(\n type(filters).__name__\n )\n )\n try:\n return json.dumps(filters)\n except TypeError as e:\n raise DataMonsterError(\n \"Problem with filters when getting dimensions: {}\".format(e)\n )\n\n ##############################################\n # DataGroup methods\n ##############################################\n\n def get_data_groups(self, query=None):\n \"\"\"Get available data groups\n\n :param query: (str) Optional query that will restrict data groups by name or data source name\n :return: Iterator of ``DataGroup`` objects.\n \"\"\"\n params = {}\n if query is not None:\n params['q'] = query\n\n url = self.data_group_path\n if params:\n url = ''.join([url, '?', six.moves.urllib.parse.urlencode(params)])\n\n datagroups = self._get_paginated_results(url)\n return six.moves.map(self._data_group_result_to_object, datagroups)\n\n def get_data_group_details(self, id):\n \"\"\"Given a data group id, return the corresponding ``DataGroup`` object\n\n :param id: (int)\n\n :return: Single ``DataGroup`` object with the given id\n\n :raises: ``DataMonsterError`` if no data group matches the given id\n \"\"\"\n path = self._get_data_group_path(id)\n return self.client.get(path)\n\n def get_data_group_by_id(self, id):\n \"\"\"Give a data group pk (primary key),\n return the corresponding ``DataGroup`` object.\n To find the pk for a data group, first find it using\n the iterator returned by ``get_data_groups()``, then\n call ``.id`` on the ``DataGroup`` object.\n\n :param id: (int)\n\n :return: Single ``DataGroup`` object with the given id\n\n :raises: ``DataMonsterError`` if no data group matches the given id\n \"\"\"\n dg = self.get_data_group_details(id)\n return self._data_group_result_to_object(dg, has_details=True)\n\n def _data_group_result_to_object(self, data_group, has_details=False):\n columns = [DataGroupColumn(**column) for column in data_group['columns']]\n dg_inst = DataGroup(\n data_group['_id'],\n data_group['name'],\n columns,\n data_group['status'],\n self\n )\n\n if has_details:\n dg_inst.set_details(data_group)\n\n return dg_inst\n\n\nclass DimensionSet(object):\n \"\"\"\n An iterable through a collection of dimensions dictionaries.\n\n Each dimension dictionary has 4 keys:\n ``max_date``, ``min_date``, ``row_count``, and ``split_combination``.\n The first two have values that are dates as strings in ISO format;\n ``split_combination`` points to a dict containing data from all other columns;\n ``row_count`` points to an int specifying how many rows match the dates and all splits in ``split_combination``\n\n \"\"\"\n\n def __init__(self, url, dm, add_company_info_from_pks):\n \"\"\"\n :param url: (string) URL for REST endpoint\n :param dm: DataMonster object\n :param add_company_info_from_pks: (bool) If ``True``, create ticker items from\n ``section_pk`` items.\n \"\"\"\n self._url_orig = url\n\n resp0 = dm.client.get(url)\n\n self._min_date = resp0[\"minDate\"]\n self._max_date = resp0[\"maxDate\"]\n self._row_count = resp0[\"rowCount\"]\n self._dimension_count = resp0[\"dimensionCount\"]\n self._resp = resp0\n\n self._dm = dm\n self._add_company_info_from_pks = bool(add_company_info_from_pks)\n\n # Populated during iteration, maps pk => Company.\n # Contents are not \"settled\" until iteration is complete.\n self._pk2company = 
{}\n\n def __str__(self):\n has_extra_info_str = (\n \"; extra company info\" if self.has_extra_company_info else \"\"\n )\n\n \"{}: {} dimensions, {} rows, from {} to {}{}\".format(\n self.__class__.__name__,\n len(self),\n self._row_count,\n self._min_date,\n self._max_date,\n has_extra_info_str,\n )\n\n def __len__(self):\n \"\"\"\n (int) number of *dimension dicts* in the collection\n \"\"\"\n return self._dimension_count\n\n def __iter__(self):\n \"\"\"Generator that iterates through the dimension dicts in the collection.\n\n Populates self.pk2company during iteration:\n `section_pk`s already in this dict will use the tickers (/names) of `Company`s\n already looked up and saved;\n newly-encountered `section_pk`s will have their corresponding `Company`s saved here\n \"\"\"\n while True:\n resp = self._resp # shorthand\n if not resp:\n return\n\n results_this_page = resp[\"results\"]\n next_page_uri = resp[\"pagination\"][\"nextPageURI\"]\n\n if not results_this_page:\n break\n\n for dimension in results_this_page:\n # do `_camel2snake` *before* possible pk->ticker conversion,\n # as `_create_ticker_items_from_section_pks` assumes snake_case\n # ('split_combination')\n dimension = DimensionSet._camel2snake(dimension)\n if self._add_company_info_from_pks:\n self._create_ticker_items_from_section_pks(dimension)\n yield dimension\n\n if next_page_uri is None:\n break\n\n self._resp = self._dm.client.get(next_page_uri)\n\n # So that attempts to reuse the iterator get nothing.\n # Without this, the last page could be re-yielded\n self._resp = None\n\n @property\n def pk2company(self):\n \"\"\"Empty if ``has_extra_company_info`` is ``False``.\n If ``has_extra_company_info``, this dict maps company pk's (int id's) to ``Company``\n objects. If ``pk`` is a key in the dict, then ``self.pk2company[pk].pk == pk``.\n The pk's in ``pk2company`` are those in the ``section_pk`` items of dimension dicts\n in this collection. (``section_pk`` items are in the ``split_combination`` subdict\n of a dimension dict.)\n\n During an iteration, ``pk2company`` contains all pk's from ``section_pk`` values in\n dimension dicts *that have been yielded so far*. 
Thus, ``pk2company`` is initially\n empty, and isn't fully populated until the iteration completes.\n\n Note that making a *list* of a ``DimensionSet`` performs a complete iteration.\n\n :return: (dict)\n \"\"\"\n return self._pk2company\n\n @property\n def min_date(self):\n \"\"\"\n :return type: (str) min of the ``min_date`` of the dimension dicts\n \"\"\"\n return self._min_date\n\n @property\n def max_date(self):\n \"\"\"\n :return: (str) max of the ``max_date`` of the dimension dicts\n \"\"\"\n return self._max_date\n\n @property\n def row_count(self):\n \"\"\"\n :return: (int) number of rows matching the filters for this ``DimensionSet``\n \"\"\"\n return self._row_count\n\n @property\n def has_extra_company_info(self):\n \"\"\"\n :return: (bool) The value passed as ``add_company_info_from_pks`` to the constructor, coerced\n to *bool*.\n \"\"\"\n return self._add_company_info_from_pks\n\n @staticmethod\n def _camel2snake(dimension_dict):\n \"\"\"Return a dict with four keys changed from camelCase to snake_case;\n `dimension_dict` unchanged\n \"\"\"\n camel2snake = {\n \"splitCombination\": \"split_combination\",\n \"maxDate\": \"max_date\",\n \"minDate\": \"min_date\",\n \"rowCount\": \"row_count\",\n }\n return {camel2snake[k]: dimension_dict[k] for k in dimension_dict}\n\n def _create_ticker_items_from_section_pks(self, dimension):\n \"\"\"\n :param dimension: a dimension dict, with a key 'split_combination'.\n\n :return: `None`\n Mutates the dict `dimension:\n if 'section_pk' in `dimension['split_combination']`, its value::\n\n dimension['split_combination']['section_pk\"]\n\n is a 'section_pk'` or a list of them (we accommodate `None`, too).\n We add a `'ticker'` item to dimension['split_combination'] whose value is\n the ticker or tickers for the pk's in the value of 'section_pk'` --\n more precisely, the value corresponding to any `pk` is:\n\n `self._pk_to_ticker(pk)`\n if ticker is not `None`,\n\n name of company with key `pk`\n if ticker is `None`\n \"\"\"\n combo = dimension[\"split_combination\"]\n if \"section_pk\" in combo:\n value = combo.get(\"section_pk\") # type: int or list[int]\n if value is not None:\n combo[\"ticker\"] = (\n self._pk_to_ticker(value)\n if isinstance(value, int)\n else list(six.moves.map(lambda pk: self._pk_to_ticker(pk), value))\n )\n return dimension\n\n def _pk_to_ticker(self, pk):\n \"\"\"\n :param pk: int -- a section_pk\n\n :return: str --\n\n `self._dm.get_company_from_id(pk).ticker`\n if that is not `None`,\n\n name of company with key `pk`\n otherwise (actual ticker is `None` or empty)\n\n Note that `self._pk2company` basically holds memos for this method: for each `pk`,\n `self._dm.get_company_from_id(pk)` is only called once.\n \"\"\"\n if pk not in self._pk2company:\n self._pk2company[pk] = self._dm.get_company_by_id(pk)\n\n company = self._pk2company[pk]\n return company.ticker or company.name\n",
"import datetime\nimport pandas\nimport pytest\n\nfrom datamonster_api import Aggregation, DataMonsterError, DataMonster, DataGroupColumn\nfrom test_data_group import assert_object_matches_data_group\n\n\ndef _assert_object_matches_datasource(datasource, datasource_obj):\n assert datasource_obj[\"id\"] == datasource.id\n assert datasource_obj[\"name\"] == datasource.name\n assert datasource_obj[\"category\"] == datasource.category\n assert datasource_obj[\"uri\"] == datasource.uri\n\n\ndef test_equality(datasource, other_datasource):\n assert datasource == datasource\n assert other_datasource != datasource\n\n d = {}\n d[datasource] = 1\n d[other_datasource] = 2\n assert len(d) == 2\n assert d[datasource] == 1\n\n\ndef test_datamonster_datasource_mapper():\n from pandas.util.testing import assert_frame_equal\n\n df = pandas.DataFrame.from_records([])\n print(assert_frame_equal(DataMonster._datamonster_data_mapper({}, {}, df), df))\n assert_frame_equal(DataMonster._datamonster_data_mapper({}, {}, df), df)\n\n df = pandas.DataFrame.from_records([{\"apple\": 1, \"banana\": 2, \"cherry\": 3}])\n with pytest.raises(DataMonsterError):\n DataMonster._datamonster_data_mapper({\"garbage\": \"g\"}, {\"split\": \"apple\"}, df)\n\n with pytest.raises(DataMonsterError):\n DataMonster._datamonster_data_mapper(\n {\"fruits\": \"value\"}, {\"fruits\": [\"apple\", \"banana\"]}, df\n )\n\n\ndef test_get_datasources_1(mocker, dm, single_page_datasource_results, company):\n \"\"\"Test getting datasources. multiple pages. various filters\"\"\"\n\n # The resulting datasources should always be the same\n def assert_results_good(results):\n results = list(results)\n assert len(results) == 2\n\n _assert_object_matches_datasource(\n results[0], single_page_datasource_results[\"results\"][0]\n )\n\n _assert_object_matches_datasource(\n results[1], single_page_datasource_results[\"results\"][1]\n )\n\n dm.client.get = mocker.Mock(return_value=single_page_datasource_results)\n\n # ++ No queries, no company\n dm.client.get.reset_mock()\n datasources = dm.get_datasources()\n\n assert_results_good(datasources)\n assert dm.client.get.call_count == 1\n assert dm.client.get.call_args[0][0] == \"/rest/v1/datasource\"\n\n # ++ text query, no company\n dm.client.get.reset_mock()\n datasources = dm.get_datasources(query=\"abc\")\n\n assert_results_good(datasources)\n assert dm.client.get.call_count == 1\n assert dm.client.get.call_args[0][0] == \"/rest/v1/datasource?q=abc\"\n\n # ++ no text query, company\n dm.client.get.reset_mock()\n datasources = dm.get_datasources(company=company)\n\n assert_results_good(datasources)\n assert dm.client.get.call_count == 1\n assert dm.client.get.call_args[0][0] == \"/rest/v1/datasource?companyId={}\".format(\n company.id\n )\n\n # ++ text query, company\n dm.client.get.reset_mock()\n datasources = dm.get_datasources(query=\"abc\", company=company)\n\n assert_results_good(datasources)\n assert dm.client.get.call_count == 1\n assert dm.client.get.call_args[0][\n 0\n ] == \"/rest/v1/datasource?q=abc&companyId={}\".format(company.id)\n\n\ndef test_get_datasources_2(mocker, dm, single_page_datasource_results, company):\n \"\"\"Test getting datasources. 
error states\"\"\"\n\n # Bad datasource\n with pytest.raises(DataMonsterError) as excinfo:\n dm.get_datasources(company=\"abc\")\n\n assert \"company argument must be a Company object\" in excinfo.value.args[0]\n\n\ndef test_get_datasource_by_name(mocker, dm, datasource, other_datasource):\n \"\"\"Test getting datasource by name\"\"\"\n dm.get_datasources = mocker.Mock(return_value=[datasource])\n assert datasource == dm.get_datasource_by_name(\"name\")\n\n dm.get_datasources = mocker.Mock(return_value=[])\n with pytest.raises(DataMonsterError):\n datasource = dm.get_datasource_by_name(\"garbage\")\n\n dm.get_datasources = mocker.Mock(return_value=[datasource, other_datasource])\n with pytest.raises(DataMonsterError):\n datasource = dm.get_datasource_by_name(\"garbage\")\n\n\ndef test_get_datasource_by_id(mocker, dm, datasource_details_result):\n \"\"\"Test getting datasource by uuid\"\"\"\n\n dm.client.get = mocker.Mock(return_value=datasource_details_result)\n\n datasource = dm.get_datasource_by_id(\"abc\")\n\n # Make sure we hit the right endpoint\n assert dm.client.get.call_count == 1\n assert dm.client.get.call_args[0][0] == \"/rest/v1/datasource/abc\"\n\n # a couple of spot checks.\n assert datasource.category == \"category\"\n assert datasource.cadence == \"daily\"\n assert datasource.splitColumns == [\"category\", \"country\"]\n assert datasource.type == \"datasource\"\n\n assert datasource.earliestData == \"2015-01-01\"\n assert datasource.latestData == \"2018-10-01\"\n\n # Make sure we didn't go through the client again for the details\n assert dm.client.get.call_count == 1\n\n\ndef test_get_data_raw_1(mocker, dm, avro_data_file, company, datasource, datasource_details_result):\n \"\"\"Test getting raw data -- calendar quarterly aggregation\"\"\"\n\n datasource.get_details = mocker.Mock(return_value=datasource_details_result)\n dm.client.post = mocker.Mock(return_value=avro_data_file)\n\n filters = {\n 'category': ['Apple iTunes'],\n 'country': ['US'],\n 'section_pk': [company.id]\n }\n\n # Expected values for network calls\n expected_path = \"/rest/v2/datasource/{}/rawdata\".format(datasource.id)\n expected_post_data = {\n 'timeAggregation': {\n 'cadence': 'fiscal quarterly',\n 'aggregationType': 'sum',\n 'includePTD': False,\n 'sectionPk': company.id,\n },\n 'valueAggregation': None,\n 'filters': filters,\n 'forecast': False\n }\n\n agg = Aggregation(period='fiscalQuarter', company=company)\n schema, df = dm.get_data_raw(datasource, aggregation=agg, filters=filters)\n\n assert dm.client.post.call_count == 1\n assert dm.client.post.call_args[0][0] == expected_path\n assert dm.client.post.call_args[0][1] == expected_post_data\n assert dm.client.post.call_args[0][2] == {\"Accept\": \"avro/binary\", 'Content-Type': 'application/json'}\n\n assert len(df.columns) == 5\n assert sorted(df.columns) == [\n \"avg_dollar_per_cust\",\n \"category\",\n \"country\",\n \"period_end\",\n \"period_start\",\n ]\n assert len(df) == 8\n\n assert df.iloc[0][\"category\"] == \"Amazon ex. 
Whole Foods\"\n assert df.iloc[0][\"country\"] == \"US\"\n assert df.iloc[0][\"avg_dollar_per_cust\"] == 38.5165896068141\n assert df.iloc[0][\"period_start\"].date() == datetime.date(2019, 1, 2)\n assert df.iloc[0][\"period_end\"].date() == datetime.date(2019, 1, 3)\n\n assert df.iloc[7][\"category\"] == \"Amazon Acquisition Adjusted\"\n assert df.iloc[7][\"country\"] == \"US\"\n assert df.iloc[7][\"avg_dollar_per_cust\"] == 40.692421507668499\n assert df.iloc[7][\"period_start\"].date() == datetime.date(2019, 1, 2)\n assert df.iloc[7][\"period_end\"].date() == datetime.date(2019, 1, 3)\n\n\ndef test_get_data_raw_2(mocker, dm, avro_data_file, company, datasource, datasource_details_result):\n \"\"\"Test getting raw data -- no aggregation\"\"\"\n\n datasource.get_details = mocker.Mock(return_value=datasource_details_result)\n dm.client.post = mocker.Mock(return_value=avro_data_file)\n\n filters = {\n 'category': ['Apple iTunes'],\n 'country': ['US'],\n 'section_pk': [company.id]\n }\n\n # Expected values for network calls\n expected_path = \"/rest/v2/datasource/{}/rawdata\".format(datasource.id)\n expected_post_data = {\n 'timeAggregation': None,\n 'valueAggregation': None,\n 'filters': filters,\n 'forecast': False\n }\n\n schema, df = dm.get_data_raw(datasource, filters)\n\n assert dm.client.post.call_count == 1\n assert dm.client.post.call_args[0][0] == expected_path\n assert dm.client.post.call_args[0][1] == expected_post_data\n assert dm.client.post.call_args[0][2] == {\"Accept\": \"avro/binary\", 'Content-Type': 'application/json'}\n\n assert len(df.columns) == 5\n assert sorted(df.columns) == [\n \"avg_dollar_per_cust\",\n \"category\",\n \"country\",\n \"period_end\",\n \"period_start\",\n ]\n assert len(df) == 8\n\n assert df.iloc[0][\"category\"] == \"Amazon ex. 
Whole Foods\"\n assert df.iloc[0][\"country\"] == \"US\"\n assert df.iloc[0][\"avg_dollar_per_cust\"] == 38.5165896068141\n assert df.iloc[0][\"period_start\"].date() == datetime.date(2019, 1, 2)\n assert df.iloc[0][\"period_end\"].date() == datetime.date(2019, 1, 3)\n\n assert df.iloc[7][\"category\"] == \"Amazon Acquisition Adjusted\"\n assert df.iloc[7][\"country\"] == \"US\"\n assert df.iloc[7][\"avg_dollar_per_cust\"] == 40.692421507668499\n assert df.iloc[7][\"period_start\"].date() == datetime.date(2019, 1, 2)\n assert df.iloc[7][\"period_end\"].date() == datetime.date(2019, 1, 3)\n\n\ndef test_get_data_1(\n mocker, dm, avro_data_file, company, datasource, datasource_details_result\n):\n \"\"\"Test getting data -- happy case\"\"\"\n datasource.get_details = mocker.Mock(return_value=datasource_details_result)\n dm.client.post = mocker.Mock(return_value=avro_data_file)\n\n # Expected values\n expected_path = \"/rest/v2/datasource/{}/rawdata\".format(datasource.id)\n expected_post_data = {\n 'timeAggregation': None,\n 'valueAggregation': None,\n 'filters': {'section_pk': [int(company.id)]},\n 'forecast': False\n }\n\n df = dm.get_data(datasource, company)\n\n # Check that we called the client correctly\n assert dm.client.post.call_count == 1\n assert dm.client.post.call_args[0][0] == expected_path\n assert dm.client.post.call_args[0][1] == expected_post_data\n assert dm.client.post.call_args[0][2] == {\"Accept\": \"avro/binary\", 'Content-Type': 'application/json'}\n\n assert len(df.columns) == 5\n assert sorted(df.columns) == [\n \"dimensions\",\n \"end_date\",\n \"start_date\",\n \"time_span\",\n \"value\",\n ]\n assert len(df) == 8\n\n assert df.iloc[0][\"dimensions\"] == {\"category\": \"Whole Foods\", \"country\": \"US\"}\n assert df.iloc[0][\"value\"] == 52.6278787878788\n assert df.iloc[0][\"start_date\"].date() == datetime.date(2019, 1, 1)\n assert df.iloc[0][\"time_span\"].to_pytimedelta() == datetime.timedelta(days=1)\n assert df.iloc[0][\"end_date\"].date() == datetime.date(2019, 1, 1)\n\n assert df.iloc[7][\"dimensions\"] == {\n \"category\": \"Amazon Acquisition Adjusted\",\n \"country\": \"US\",\n }\n assert df.iloc[7][\"value\"] == 40.692421507668499\n assert df.iloc[7][\"start_date\"].date() == datetime.date(2019, 1, 2)\n assert df.iloc[7][\"time_span\"].to_pytimedelta() == datetime.timedelta(days=1)\n assert df.iloc[7][\"end_date\"].date() == datetime.date(2019, 1, 2)\n\n\ndef test_get_data_2(mocker, dm, avro_data_file, company, other_company, datasource):\n \"\"\"Test getting data -- bad aggregation\"\"\"\n\n # ** fiscal quarter aggregation -- different company\n agg = Aggregation(period=\"fiscalQuarter\", company=other_company)\n with pytest.raises(DataMonsterError) as excinfo:\n dm.get_data(datasource, company, agg)\n\n assert (\n \"Aggregating by the fiscal quarter of a different company not yet supported\"\n in excinfo.value.args[0]\n )\n\n\ndef test_get_data_3(\n mocker,\n dm,\n avro_data_file,\n company,\n other_company,\n datasource,\n datasource_details_result,\n):\n \"\"\"Test getting data -- monthly aggregation\"\"\"\n\n # ** monthly aggregation\n dm.client.post = mocker.Mock(return_value=avro_data_file)\n datasource.get_details = mocker.Mock(return_value=datasource_details_result)\n agg = Aggregation(period=\"month\", company=None)\n\n # Expected values\n expected_path = \"/rest/v2/datasource/{}/rawdata\".format(datasource.id)\n expected_post_data = {\n 'timeAggregation': {\n 'cadence': 'monthly',\n 'aggregationType': 'sum',\n 'includePTD': False\n },\n 
'valueAggregation': None,\n 'filters': {'section_pk': [int(company.id)]},\n 'forecast': False\n }\n\n dm.get_data(datasource, company, agg)\n assert dm.client.post.call_args[0][0] == expected_path\n assert dm.client.post.call_args[0][1] == expected_post_data\n\n # ** fiscal quarter aggregation -- good company\n dm.client.post = mocker.Mock(return_value=avro_data_file)\n agg = Aggregation(period=\"fiscalQuarter\", company=company)\n expected_post_data = {\n 'timeAggregation': {\n 'cadence': 'fiscal quarterly',\n 'aggregationType': 'sum',\n 'includePTD': False,\n 'sectionPk': company.id,\n },\n 'valueAggregation': None,\n 'filters': {'section_pk': [int(company.id)]},\n 'forecast': False\n }\n\n dm.get_data(datasource, company, agg)\n\n assert dm.client.post.call_args[0][0] == expected_path\n assert dm.client.post.call_args[0][1] == expected_post_data\n\n\ndef test_get_data_4(mocker, dm, other_avro_data_file, company, datasource, datasource_details_result):\n \"\"\"Test getting data -- date filters\"\"\"\n\n dm.client.post = mocker.Mock(return_value=other_avro_data_file)\n datasource.get_details = mocker.Mock(return_value=datasource_details_result)\n\n # ** start date\n df = dm.get_data(datasource, company, start_date=datetime.date(2019, 12, 30))\n\n assert len(df) == 2\n assert df.iloc[0].start_date.date() == datetime.date(2019, 12, 30)\n assert df.iloc[1].start_date.date() == datetime.date(2019, 12, 31)\n\n # ** end date\n df = dm.get_data(datasource, company, end_date=datetime.date(2014, 1, 2))\n assert len(df) == 2\n assert df.iloc[0].start_date.date() == datetime.date(2014, 1, 1)\n assert df.iloc[1].start_date.date() == datetime.date(2014, 1, 2)\n\n # ** start and end date\n\n df = dm.get_data(\n datasource,\n company,\n start_date=datetime.date(2014, 1, 15),\n end_date=datetime.date(2014, 1, 16),\n )\n assert len(df) == 2\n assert df.iloc[0].start_date.date() == datetime.date(2014, 1, 15)\n assert df.iloc[1].start_date.date() == datetime.date(2014, 1, 16)\n\n\ndef test_get_data_group_by_id(mocker, dm, data_group_details_result):\n \"\"\"Test getting data group by pk\"\"\"\n\n dm.client.get = mocker.Mock(return_value=data_group_details_result)\n\n data_group = dm.get_data_group_by_id(123)\n\n # Make sure we hit the right endpoint\n assert dm.client.get.call_count == 1\n assert dm.client.get.call_args[0][0] == '/rest/v1/data_group/123'\n\n # a couple of spot checks.\n assert data_group.name == 'Test By Id'\n assert data_group.id == 123\n assert len(data_group.columns) == 7\n for c in data_group.columns:\n assert isinstance(c, DataGroupColumn)\n\n\ndef test_get_data_groups(mocker, dm, single_page_data_group_results):\n \"\"\"Test getting datagroups.\"\"\"\n\n def assert_results_good(results):\n results = list(results)\n assert len(results) == 2\n\n assert_object_matches_data_group(results[0], single_page_data_group_results[\"results\"][0])\n\n assert_object_matches_data_group(results[1], single_page_data_group_results[\"results\"][1])\n\n dm.client.get = mocker.Mock(return_value=single_page_data_group_results)\n\n # ++ No query\n dm.client.get.reset_mock()\n data_groups = dm.get_data_groups()\n\n assert_results_good(data_groups)\n assert dm.client.get.call_count == 1\n assert dm.client.get.call_args[0][0] == \"/rest/v1/data_group\"\n\n # ++ text query\n dm.client.get.reset_mock()\n data_groups = dm.get_data_groups(query=\"test\")\n\n assert_results_good(data_groups)\n assert dm.client.get.call_count == 1\n assert dm.client.get.call_args[0][0] == \"/rest/v1/data_group?q=test\"\n",
"import collections\nimport datetime\nimport numpy\nimport pandas\nimport pytest\n\nfrom datamonster_api import DataMonster, Aggregation, DataMonsterError\nfrom regression_keys import DM_API_KEY_ID, DM_API_SECRET\n\nQA_ETL_UUID = \"57588c68-e262-49b4-b05a-8ae4c30c183b\"\nFACTSET_UUID = \"0d07adb8-291e-4f4f-9c27-bbe2519e89e7\"\nSIMILARWEB_UUID = \"5899e237-874c-4e77-9d2e-c4b6cff218e8\"\n\ndm = DataMonster(DM_API_KEY_ID, DM_API_SECRET, server=\"http://staging.adaptivemgmt.com\")\n\n\ndef assert_data_frame(df, length, value_type=\"float64\"):\n assert sorted(df.columns) == [\n \"dimensions\",\n \"end_date\",\n \"start_date\",\n \"time_span\",\n \"value\",\n ]\n assert collections.Counter(df.dtypes.values) == collections.Counter(\n [\n numpy.dtype(\"<M8[ns]\"),\n numpy.dtype(\"<M8[ns]\"),\n numpy.dtype(value_type),\n numpy.dtype(\"<m8[ns]\"),\n numpy.dtype(\"O\"),\n ]\n )\n assert len(df) == length\n\n\ndef assert_frame_equal(df1, df2):\n from pandas.util.testing import assert_frame_equal\n\n assert_frame_equal(\n df1.reset_index(drop=True).sort_index(axis=1),\n df2.reset_index(drop=True).sort_index(axis=1),\n )\n\n\ndef test_company():\n company = dm.get_company_by_id(79)\n assert company == dm.get_company_by_ticker(\"AAP\")\n assert company.pk == 79\n assert company.name == \"ADVANCE AUTO PARTS\"\n assert company.ticker == \"AAP\"\n assert company.type == \"Company\"\n assert company.quarters\n assert type(company.quarters) == list\n assert company.quarters[0] == \"04-21-2001\"\n\n company = dm.get_company_by_id(1257)\n assert company.name == \"MASTERCARD SECTOR INSIGHTS\"\n assert company.type == \"Macro\"\n\n\ndef test_data_source():\n data_source = dm.get_datasource_by_id(QA_ETL_UUID)\n assert data_source == dm.get_datasource_by_name(\"Static QA Fountain\")\n assert data_source.id == QA_ETL_UUID\n assert data_source.name == \"Static QA Fountain\"\n assert data_source.uri == \"/rest/v1/datasource/\" + QA_ETL_UUID\n assert data_source.category == \"Web Scrape Data\"\n assert len(list(data_source.companies)) == 2\n details = data_source.get_details()\n assert details[\"earliestData\"] == \"2017-07-01\"\n assert details[\"category\"] == \"Web Scrape Data\"\n\n\ndef test_aggregation():\n company = dm.get_company_by_id(335)\n agg = Aggregation(period=\"week\", company=company)\n assert agg.period == \"week\"\n assert agg.company == company\n\n\ndef test_get_data_qa_etl_historical():\n data_source = dm.get_datasource_by_id(QA_ETL_UUID)\n company = dm.get_company_by_id(79)\n\n df = data_source.get_data(company, end_date=\"2017-09-01\")\n assert len(df) == 244\n\n df = data_source.get_data(company, end_date=\"2019-01-01\")\n assert_data_frame(df, 2192, \"int64\")\n records = {\n \"dimensions\": {\"country\": \"USA\", \"category\": \"small\"},\n \"end_date\": pandas.to_datetime(\"2017-07-01\"),\n \"start_date\": pandas.to_datetime(\"2017-07-01\"),\n \"value\": 1701,\n \"time_span\": datetime.timedelta(days=1),\n }\n assert_frame_equal(df.head(1), pandas.DataFrame.from_records([records]))\n\n df = data_source.get_data(company, start_date=\"2018-12-10\", end_date=\"2019-01-01\")\n assert_data_frame(df, 84, \"int64\")\n records = {\n \"dimensions\": {\"country\": \"USA\", \"category\": \"small\"},\n \"end_date\": pandas.to_datetime(\"2018-12-10\"),\n \"start_date\": pandas.to_datetime(\"2018-12-10\"),\n \"value\": 11210,\n \"time_span\": datetime.timedelta(days=1),\n }\n assert_frame_equal(df.head(1), pandas.DataFrame.from_records([records]))\n\n df = data_source.get_data(\n company, 
Aggregation(period=\"quarter\", company=company), end_date=\"2019-01-01\"\n )\n assert_data_frame(df, 24)\n records = {\n \"dimensions\": {\"country\": \"USA\", \"category\": \"small\"},\n \"end_date\": pandas.to_datetime(\"2017-09-30\"),\n \"start_date\": pandas.to_datetime(\"2017-07-01\"),\n \"value\": 1814.75,\n \"time_span\": datetime.timedelta(days=92),\n }\n assert_frame_equal(df.head(1), pandas.DataFrame.from_records([records]))\n\n\ndef test_get_data_qa_today():\n data_source = dm.get_datasource_by_id(QA_ETL_UUID)\n company = dm.get_company_by_id(79)\n yest = datetime.datetime.now() - datetime.timedelta(days=1)\n df = data_source.get_data(company, start_date=yest)\n assert_data_frame(df, 4, \"int64\")\n parsed_date = pandas.to_datetime(yest.strftime(\"%Y-%m-%d\"))\n records = [\n {\n \"dimensions\": {\n \"category\": \"large\" if yest.day % 2 == 0 else \"small\",\n \"country\": \"USA\",\n },\n \"end_date\": parsed_date,\n \"start_date\": parsed_date,\n \"value\": int(\"1{}{}\".format(int(yest.month), int(yest.day))),\n \"time_span\": datetime.timedelta(days=1),\n },\n {\n \"dimensions\": {\n \"category\": \"large\" if yest.day % 2 == 1 else \"small\",\n \"country\": \"USA\",\n },\n \"end_date\": parsed_date,\n \"start_date\": parsed_date,\n \"value\": int(\"1{}{}\".format(int(yest.month), int(yest.day))),\n \"time_span\": datetime.timedelta(days=1),\n },\n {\n \"dimensions\": {\n \"category\": \"large\" if yest.day % 2 == 0 else \"small\",\n \"country\": \"GB\",\n },\n \"end_date\": parsed_date,\n \"start_date\": parsed_date,\n \"value\": int(\"2{}{}\".format(int(yest.month), int(yest.day))),\n \"time_span\": datetime.timedelta(days=1),\n },\n {\n \"dimensions\": {\n \"category\": \"large\" if yest.day % 2 == 1 else \"small\",\n \"country\": \"GB\",\n },\n \"end_date\": parsed_date,\n \"start_date\": parsed_date,\n \"value\": int(\"2{}{}\".format(int(yest.month), int(yest.day))),\n \"time_span\": datetime.timedelta(days=1),\n },\n ]\n print(df)\n assert_frame_equal(df, pandas.DataFrame.from_records(records))\n\n\ndef test_get_data_simple():\n \"\"\" Test 1010data Blended Credit Dataset\n \"\"\"\n company = dm.get_company_by_ticker(\"W\")\n ds = next(\n dm.get_datasources(query=\"1010data Blended Credit & Debit Sales Index YoY\")\n )\n assert ds.name == \"1010data Blended Credit & Debit Sales Index YoY\"\n assert ds.id == \"3de84b2e-604f-4ea7-901f-61601eef8e0e\"\n assert ds.category == \"Blended Payment Data\"\n assert ds.type == \"datasource\"\n assert len(list(ds.companies)) == 190\n\n df = ds.get_data(company, end_date=\"2017-09-09\")\n assert_data_frame(df, 28)\n records = {\n \"dimensions\": {\"category\": \"Wayfair 6-day Adjusted\", \"country\": \"US\"},\n \"end_date\": pandas.to_datetime(\"2014-03-31\"),\n \"start_date\": pandas.to_datetime(\"2014-03-31\"),\n \"value\": 0.70154036189355,\n \"time_span\": datetime.timedelta(days=1),\n }\n assert_frame_equal(df.head(1), pandas.DataFrame.from_records([records]))\n\n df = ds.get_data(company, start_date=\"2016-01-01\", end_date=\"2017-09-01\")\n assert_data_frame(df, 12)\n records = {\n \"dimensions\": {\"category\": \"Wayfair Overall\", \"country\": \"US\"},\n \"end_date\": pandas.to_datetime(\"2016-03-31\"),\n \"start_date\": pandas.to_datetime(\"2016-03-31\"),\n \"value\": 0.684391819283361,\n \"time_span\": datetime.timedelta(days=1),\n }\n assert_frame_equal(df.head(1), pandas.DataFrame.from_records([records]))\n\n with pytest.raises(DataMonsterError):\n ds.get_data(company, Aggregation(period=\"quarter\", 
company=company))\n\n\ndef test_get_data_factset():\n \"\"\" Test `FactSet Actuals Sales Quarterly` one of the more popular data sets\n \"\"\"\n company = dm.get_company_by_id(335)\n sales = next(dm.get_datasources(query=\"FactSet Actuals Sales Quarterly\"))\n assert sales.id == \"bdcac6ae-4f31-4aaf-a92a-12854f09c768\"\n assert sales.name == \"FactSet Actuals Sales Quarterly\"\n\n df = sales.get_data(company, end_date=\"2017-09-01\")\n assert_data_frame(df, 70)\n records = {\n \"dimensions\": {},\n \"end_date\": pandas.to_datetime(\"2000-03-31\"),\n \"start_date\": pandas.to_datetime(\"2000-03-31\"),\n \"value\": 573889.0,\n \"time_span\": datetime.timedelta(days=1),\n }\n assert_frame_equal(df.head(1), pandas.DataFrame.from_records([records]))\n\n agg = Aggregation(period=\"quarter\", company=company)\n df = sales.get_data(company, agg, start_date=\"2010-01-01\", end_date=\"2017-09-01\")\n assert_data_frame(df, 30)\n records = {\n \"dimensions\": {},\n \"end_date\": pandas.to_datetime(\"2010-03-31\"),\n \"start_date\": pandas.to_datetime(\"2010-01-01\"),\n \"value\": 7131000.0,\n \"time_span\": datetime.timedelta(days=90),\n }\n assert_frame_equal(df.head(1), pandas.DataFrame.from_records([records]))\n\n\ndef test_get_data_bigger():\n \"\"\" Test `SimilarWeb Direct Volume` which is a bigger dataset\n \"\"\"\n company = dm.get_company_by_id(335)\n ds = dm.get_datasource_by_id(SIMILARWEB_UUID)\n\n df = ds.get_data(company, end_date=\"2018-01-01\")\n assert_data_frame(df, 7584)\n\n agg = Aggregation(period=\"week\", company=company)\n df = ds.get_data(company, agg, end_date=\"2018-01-01\")\n assert_data_frame(df, 1094)\n\n\ndef test_get_data_estimate():\n \"\"\" Test `Factset Estimates Sales Quarterly` which is a non datamonster data source\n \"\"\"\n\n def assert_estimate_data_frame(df, length):\n assert sorted(df.columns) == [\n \"average\",\n \"currency_code\",\n \"end_date\",\n \"estimate_count\",\n \"high\",\n \"low\",\n \"median\",\n \"section_pk\",\n \"start_date\",\n \"std_dev\",\n \"target_date\",\n ]\n assert sorted(\n df.dtypes.values\n == [\n numpy.dtype(\"float64\"),\n numpy.dtype(\"<M8[ns]\"),\n numpy.dtype(\"int64\"),\n numpy.dtype(\"int64\"),\n numpy.dtype(\"float64\"),\n numpy.dtype(\"float64\"),\n numpy.dtype(\"float64\"),\n numpy.dtype(\"<M8[ns]\"),\n numpy.dtype(\"float64\"),\n numpy.dtype(\"<M8[ns]\"),\n numpy.dtype(\"O\"),\n ]\n )\n assert len(df) == length\n\n estimate = dm.get_datasource_by_id(FACTSET_UUID)\n assert estimate.name == \"FactSet Estimates Sales Quarterly\"\n assert estimate.type == \"Datamonster Estimates\"\n company = dm.get_company_by_id(335)\n\n df = estimate.get_data(company, end_date=\"2018-01-01\")\n assert_estimate_data_frame(df, 4349)\n\n with pytest.raises(DataMonsterError):\n agg = Aggregation(period=\"week\", company=company)\n df = estimate.get_data(company, agg, end_date=\"2018-01-01\")\n"
] |
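DataMonster._get_paginated_results above is a plain cursor-following generator: fetch a page, yield its results, then follow pagination["nextPageURI"] until it is None. A self-contained sketch of the same loop; FakeClient and its two pages are invented here purely for illustration:

    def get_paginated_results(client, url):
        next_page = url
        while next_page is not None:
            resp = client.get(next_page)
            for result in resp["results"]:
                yield result
            next_page = resp["pagination"]["nextPageURI"]

    class FakeClient:
        """Hypothetical in-memory client returning two pages."""
        def __init__(self):
            self.pages = {
                "/rest/v1/company": {"results": [1, 2],
                                     "pagination": {"nextPageURI": "/page2"}},
                "/page2": {"results": [3],
                           "pagination": {"nextPageURI": None}},
            }

        def get(self, url):
            return self.pages[url]

    assert list(get_paginated_results(FakeClient(), "/rest/v1/company")) == [1, 2, 3]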
[
[
"pandas.DataFrame.from_records",
"pandas.to_datetime",
"pandas.Timestamp"
],
[
"pandas.DataFrame.from_records"
],
[
"pandas.DataFrame.from_records",
"pandas.to_datetime",
"numpy.dtype"
]
] |
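The _avro_to_df helper above runs every column whose declared data_type is "date" through pandas.to_datetime and passes other columns through unchanged before calling pandas.DataFrame.from_records. A minimal pandas-only sketch of that per-row mapping; the sample schema and row below are made up:

    import pandas

    data_types = [{"name": "start_date", "data_type": "date"},
                  {"name": "value", "data_type": "float"}]

    def parse_row(row):
        return {col["name"]: pandas.to_datetime(row[col["name"]])
                if col["data_type"] == "date" else row[col["name"]]
                for col in data_types}

    df = pandas.DataFrame.from_records(
        [parse_row({"start_date": "2019-01-02", "value": 38.5})])
    assert str(df.loc[0, "start_date"].date()) == "2019-01-02"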
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
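get_data above trims the date range on the client because, per its inline comment, the server does not support greater-than or less-than filters; rows are kept when end_date >= start_date and start_date <= end_date via pandas.Timestamp comparisons. A small sketch with made-up data:

    import pandas

    df = pandas.DataFrame({
        "start_date": pandas.to_datetime(["2019-01-01", "2019-01-02", "2019-01-03"]),
        "end_date": pandas.to_datetime(["2019-01-01", "2019-01-02", "2019-01-03"]),
        "value": [1.0, 2.0, 3.0],
    })

    start, end = "2019-01-02", "2019-01-03"
    df = df[df.end_date >= pandas.Timestamp(start)]
    df = df[df.start_date <= pandas.Timestamp(end)]
    assert list(df.value) == [2.0, 3.0]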
simonvh/fluff
|
[
"f8a5d88421a54ec559d1bac52d60643fb814c6f9"
] |
[
"fluff/track.py"
] |
[
"import os\nimport pyBigWig\nimport re\nimport tempfile\nfrom collections import Counter\nfrom warnings import warn\n\nimport HTSeq\nimport numpy as np\nimport pybedtools\nimport pysam\nfrom scipy.stats import binned_statistic\n\n\nclass SimpleFeature(object):\n def __init__(self, chrom, start, end, value, strand):\n self.chrom = chrom\n self.start = start\n self.end = end\n self.value = value\n self.strand = strand\n\nclass SimpleBed(object):\n \"\"\"\n BED file as a simple iterator\n \"\"\"\n\n def __init__(self, fname):\n self.f = open(fname)\n\n def __iter__(self):\n return self\n\n def __next__(self):\n line = self.f.readline()\n while line and (line[0] == \"#\" or line.startswith(\"track\")):\n line = self.f.readline()\n if line:\n vals = line.strip().split(\"\\t\")\n start, end = int(vals[1]), int(vals[2])\n if len(vals) > 3:\n value = vals[3]\n else:\n value = 0\n if len(vals) > 5:\n if not (vals[5] is '+') or (vals[5] is '-'):\n return SimpleFeature(vals[0], start, end, value, '+')\n else:\n return SimpleFeature(vals[0], start, end, value, vals[5])\n elif len(vals) > 4:\n if not (vals[4] is '+') or (vals[4] is '-'):\n return SimpleFeature(vals[0], start, end, value, '+')\n else:\n return SimpleFeature(vals[0], start, end, value, vals[4])\n else:\n return SimpleFeature(vals[0], start, end, value, '+')\n else:\n self.f.close()\n raise StopIteration\n\n\nclass TrackMeta(type):\n \"\"\" Keep track of \"plugin\" classes. We use of a metaclass is to\n automatically register all subclasses derived from a given base class.\n\n we use __init__ rather than __new__ here because we want\n to modify attributes of the class *after* they have been created\n\n def __init__(cls, name, bases, dct):\n if not hasattr(cls, '_registry'):\n # this is the base class. Create an empty registry\n cls._registry = []\n else:\n # this is a derived class. Add cls to the registry\n cls._registry.append((name, cls))\n\n super(TrackMeta, cls).__init__(name, bases, dct)\n\n see here: https://jakevdp.github.io/blog/2012/12/01/a-primer-on-python-metaclasses/\n \"\"\"\n _registry = []\n _filetypes = []\n\n def __init__(cls, name, bases, attrs):\n cls._registry.append((name, cls))\n\n\n# import six\n# @six.add_metaclass(MetaTrack)\n#class Track(object):\nclass Track(object, metaclass=TrackMeta):\n _registry = []\n _filetypes = []\n track_type = \"profile\"\n\n def __init__(self, fname):\n \"\"\"\n Instance Track class is not a good choice,\n once we have created registry subclass. We use metaclass\n to provide different interface for these following files tyes:\n bam, bed, wig, bg, bw, tabix (bed.gz, bg.gz, wig.gz)\n \"\"\"\n\n raise NotImplementedError(\"please instantiate subclass\")\n\n @classmethod\n def filetypes(cls):\n \"\"\"\n Return all supported filetypes of the subclasses of Track\n\n Returns\n -------\n list\n List of supported filetypes\n \"\"\"\n\n return list(set([ftype for _,t in cls._registry for ftype in t._filetypes]))\n\n def _get_interval(self, interval):\n \"\"\"\n Translate interval to tuple of (chrom, start, end)\n\n Params\n ------\n interval : list, tuple or str\n If interval is a list or tuple, it should contain chromosome (str),\n start (int), end (int). 
If it is a string, it should be of the\n            format chrom:start-end\n\n        Returns\n        -------\n        tuple\n            (chrom, start, end)\n\n        \"\"\"\n        try:\n            chrom, start, end = interval\n        except Exception:\n            chrom, start, end = re.split(r'[:-]', interval)\n            start, end = int(start), int(end)\n\n        return (chrom, start, end)\n\n    @classmethod\n    def load(self, fname, *args, **kwargs):\n        \"\"\"\n        Load a track in one of the following formats:\n        bam, bed, wig, bg, bw, tabix (bed.gz, bg.gz, wig.gz)\n        The format is guessed by the file extension.\n\n        Parameters\n        ----------\n        fname : str\n            Filename\n\n        Returns\n        -------\n        Track object of the specified type\n        \"\"\"\n\n        _, ftype = os.path.splitext(fname)\n        ftype = ftype.strip(\".\")\n        for _, cls in self._registry:\n            for filetype in cls._filetypes:\n                if filetype.endswith(ftype):\n                    return cls(fname, *args, **kwargs)\n        raise ValueError(\"can't guess type of file {}\".format(fname))\n\nclass BinnedMixin(object):\n    def binned_stats(self, in_fname, nbins, split=False, **args):\n        rpkm = args.get(\"rpkm\", False)\n        readlength = self.read_length()\n        fragmentsize = self.fragmentsize\n        if not fragmentsize:\n            fragmentsize = readlength\n        total_reads = 1\n        if rpkm:\n            total_reads = self.count() / 1000000.0\n        ret = []\n        count = 1\n        # Only use a BedTool if really necessary, as BedTools does not close open files\n        # on object deletion\n        if self.ftype == \"bam\":\n            in_track = SimpleBed(in_fname)\n        else:\n            in_track = pybedtools.BedTool(in_fname)\n\n        #extend = fragmentsize - readlength\n        for feature, min_strand, plus_strand in self.fetch_to_counts(in_track):\n            binsize = (feature.end - feature.start) / float(nbins)\n            row = []\n            min_strand = [x - (fragmentsize - readlength) for x in min_strand]\n            bin_start = feature.start\n            while int(bin_start + 0.5) < feature.end:\n                num_reads = 0\n                i = 0\n                c = 0\n                while i < len(min_strand) and min_strand[i] <= int(bin_start + binsize + 0.5):\n                    if min_strand[i] + fragmentsize <= int(bin_start + binsize + 0.5):\n                        c += 1\n                    num_reads += 1\n                    i += 1\n                min_strand = min_strand[c:]\n\n                i = 0\n                c = 0\n                while i < len(plus_strand) and plus_strand[i] <= int(bin_start + binsize + 0.5):\n                    if plus_strand[i] + fragmentsize <= int(bin_start + binsize + 0.5):\n                        c += 1\n                    num_reads += 1\n                    i += 1\n                plus_strand = plus_strand[c:]\n\n                if rpkm:\n                    per_kb = num_reads * (1000.0 / binsize)\n                    row.append(per_kb / total_reads)\n                else:\n                    row.append(num_reads)\n                bin_start += binsize\n            if feature.strand == \"-\":\n                row = row[::-1]\n            ret.append([feature.chrom, feature.start, feature.end] + row)\n            count += 1\n\n        del in_track\n        if split:\n            return ret\n        else:\n            return [\"\\t\".join([str(x) for x in r]) for r in ret]\n\nclass BamTrack(BinnedMixin, Track):\n    _filetypes = [\"bam\"]\n    track_type = \"feature\"\n\n    def __init__(self, fname, **kwargs):\n        \"\"\"\n        Track interface to a BAM file\n\n        Parameters\n        ----------\n        fname: str\n            filename of BAM file\n\n        fragmentsize : int, optional\n            Reads are extended to fragmentsize before summarizing the profile.\n            If fragmentsize is None, the read length is used.\n\n        rmdup : bool, optional\n            Ignore duplicate reads if True, default False\n\n        rmrepeats : bool, optional\n            Ignore reads with mapping quality 0 (multi-mapped reads) if\n            True, default False\n\n        \"\"\"\n\n        self.rmdup = kwargs.get(\"rmdup\", False)\n        self.rmrepeats = kwargs.get(\"rmrepeats\", False)\n        self.fragmentsize = kwargs.get(\"fragmentsize\", None)\n\n        if fname.split(\".\")[-1] in self._filetypes:\n            self.track = pysam.AlignmentFile(fname, \"rb\")\n            self.ftype = \"bam\"\n            self.chroms = 
self.track.references\n else:\n raise ValueError(\"filetype of {} is not supported\".format(fname))\n\n def count(self):\n \"\"\"\n Count total number of reads in file\n\n Returns\n -------\n int\n Number of reads\n \"\"\"\n\n if (not self.rmdup and not self.rmrepeats):\n try:\n return self.track.mapped\n except Exception:\n pass\n\n c = 0\n for read in self.track:\n # duplicates\n if (not self.rmdup or not read.flag & 0x0400):\n # multi-mappers / mapping quality 0\n if (not self.rmrepeats) or read.mapq > 0:\n c += 1\n return c\n\n def read_length(self):\n \"\"\"\n Return the read length\n\n This function returns the read length of the first read where it is defined\n\n Returns\n -------\n int\n Read length\n \"\"\"\n it = self.track.head(100)\n lengths = [read.infer_query_length(always=False) for read in it]\n return Counter(lengths).most_common(1)[0][0]\n\n def fetch_to_counts(self, track):\n \"\"\"\n Yields the number of reads for each feature in track\n\n Parameters\n ----------\n track : Object of <TODO: what exactly do we expect?>\n Track with features\n\n Yields\n ------\n tuple\n Return value consists of:\n feature\n number of reads on minus strand\n number of reads on plus strand\n \"\"\"\n for feature in track:\n min_strand = []\n plus_strand = []\n if feature.start < 0:\n feature.start = 0\n if feature.chrom in self.chroms:\n for read in self.track.fetch(feature.chrom, feature.start, feature.end):\n if (not self.rmrepeats) or read.mapq > 0:\n if read.is_reverse:\n min_strand.append(read.pos)\n else:\n plus_strand.append(read.pos)\n\n # Remove duplicates\n if self.rmdup:\n min_strand = sorted(set(min_strand))\n plus_strand = sorted(set(plus_strand))\n else:\n min_strand = sorted(min_strand)\n plus_strand = sorted(plus_strand)\n yield (feature, min_strand, plus_strand)\n\n def fetch_reads(self, args, **kwargs):\n warn(\"fetch_reads is deprecated, please use fetch\", DeprecationWarning)\n\n def fetch(self, interval, strand=None):\n \"\"\"\n Retrieve all reads within a given window\n\n Parameters\n ----------\n window : list, tuple or str\n If window is a list or tuple, it should contain chromosome (str),\n start (int), end (int). If it is a string, it should be of the\n format chrom:start-end\n\n strand : str, optional\n Either '+' or '-'. By default all reads are returned.\n\n Yields\n ------\n AlignedSegment\n Yields pysam AlignedSegment objects.\n \"\"\"\n\n chrom, start, end = self._get_interval(interval)\n\n if chrom in self.chroms:\n for read in self.track.fetch(chrom, start, end):\n # duplicate reads\n if self.rmdup and (read.flag & 1024):\n continue\n # multimappers / low mapping quality\n if self.rmrepeats and read.mapping_quality < 10:\n continue\n if strand:\n if strand == \"+\" and read.is_reverse:\n continue\n elif strand == \"-\" and not read.is_reverse:\n continue\n yield read\n\n def close(self):\n self.track.close()\n\n def get_profile(self, interval, **kwargs):\n \"\"\"\n Return summary profile in a given window\n\n Parameters\n ----------\n interval : list, tuple or str\n If interval is a list or tuple, it should contain chromosome (str),\n start (int), end (int). 
If it is a string, it should be of the\n format chrom:start-end\n\n scalepm : bool, optional\n Scale profile to per million reads\n\n scalefactor : float, optional\n Scale profile by this factor, default 1.0\n\n Returns\n -------\n numpy array\n A summarized profile as a numpy array\n\n \"\"\"\n scalefactor = kwargs.get(\"scalefactor\", 1.0)\n scalepm = kwargs.get(\"scalepm\", False)\n if scalepm:\n scalefactor = scalefactor * 1e6 / float(self.count())\n\n chrom, start, end = self._get_interval(interval)\n profile = np.zeros(end - start, dtype=\"f\")\n profile.fill(np.nan)\n\n strand = {True: \"-\", False: \"+\"}\n for read in self.fetch(interval):\n iv = HTSeq.GenomicInterval(\n chrom,\n read.reference_start,\n read.reference_end, strand[read.is_reverse]\n )\n if self.fragmentsize:\n iv.length = self.fragmentsize\n region = profile[iv.start - start:iv.end - start]\n region[np.isnan(region)] = 0\n region += 1\n else:\n for blockstart, blockend in read.get_blocks():\n region = profile[blockstart - start:blockend - start]\n region[np.isnan(region)] = 0\n region += 1\n profile = profile * scalefactor\n return profile\n\nclass BedTrack(BinnedMixin, Track):\n _filetypes = [\"bed\"]\n track_type = \"feature\"\n\n def __init__(self, fname, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n fragmentsize : int, optional\n Reads are extended to fragmentsize before summarizing the profile.\n If fragmentsize is None, the read length is used.\n \"\"\"\n\n self.fragmentsize = kwargs.get(\"fragmentsize\", None)\n\n self.track = pybedtools.BedTool(fname)\n self.ftype = \"bed\"\n\n def count(self):\n \"\"\"\n Count total number of features in file\n\n Returns\n -------\n int\n Number of features\n \"\"\"\n\n return self.track.count()\n\n def read_length(self):\n if self.ftype == \"bed\":\n for read in self.track:\n return read.end - read.start\n\n def fetch_to_counts(self, track):\n \"\"\"\n Yields the number of features for each feature in track\n\n Parameters\n ----------\n track : Object of <TODO: what exactly do we expect?>\n Track with features\n\n Yields\n ------\n tuple\n Return value consists of:\n feature\n number of reads on minus strand\n number of reads on plus strand\n \"\"\"\n\n for feature, features in self._get_features_by_feature(track):\n min_strand = []\n plus_strand = []\n\n for f in features:\n if len(f) >= 6 and f[5] == \"-\":\n min_strand.append(int(f[1]))\n else:\n plus_strand.append(int(f[1]))\n yield (feature, min_strand, plus_strand)\n\n def fetch_reads(self, *args, **kwargs):\n warn(\"fetch_reads is deprecated, please use fetch\", DeprecationWarning)\n\n def _get_features_by_feature(self, track_a):\n \"\"\"\n Return overlapping features\n\n Parameters\n ----------\n\n track_a : BedTrack object\n \"\"\"\n\n track_b = self.track\n if track_a.file_type != \"bed\" or track_b.file_type != \"bed\":\n raise ValueError(\"Need BED files\")\n for f in track_a:\n field_len_a = len(f.fields)\n break\n i = track_a.intersect(track_b, wao=True, stream=False)\n with tempfile.NamedTemporaryFile(mode='w+', encoding='utf-8', delete=False, prefix=\"fluff\") as tmp:\n _ = i.saveas(tmp.name)\n tmp.flush()\n last = None\n features = []\n for line in tmp.readlines():\n vals = line.strip().split(\"\\t\")\n if field_len_a >= 6:\n feature = pybedtools.Interval(vals[0], int(vals[1]), int(vals[2]), strand=vals[5])\n else:\n feature = pybedtools.Interval(vals[0], int(vals[1]), int(vals[2]))\n if str(feature) != str(last):\n if len(features) > 0:\n if len(features) == 1 and features[0][1:3] == ['-1', '-1']:\n 
yield last, []\n else:\n yield last, features\n features = []\n last = feature\n features.append(vals[field_len_a:])\n if len(features) == 1 and features[0][1:3] == ['-1', '-1']:\n yield feature, []\n else:\n yield feature, features\n\n def _interval_bedtool(self, interval, strand=None):\n \"\"\"\n Convert an interval to a BedTool\n\n Parameters\n ----------\n interval : list, tuple or str\n If interval is a list or tuple, it should contain chromosome (str),\n start (int), end (int). If it is a string, it should be of the\n format chrom:start-end\n\n strand : str, optional\n Either '+' or '-'. Default is no strand.\n\n Returns\n -------\n BedTool object\n \"\"\"\n\n chrom, start, end = self._get_interval(interval)\n\n if strand is None:\n strand = \".\"\n\n if strand == \".\":\n feature = pybedtools.BedTool(\n \"{0} {1} {2}\".format(\n chrom, start, end),\n from_string=True)\n else:\n feature = pybedtools.BedTool(\n \"{0} {1} {2} 0 0 {3}\".format(\n chrom, start, end, strand),\n from_string=True)\n return feature\n\n def fetch(self, interval, strand=None):\n \"\"\"\n Retrieve all reads within a given window\n\n Parameters\n ----------\n interval : list, tuple or str\n If interval is a list or tuple, it should contain chromosome (str),\n start (int), end (int). If it is a string, it should be of the\n format chrom:start-end\n\n strand : str, optional\n Either '+' or '-'. By default all reads are returned.\n\n Yields\n ------\n GenomicInterval\n Yields HTSeq GenomicInterval objects.\n \"\"\"\n\n feature = self._interval_bedtool(interval, strand=strand)\n chrom, start, end = self._get_interval(interval)\n for read in self.track.intersect(feature, u=True, stream=True, s=strand in [\"+\", \"-\"]):\n yield HTSeq.GenomicInterval(\n chrom,\n read.start,\n read.end,\n str(read.strand))\n\n def close(self):\n pass\n\n def get_profile(self, interval, **kwargs):\n \"\"\"\n Return summary profile in a given window\n\n Parameters\n ----------\n interval : list, tuple or str\n If interval is a list or tuple, it should contain chromosome (str),\n start (int), end (int). If it is a string, it should be of the\n format chrom:start-end\n\n scalepm : bool, optional\n Scale profile to per million reads\n\n scalefactor : float, optional\n Scale profile by this factor, default 1.0\n\n Returns\n -------\n numpy array\n A summarized profile as a numpy array\n\n \"\"\"\n scalefactor = kwargs.get(\"scalefactor\", 1.0)\n scalepm = kwargs.get(\"scalepm\", False)\n if scalepm:\n scalefactor = scalefactor * 1e6 / float(self.count())\n\n chrom, start, end = self._get_interval(interval)\n profile = np.zeros(end - start, dtype=\"f\")\n profile.fill(np.nan)\n\n for f in self.fetch(interval):\n iv = HTSeq.GenomicInterval(\n chrom,\n f.start,\n f.end,\n f.strand\n )\n if self.fragmentsize:\n iv.length = self.fragmentsize\n region = profile[iv.start - start:iv.end - start]\n region[np.isnan(region)] = 0\n region += 1\n\n return profile\n\nclass WigTrack(Track):\n _filetypes = [\"bg\", \"wig\", \"bdg\", \"bedGraph\"]\n\n def __init__(self, fname, **kwargs):\n self.fname = fname\n\n if fname.split(\".\")[-1] in self._filetypes:\n self.track = pybedtools.BedTool(fname)\n self.ftype = \"wig\"\n else:\n raise ValueError(\"filetype of {} is not supported\".format(fname))\n\n def get_profile(self, interval, **kwargs):\n \"\"\"\n Return summary profile in a given window\n\n Parameters\n ----------\n interval : list, tuple or str\n If interval is a list or tuple, it should contain chromosome (str),\n start (int), end (int). 
If it is a string, it should be of the\n format chrom:start-end\n\n scalefactor : float, optional\n Scale profile by this factor, default 1.0\n\n Returns\n -------\n numpy array\n A summarized profile as a numpy array\n \"\"\"\n scalefactor = kwargs.get(\"scalefactor\", 1.0)\n\n chrom, start, end = self._get_interval(interval)\n int_bed = pybedtools.BedTool(\n \"{} {} {}\".format(chrom, start, end),\n from_string=True)\n\n profile = np.zeros(end - start, dtype=\"f\")\n profile.fill(np.nan)\n\n for f in self.track.intersect(int_bed, u=True):\n if f.chrom == chrom:\n if f.start <= end and f.end >= start:\n if f.start < start:\n f.start = start\n if f.end > end:\n f.end = end\n # in a wig file, 4th column is score\n profile[f.start - start:f.end - start] = float(f.name)\n\n profile = profile * scalefactor\n return profile\n\n def binned_stats(self, in_fname, nbins, split=False, **args):\n \"\"\"\n Yields a binned statistic applied to the track values for\n every feature in in_fname.\n\n Parameters\n ----------\n in_fname : str\n BED file\n\n nbins : int\n number of bins\n\n statistic : str, optional\n Default is \"mean\", other options are \"min\", \"max\" and \"std\"\n \"\"\"\n\n in_track = pybedtools.BedTool(in_fname)\n\n statistic = args.get(\"statistic\", \"mean\")\n if statistic in [\"min\", \"max\", \"std\"]:\n statistic = eval(statistic)\n\n order = {}\n regions = []\n lens = []\n for i, f in enumerate(in_track):\n region = \"{}:{}-{}\".format(f.chrom, f.start, f.end)\n regions.append([f.chrom, f.start, f.end])\n order[region] = i\n lens.append(f.end - f.start)\n max_len = max(lens)\n\n profile = np.zeros((len(regions), max_len))\n for f in self.track.intersect(in_track, wo=True):\n start, end = [int(x) for x in f.fields[5:7]]\n region = \"{}:{}-{}\".format(*f.fields[4:7])\n pos = order[region]\n\n f_start, f_end = int(f[1]), int(f[2])\n\n if f_start < start:\n f_start = start\n if f_end > end:\n f_end = end\n\n profile[pos][f_start - start: f_end - start] = float(f[3])\n\n for l,region,row in zip(lens, regions, profile):\n h,_,_ = binned_statistic(np.arange(l), row, bins=nbins, statistic=statistic)\n yield region + list(h)\n\n\nclass BigWigTrack(Track):\n _filetypes = [\"bw\", \"bigWig\"]\n\n def __init__(self, fname, **kwargs):\n if fname.split(\".\")[-1] in self._filetypes:\n self.track = pyBigWig.open(fname)\n self.ftype = \"bw\"\n else:\n raise ValueError(\"filetype of {} is not supported\".format(fname))\n\n def get_profile(self, interval, **kwargs):\n \"\"\"\n Return summary profile in a given window\n\n Parameters\n ----------\n interval : list, tuple or str\n If interval is a list or tuple, it should contain chromosome (str),\n start (int), end (int). 
If it is a string, it should be of the\n format chrom:start-end\n\n scalefactor : float, optional\n Scale profile by this factor, default 1.0\n\n Returns\n -------\n numpy array\n A summarized profile as a numpy array\n \"\"\"\n scalefactor = kwargs.get(\"scalefactor\", 1.0)\n\n chrom, start, end = self._get_interval(interval)\n profile = np.array(self.track.values(chrom, start, end))\n profile = profile * scalefactor\n return profile\n\n def binned_stats(self, in_fname, nbins, split=False, **args):\n \"\"\"\n Yields a binned statistic applied to the track values for\n every feature in in_fname.\n\n Parameters\n ----------\n in_fname : str\n BED file\n\n nbins : int\n number of bins\n\n statistic : str, optional\n Default is \"mean\", other options are \"min\", \"max\" and \"std\"\n \"\"\"\n #qiuck hack\n # fixed nbins is not int type, need to find where is the problem\n nbins = int(nbins)\n statistic = args.get(\"statistic\", \"mean\")\n use_strand = args.get(\"use_strand\", False)\n in_track = SimpleBed(in_fname)\n for f in in_track:\n try:\n vals = self.track.stats(f.chrom, f.start, f.end,\n type=statistic, nBins=nbins)\n vals = np.array(vals, dtype=\"float\")\n vals = np.nan_to_num(vals)\n if use_strand and f.strand == \"-\":\n vals = vals[::-1]\n yield [f.chrom, f.start, f.end] + list(vals)\n except:\n yield [f.chrom, f.start, f.end] + [0.0] * nbins\n\nclass TabixTrack(Track):\n _filetypes = [\"bg.gz\", \"wig.gz\", \"bed.gz\",\n \"bedGraph.gz\", \"bigWig.gz\", \"bdg.gz\"]\n\n def __init__(self, fname, **kwargs):\n if fname.endswith(\"gz\"):\n if not os.path.exists(fname + \".tbi\"):\n raise ValueError(\"Can't find tabix index for {}\".format(fname))\n for ftype in self._filetypes:\n if fname.endswith(ftype):\n self.tabix_track = pysam.Tabixfile(fname)\n self.ftype = \"tabix\"\n return\n raise ValueError(\"Can't guess format of {}\".format(fname))\n else:\n raise ValueError(\"Can only process bgzipped files.\")\n\n def get_profile(self, interval, **kwargs):\n \"\"\"\n Return summary profile in a given window\n\n Parameters\n ----------\n interval : list, tuple or str\n If interval is a list or tuple, it should contain chromosome (str),\n start (int), end (int). 
If it is a string, it should be of the\n format chrom:start-end\n\n scalefactor : float, optional\n Scale profile by this factor, default 1.0\n\n Returns\n -------\n numpy array\n A summarized profile as a numpy array\n \"\"\"\n scalefactor = kwargs.get(\"scalefactor\", 1.0)\n\n chrom, start, end = self._get_interval(interval)\n profile = np.zeros(end - start)\n\n # Chromosome not in index\n if chrom not in self.tabix_track.contigs:\n return profile\n\n profile.fill(np.nan)\n\n for f in self.tabix_track.fetch(chrom, start, end):\n f = f.split()\n fstart = int(f[1])\n fend = int(f[2])\n if fstart < start:\n fstart = start\n if fend > end:\n fend = end\n profile[fstart - start: fend - end] = float(f[3])\n\n profile = profile * scalefactor\n return profile\n\n def binned_stats(self, in_fname, nbins, split=False, **args):\n \"\"\"\n Yields a binned statistic applied to the track values for\n every feature in in_fname.\n\n Parameters\n ----------\n in_fname : str\n BED file\n\n nbins : int\n number of bins\n\n statistic : str, optional\n Default is \"mean\", other options are \"min\", \"max\" and \"std\"\n \"\"\"\n\n statistic = args.get(\"statistic\", \"mean\")\n in_track = SimpleBed(in_fname)\n\n if statistic in [\"min\", \"max\", \"std\"]:\n statistic = eval(statistic)\n\n for r in in_track:\n profile = np.zeros(r.end - r.start)\n if r.chrom in self.tabix_track.contigs:\n for f in self.tabix_track.fetch(r.chrom, r.start, r.end):\n f = f.split()\n start = int(f[1])\n end = int(f[2])\n if start < r.start:\n start = r.start\n if end > r.end:\n end = r.end\n profile[start - r.start: end - r.end] = float(f[3])\n h,_,_ = binned_statistic(\n np.arange(r.end - r.start),\n profile,\n bins=nbins,\n statistic=statistic)\n yield [r.chrom, r.start, r.end] + list(h)\n"
] |
[
[
"numpy.isnan",
"numpy.arange",
"numpy.nan_to_num",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
leoking99-BIT/Constrained_ILQR
|
[
"08346c0aa9eeb035ae6e3d6643ac9c119cb893d2"
] |
[
"scripts/ilqr/obstacles.py"
] |
[
"import numpy as np \nimport math\nimport pdb\n\nclass Obstacle:\n def __init__(self, args, track_id, bb):\n self.args = args\n self.car_length = bb[0]\n self.car_width = bb[1]\n self.track_id = track_id\n\n def get_obstacle_cost_derivatives(self, npc_traj, i, ego_state):\n\n a = self.car_length + np.abs(npc_traj[2, i]*math.cos(npc_traj[3, i]))*self.args.t_safe + self.args.s_safe_a + self.args.ego_rad\n b = self.car_width + np.abs(npc_traj[2, i]*math.sin(npc_traj[3, i]))*self.args.t_safe + self.args.s_safe_b + self.args.ego_rad\n \n P1 = np.diag([1/a**2, 1/b**2, 0, 0])\n\n theta = npc_traj[3, i]\n theta_ego = ego_state[3]\n\n transformation_matrix = np.array([[ math.cos(theta), math.sin(theta), 0, 0],\n [-math.sin(theta), math.cos(theta), 0, 0],\n [ 0, 0, 0, 0],\n [ 0, 0, 0, 0]])\n \n ego_front = ego_state + np.array([math.cos(theta_ego)*self.args.ego_lf, math.sin(theta_ego)*self.args.ego_lf, 0, 0])\n diff = (transformation_matrix @ (ego_front - npc_traj[:, i])).reshape(-1, 1) # (x- xo)\n c = 1 - diff.T @ P1 @ diff # Transform into a constraint function\n c_dot = -2 * P1 @ diff\n b_f, b_dot_f, b_ddot_f = self.barrier_function(self.args.q1_front, self.args.q2_front, c, c_dot)\n\n ego_rear = ego_state - np.array([math.cos(theta_ego)*self.args.ego_lr, math.sin(theta_ego)*self.args.ego_lr, 0, 0])\n diff = (transformation_matrix @ (ego_rear - npc_traj[:, i])).reshape(-1, 1)\n c = 1 - diff.T @ P1 @ diff\n c_dot = -2 * P1 @ diff\n b_r, b_dot_r, b_ddot_r = self.barrier_function(self.args.q1_rear, self.args.q2_rear, c, c_dot)\n\n return b_dot_f + b_dot_r, b_ddot_f + b_ddot_r\n\n def get_obstacle_cost(self, npc_traj, i, ego_state_nominal, ego_state):\n a = self.car_length + np.abs(npc_traj[2, i]*math.cos(npc_traj[3, i]))*self.args.t_safe + self.args.s_safe_a + self.args.ego_rad\n b = self.car_width + np.abs(npc_traj[2, i]*math.sin(npc_traj[3, i]))*self.args.t_safe + self.args.s_safe_b + self.args.ego_rad\n \n P1 = np.diag([1/a**2, 1/b**2, 0, 0])\n\n theta = npc_traj[3, i]\n theta_ego = ego_state[3]\n theta_ego_nominal = ego_state_nominal[3]\n\n\n transformation_matrix = np.array([[ math.cos(theta), math.sin(theta), 0, 0],\n [-math.sin(theta), math.cos(theta), 0, 0],\n [ 0, 0, 0, 0],\n [ 0, 0, 0, 0]])\n \n # front circle\n ego_front_nominal = ego_state_nominal + np.array([math.cos(theta_ego)*self.args.ego_lf, math.sin(theta_ego)*self.args.ego_lf, 0, 0])\n ego_front = ego_state + np.array([math.cos(theta_ego_nominal)*self.args.ego_lf, math.sin(theta_ego_nominal)*self.args.ego_lf, 0, 0])\n\n x_del = ego_front - ego_front_nominal\n\n diff = (transformation_matrix @ (ego_front_nominal - npc_traj[:, i])).reshape(-1, 1)\n c = 1 - diff.T @ P1 @ diff\n c_dot = -2 * P1 @ diff\n b_f, b_dot_f, b_ddot_f = self.barrier_function(self.args.q1_front, self.args.q2_front, c, c_dot)\n\n cost = b_f + x_del.T @ b_dot_f + x_del.T @ b_ddot_f @ x_del \n\n # rear circle\n ego_rear_nominal = ego_state_nominal - np.array([math.cos(theta_ego)*self.args.ego_lr, math.sin(theta_ego)*self.args.ego_lr, 0, 0])\n ego_rear = ego_state - np.array([math.cos(theta_ego_nominal)*self.args.ego_lr, math.sin(theta_ego_nominal)*self.args.ego_lr, 0, 0])\n\n x_del = ego_rear - ego_rear_nominal\n\n diff = (transformation_matrix @ (ego_rear_normalized - npc_traj[:, i])).reshape(-1, 1)\n c = 1 - diff.T @ P1 @ diff\n c_dot = -2 * P1 @ diff\n b_r, b_dot_r, b_ddot_r = self.barrier_function(self.args.q1_rear, self.args.q2_rear, c, c_dot)\n\n cost += b_r + x_del.T @ b_dot_r + x_del.T @ b_ddot_r @ x_del \n\n return cost\n\n def barrier_function(self, q1, 
q2, c, c_dot):\n b = q1*np.exp(q2*c)\n b_dot = q1*q2*np.exp(q2*c)*c_dot\n b_ddot = q1*(q2**2)*np.exp(q2*c)*np.matmul(c_dot, c_dot.T)\n\n return b, b_dot, b_ddot\n"
] |
[
[
"numpy.diag",
"numpy.exp",
"numpy.matmul"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ShintaroMinami/mican
|
[
"c224975a3ac3766ae82c0c250022ad7b2f2573c1"
] |
[
"pymican/main.py"
] |
[
"import os\nimport subprocess\nfrom typing import List, Union\nimport numpy as np\nimport pandas as pd\nfrom .parse_result import output2dict\n\n\ndir_script = os.path.dirname(os.path.realpath(__file__))\nBINFILEPATH = os.path.abspath(dir_script+'/bin/mican')\n\n\nclass Alignment:\n \"\"\"\n MICAN alignment class\n\n Attributes\n ----------\n outdict : dict\n Alignment info\n mode : str\n Alignment mode\n pdb1, pdb2 : str\n PDB file path\n size1, size2 : int\n Size of protein structure\n nalign : int\n Number of aligned residues\n rmsd : float\n RMSD of aligned residues\n TMscore : float\n TM-score\n sTMscore : float\n SSE weighted TM-score\n seq_identity : float\n Sequence identity as percentage [0,100]\n DALIscore : float\n DALI z-score\n SPscore : float\n SP-score\n TMscore1, TMscore2 : float\n TM-score normalized by each protein length\n coverage1, coverage2 : float\n Aligned coverage for each protein length\n translation_rot : numpy.array(3,3)\n Rotation matrix for superposition protein1 on protein2\n translation_vec : numpy.array(3)\n Translation vector for superposition protein1 on protein2\n alignment : pandas.DataFrame\n Residue-Residue alignment info\n alignlst : List[pandas.item]\n Alignment info for iterator methods\n\n Methods\n -------\n translate_xyz(xyz: np.array(N,3)) -> np.array(N,3)\n Rotate & translate xyz coordinates\n \"\"\"\n\n def __init__(self, outdict: dict):\n self.outdict = outdict\n self.mode = outdict['mode']\n self.pdb1 = outdict['pdb1']\n self.size1 = outdict['size1']\n self.pdb2 = outdict['pdb2']\n self.size2 = outdict['size2']\n self.nalign = outdict['nalign']\n self.rmsd = outdict['rmsd']\n self.TMscore = outdict['TMscore']\n self.sTMscore = outdict['sTMscore']\n self.seq_identity = outdict['seq_identity']\n self.DALIscore = outdict['DALIscore']\n self.SPscore = outdict['SPscore']\n self.TMscore1 = outdict['TMscore1']\n self.coverage1 = outdict['coverage1']\n self.TMscore2 = outdict['TMscore2']\n self.coverage2 = outdict['coverage2']\n self.translation_rot = outdict['translation_rot']\n self.translation_vec = outdict['translation_vec']\n self.alignment = outdict['alignment']\n self.alignlist = list(self.alignment.itertuples())\n\n def __iter__(self):\n return iter(self.alignlist)\n\n def __str__(self):\n return '{}'.format(self.outdict)\n\n def __getitem__(self, key):\n if type(key) == str:\n return self.outdict.get(key)\n elif type(key) == int:\n return self.alignlist[key]\n elif type(key) == slice:\n return self.alignlist[key]\n\n def keys(self):\n return self.outdict.keys()\n\n def translate_xyz(self, xyz: Union[np.ndarray, List[float]])->np.ndarray:\n \"\"\"\n Translate coordinates\n\n Paremeters\n ----------\n xyz : (np.array(3) | np.array(N,3) | List[float])\n Input xyz coordinates\n \n Returns\n -------\n np.array\n Translated xyz coordinates\n \"\"\"\n # list to np.array\n xyz = np.array(xyz, dtype=np.float) if type(xyz) == list else xyz\n # save original shape\n original_shape = xyz.shape\n # add dimention 1 if ndim==1\n xyz = xyz[np.newaxis,:] if xyz.ndim == 1 else xyz\n # translate\n xyz_translated = np.dot(xyz,self.translation_rot.T) + self.translation_vec\n # return\n return np.reshape(xyz_translated, original_shape)\n\n\nclass mican:\n \"\"\"\n MICAN: non-sequential alignment algorithm\n\n Attributes\n ----------\n binary : Filename\n executable binary file path\n \"\"\"\n def __init__(self, binary: str=BINFILEPATH):\n \"\"\"\n Parameters\n ----------\n binary : Filename\n executable binary file path\n \"\"\"\n self.binary = binary\n 
        return\n\n    def align(self, pdb1: str, pdb2: str, options: Union[str, List[str]]=[], return_dict: bool=False)->Union[Alignment, dict]:\n        \"\"\"\n        Alignment calculation\n\n        Parameters\n        ----------\n        pdb1, pdb2 : str\n            Input PDB files.\n        options : (str | [str,...]), default=[]\n            Extra options for mican calculation.\n            For the option details please see (https://github.com/ShintaroMinami/mican).\n        return_dict : bool, default=False\n            If True, return a plain dict instead of an Alignment object.\n\n        Returns\n        -------\n        Alignment object\n\n        if (return_dict == True)\n            dict = {\n                'mode': ('sequential', 'rewirering', 'reverse'),\n                'pdb1': string,\n                'size1': int,\n                'pdb2': string,\n                'size2': int,\n                'nalign': int,\n                'rmsd': float,\n                'TMscore': float,\n                'sTMscore': float,\n                'seq_identity': float,\n                'DALIscore': float,\n                'SPscore': float,\n                'TMscore1': float, # TMscore normalized by size of protein1\n                'coverage1': float,\n                'TMscore2': float, # TMscore normalized by size of protein2\n                'coverage2': float,\n                'translation_rot': numpy.array(3,3),\n                'translation_vec': numpy.array(3),\n                'alignment': pandas.DataFrame\n            }\n        \"\"\"\n        # arguments\n        options = options if type(options) == str else ' '.join(options)\n        args = [pdb1, pdb2] + options.split()\n        # mican calc\n        process = subprocess.Popen([self.binary,'-z']+args,\n                                   stdout=subprocess.PIPE,\n                                   stderr=subprocess.PIPE)\n        # output\n        output, _ = process.communicate()\n        outdict = output2dict(output.decode('utf-8'))\n        # return\n        return outdict if return_dict else Alignment(outdict)\n\n"
] |
[
[
"numpy.reshape",
"numpy.dot",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wconnell/siamese-triplet
|
[
"54296bac5bdd861dc4f43c37a5024de8d285afaa"
] |
[
"trainer.py"
] |
[
"import torch\nimport numpy as np\n\n\ndef fit(train_loader, val_loader, model, loss_fn, optimizer, scheduler, n_epochs, cuda, log_interval, metrics=[],\n start_epoch=0):\n \"\"\"\n Loaders, model, loss function and metrics should work together for a given task,\n i.e. The model should be able to process data output of loaders,\n loss function should process target output of loaders and outputs from the model\n\n Examples: Classification: batch loader, classification model, NLL loss, accuracy metric\n Siamese network: Siamese loader, siamese model, contrastive loss\n Online triplet learning: batch loader, embedding model, online triplet loss\n \"\"\"\n train_loss_total = []\n val_loss_total = []\n \n for epoch in range(0, start_epoch):\n scheduler.step()\n\n for epoch in range(start_epoch, n_epochs):\n\n # Train stage\n train_loss, metrics = train_epoch(train_loader, model, loss_fn, optimizer, cuda, log_interval, metrics)\n train_loss_total.append(train_loss)\n\n message = 'Epoch: {}/{}. Train set: Average loss: {:.4f}'.format(epoch + 1, n_epochs, train_loss)\n for metric in metrics:\n message += '\\t{}: {}'.format(metric.name(), metric.value())\n\n val_loss, metrics = test_epoch(val_loader, model, loss_fn, cuda, metrics)\n val_loss /= len(val_loader)\n val_loss_total.append(val_loss)\n\n message += '\\nEpoch: {}/{}. Validation set: Average loss: {:.4f}'.format(epoch + 1, n_epochs,\n val_loss)\n for metric in metrics:\n message += '\\t{}: {}'.format(metric.name(), metric.value())\n\n scheduler.step()\n\n print(message)\n \n return train_loss_total, val_loss_total\n\n\ndef train_epoch(train_loader, model, loss_fn, optimizer, cuda, log_interval, metrics):\n device = next(model.parameters()).device\n for metric in metrics:\n metric.reset()\n \n model.train()\n losses = []\n total_loss = 0\n\n for batch_idx, (data, target) in enumerate(train_loader):\n target = target if len(target) > 0 else None\n if not type(data) in (tuple, list):\n data = (data,)\n if cuda:\n data = tuple(d.cuda(device=device.index) for d in data)\n if target is not None:\n target = target.cuda(device=device.index)\n\n optimizer.zero_grad()\n outputs = model(*data)\n \n if type(outputs) not in (tuple, list):\n outputs = (outputs,)\n\n loss_inputs = outputs\n if target is not None:\n target = (target,)\n loss_inputs += target\n\n loss_outputs = loss_fn(*loss_inputs)\n loss = loss_outputs[0] if type(loss_outputs) in (tuple, list) else loss_outputs\n losses.append(loss.item())\n total_loss += loss.item()\n loss.backward()\n optimizer.step()\n\n for metric in metrics:\n metric(outputs, target, loss_outputs)\n\n if batch_idx % log_interval == 0:\n message = 'Train: [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n batch_idx * len(data[0]), len(train_loader.dataset),\n 100. 
* batch_idx / len(train_loader), np.mean(losses))\n for metric in metrics:\n message += '\\t{}: {}'.format(metric.name(), metric.value())\n\n print(message)\n losses = []\n\n total_loss /= (batch_idx + 1)\n# torch.cuda.empty_cache()\n return total_loss, metrics\n\n\ndef test_epoch(val_loader, model, loss_fn, cuda, metrics):\n device = next(model.parameters()).device\n with torch.no_grad():\n for metric in metrics:\n metric.reset()\n model.eval()\n val_loss = 0\n for batch_idx, (data, target) in enumerate(val_loader):\n target = target if len(target) > 0 else None\n if not type(data) in (tuple, list):\n data = (data,)\n if cuda:\n data = tuple(d.cuda(device=device.index) for d in data)\n if target is not None:\n target = target.cuda(device=device.index)\n\n outputs = model(*data)\n \n if type(outputs) not in (tuple, list):\n outputs = (outputs,)\n loss_inputs = outputs\n if target is not None:\n target = (target,)\n loss_inputs += target\n\n loss_outputs = loss_fn(*loss_inputs)\n loss = loss_outputs[0] if type(loss_outputs) in (tuple, list) else loss_outputs\n val_loss += loss.item()\n\n for metric in metrics:\n metric(outputs, target, loss_outputs)\n\n return val_loss, metrics\n"
] |
[
[
"torch.no_grad",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
00-01/gap_sdk
|
[
"25444d752b26ccf0b848301c381692d77172852c",
"25444d752b26ccf0b848301c381692d77172852c",
"25444d752b26ccf0b848301c381692d77172852c",
"25444d752b26ccf0b848301c381692d77172852c"
] |
[
"tools/nntool/importer/onnx/handlers/backend/slice.py",
"examples/nntool/mnist_rnn/model/save_samples.py",
"tools/nntool/utils/ssd_postprocess_decoder.py",
"tools/nntool/importer/onnx/handlers/backend/tile.py"
] |
[
"# Copyright (C) 2020 GreenWaves Technologies, SAS\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n\nimport math\nimport sys\n\nimport numpy as np\nfrom graph.dim import Dim\nfrom graph.types import ConstantInputParameters, NNEdge, StridedSliceParameters\nfrom importer.common.constant_mixin import ConstantMixin\nfrom importer.common.provisional_dim import ProvisionalDim\nfrom importer.onnx.common import logger\n\nfrom ..backend_handler import BackendHandler\nfrom ..handler import onnx_op, partial_support, ps_description\n\n\n@onnx_op(\"Slice\")\n@partial_support(True)\n@ps_description(\"Supports only constant inputs for starts, ends, axes and steps\")\nclass Slice(ConstantMixin, BackendHandler):\n\n @classmethod\n def _common(cls, node, starts, ends, axes, steps, **kwargs):\n all_nodes = kwargs['all_nodes']\n G = kwargs['G']\n valid_name = kwargs['valid_name']\n x = all_nodes[node.input[0]]\n x_shape = np.array(x[2].shape)\n x_rank = len(x_shape)\n axes = cls._resolve_negative_ranks(axes, len(x_shape)) if axes else tuple(range(x_rank))\n axes_rank = len(axes)\n steps = steps if steps else [1] * axes_rank\n slices = np.stack([starts, ends, steps]).transpose((1, 0))\n p_slices = []\n p_shape = []\n for idx, dim in enumerate(x_shape):\n try:\n if dim is None:\n p_slices.append(None)\n p_shape.append(None)\n else:\n slice_idx = axes.index(idx) # @IgnoreException\n begin, end, step = slices[slice_idx]\n begin = max(min(begin if begin >= 0 else dim + begin, dim), 0)\n end = max(min(end if end >= 0 else dim + end, dim), -1)\n # -sys.maxsize is used to indicate 0 in the reverse slice direction\n # this makes it compatible with the numpy slice\n p_slices.append((begin, -sys.maxsize if end == -1 else end, step))\n if step < 0:\n p_shape.append(math.ceil((begin - end)/-step))\n else:\n p_shape.append(math.ceil((end - begin)/step))\n\n except ValueError:\n p_slices.append((0, dim, 1))\n p_shape.append(dim)\n slices = cls._get_real_dim(p_slices)\n shape = cls._get_real_dim(p_shape)\n\n params = StridedSliceParameters(\n valid_name, act_slice=slices, out_shape=shape)\n if cls.is_constant(x):\n x_val = cls.get_constant(x)\n x_val = params.numpy_slice(x_val)\n if x_val.size < 10:\n logger.info(\"reducing %s to a constant %s\", valid_name, x_val)\n else:\n logger.info(\"reducing %s to a constant\", valid_name)\n params = ConstantInputParameters(valid_name, dims=Dim.unnamed(x_val.shape), value=x_val)\n else:\n G.add_edge(NNEdge(from_node=x[0], to_node=params, from_idx=x[1], to_idx=0))\n all_nodes[node.output[0]] = (params, 0, ProvisionalDim(p_shape), None)\n return params\n\n @classmethod\n def _pre10(cls, node, **kwargs):\n starts = node.attrs['starts']\n ends = node.attrs['ends']\n axes = node.attrs['axes'] if 'axes' in node.attrs else None\n return cls._common(node, starts, ends, axes, None, **kwargs)\n\n @classmethod\n def _post10(cls, node, **kwargs):\n all_nodes = kwargs['all_nodes']\n inputs = 
[all_nodes[inp] for inp in node.input]\n\n starts = tuple(cls.get_constant(inputs[1]))\n ends = tuple(cls.get_constant(inputs[2]))\n axes = tuple(cls.get_constant(inputs[3])) if len(inputs) >= 4 and inputs[3] else None\n steps = tuple(cls.get_constant(inputs[4])) if len(inputs) >= 5 and inputs[4] else None\n\n return cls._common(node, starts, ends, axes, steps, **kwargs)\n\n @classmethod\n def version_1(cls, node, **kwargs):\n return cls._pre10(node, **kwargs)\n\n @classmethod\n def version_10(cls, node, **kwargs):\n return cls._post10(node, **kwargs)\n\n @classmethod\n def version_11(cls, node, **kwargs):\n return cls._post10(node, **kwargs)\n\n @classmethod\n def version_13(cls, node, **kwargs):\n return cls._post10(node, **kwargs)\n",
"#!/usr/bin/env python3\n# PYTHON_ARGCOMPLETE_OK\n#\n\nimport argparse\nimport os\nimport random\nimport importlib\n\nimport argcomplete\nimport numpy as np\nimport tensorflow.keras.preprocessing.image as image\nimport tensorflow.keras.datasets as datasets\n\ndef create_parser():\n # create the top-level parser\n parser = argparse.ArgumentParser(prog='save_samples')\n parser.add_argument('-d', '--directory',\n default=\"sample_project/images\",\n help='directory to save samples to')\n parser.add_argument('-n', '--number_of_examples',\n default=1,\n type=int,\n help='number of examples per number')\n parser.add_argument('-s', '--set',\n help='train or test image set',\n choices=['train', 'test'],\n default='test')\n return parser\n\ndef write_image(directory, num, img_idx, sample):\n sample = np.expand_dims(sample, axis=-1)\n image.save_img(os.path.join(directory, \"{}_{}.pgm\".format(img_idx, num)), sample)\n\ndef save_samples(per_number, directory, use_set='test'):\n (x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()\n (x, y) = (x_train, y_train) if use_set == 'train' else (x_test, y_test)\n index = {k: [] for k in range(10)}\n for idx, value in enumerate(y):\n index[value].append(idx)\n random.seed()\n for idx in range(10):\n sample_set = index[idx]\n for _ in range(per_number):\n img_idx = random.randrange(0, len(sample_set))\n write_image(directory, idx, sample_set[img_idx], x[sample_set[img_idx]])\n del sample_set[img_idx]\n\ndef main():\n parser = create_parser()\n argcomplete.autocomplete(parser)\n args = parser.parse_args()\n\n save_samples(args.number_of_examples, args.directory, args.set)\n\nif __name__ == \"__main__\":\n main()\n\n",
"# Copyright (C) 2020 GreenWaves Technologies, SAS\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n\n# from . import get_ModelInfo_FromPipeline\nimport copy\nimport os\nimport json\nimport numpy as np\n\nclass ValException(Exception):\n pass\n\nclass DecodeBboxes(object):\n '''\n decode anchor boxes using the offsets estimated by ssd model. The class\n requires 4 parameters as x_scale, y_scale, w_scale, and h_scale. These\n four parameters are realated to the variances in the original ssd paper:\n\n _var = 1./_scale\n\n inputs : offsets and default anchors in one of the following format\n decoder_configuration = {'using_json_config':{'INCLUDE':False, 'json_config_path':post_processing_json_path},\\\n 'using_pipeline_config':{'INCLUDE':False, 'pipeline_config_path':pipeline_config_path},\\\n 'using_params':{'INCLUDE':True, 'params':{'x_scale':10.,'y_scale':10.,\n 'height_scale':5., 'width_scale':5.}}}\n outputs: decoded anchors\n '''\n def __init__(self, decoder_configuration):\n # pylint: disable=no-member\n # all allowed keys will be initialized as class attributes\n allowed_keys = set(['using_json_config', 'using_pipeline_config', 'using_params'])\n # initialize all allowed keys to false\n self.__dict__.update((key, None) for key in allowed_keys)\n # and update the given keys by their given values\n self.__dict__.update((key, value) for key, value in decoder_configuration.items() if key in allowed_keys)\n\n if self.using_json_config['INCLUDE']:\n json_config_path = self.using_json_config['json_config_path']\n if not os.path.isfile(json_config_path):\n raise ValException('\\n\\n the path to the json file is not valid.')\n with open(json_config_path) as json_file:\n json_data = json.load(json_file)\n # x_scale, y_scale, h_scale, w_scale\n if isinstance(json_data['x_scale'], (float, int)) and json_data['x_scale'] > 0:\n self.x_scale = json_data['x_scale']\n else:\n raise ValException('\\n\\n no valid x_scale is json file, shoulde be postive float/integer ...')\n if isinstance(json_data['y_scale'], (float, int)) and json_data['y_scale'] > 0:\n self.y_scale = json_data['y_scale']\n else:\n raise ValException('\\n\\n no valid y_scale is json file, shoulde be postive float/integer ...')\n if isinstance(json_data['h_scale'], (float, int)) and json_data['h_scale'] > 0:\n self.height_scale = json_data['h_scale']\n else:\n raise ValException('\\n\\n no valid h_scale is json file, shoulde be postive float/integer ...')\n if isinstance(json_data['w_scale'], (float, int)) and json_data['w_scale'] > 0:\n self.width_scale = json_data['w_scale']\n else:\n raise ValException('\\n\\n no valid w_scale is json file, shoulde be postive float/integer ...')\n elif self.using_pipeline_config['INCLUDE']:\n raise ValException('Need to import object_detection from tf')\n # pipeline_config_path = self.using_pipeline_config['pipeline_config_path']\n # if not os.path.isfile(pipeline_config_path):\n # raise 
ValException('\\n\\n the path for pipeline configuration dose not exist...')\n # pipeline_info = get_ModelInfo_FromPipeline(pipeline_config_path).pipeline_config\n # box_coder = pipeline_info.model.ssd.box_coder.faster_rcnn_box_coder\n # # x_scale, y_scale, h_scale, w_scale\n # self.x_scale = box_coder.x_scale\n # self.y_scale = box_coder.y_scale\n # self.height_scale = box_coder.height_scale\n # self.width_scale = box_coder.width_scale\n elif self.using_params['INCLUDE']:\n params = self.using_params['params']\n if isinstance(params['x_scale'], (float, int)) and params['x_scale'] > 0:\n self.x_scale = params['x_scale']\n else:\n raise ValException('\\n\\n no valid x_scale is assigned, shoulde be postive float/integer ...')\n if isinstance(params['y_scale'], (float, int)) and params['y_scale'] > 0:\n self.y_scale = params['y_scale']\n else:\n raise ValException('\\n\\n no valid y_scale is assigned, shoulde be postive float/integer ...')\n if isinstance(params['h_scale'], (float, int)) and params['h_scale'] > 0:\n self.height_scale = params['h_scale']\n else:\n raise ValException('\\n\\n no valid height_scale is assigned, shoulde be postive float/integer ...')\n if isinstance(params['w_scale'], (float, int)) and params['w_scale'] > 0:\n self.width_scale = params['w_scale']\n else:\n raise ValException('\\n\\n no valid width_scale is assigned, shoulde be postive float/integer ...')\n # indices\n self.cnty, self.cntx, self.h, self.w = 0, 1, 2, 3\n self.ymin, self.xmin, self.ymax, self.xmax = 0, 1, 2, 3\n\n def convert_cors2cnts(self, bboxes_cors):\n bboxes_cors = copy.deepcopy(bboxes_cors)\n bboxes_cnts = np.zeros_like(bboxes_cors)\n bboxes_cors[:, self.h] = bboxes_cnts[:, self.ymax] - bboxes_cnts[:, self.ymin]\n bboxes_cors[:, self.w] = bboxes_cnts[:, self.xmax] - bboxes_cnts[:, self.xmin]\n bboxes_cors[:, self.cnty] = bboxes_cnts[:, self.ymin] + 0.5 * bboxes_cors[:, self.h]\n bboxes_cors[:, self.cntx] = bboxes_cnts[:, self.xmin] + 0.5 * bboxes_cors[:, self.w]\n return bboxes_cors\n\n def __call__(self, offsets_ref, anchors_ref, anchors_type='centers'):\n '''\n offsets: [batch_size, num_anchors, 4]\n anchors: [num_anchors, 4]: centers\n anchors_type: 'centers' | 'boxes'\n '''\n anchors = copy.deepcopy(anchors_ref)\n offsets = copy.deepcopy(offsets_ref)\n\n if anchors_type == 'centers':\n anchors_cnts = anchors\n else:\n anchors_cnts = self.convert_cors2cnts(anchors)\n\n # add batch dimension, if not included\n if offsets.ndim == 2:\n offsets = offsets.reshape([1, -1, offsets.shape[1]])\n anchors_cnts = anchors_cnts.reshape([1, -1, anchors_cnts.shape[1]])\n else: # offsets.ndim===3\n anchors_cnts = np.tile(anchors_cnts, (offsets.shape[0], 1, 1))\n\n # apply offsets\n ycenter = (offsets[:, :, self.cnty]/self.y_scale) * anchors_cnts[:, :, self.h] + anchors_cnts[:, :, self.cnty]\n xcenter = (offsets[:, :, self.cntx]/self.x_scale) * anchors_cnts[:, :, self.w] + anchors_cnts[:, :, self.cntx]\n half_h = 0.5 * np.exp(offsets[:, :, self.h]/self.height_scale) * anchors_cnts[:, :, self.h]\n half_w = 0.5 * np.exp(offsets[:, :, self.w]/self.width_scale) * anchors_cnts[:, :, self.w]\n\n # min-max or corners format: required for nms\n decoded_anchors = np.zeros_like(anchors_cnts)\n decoded_anchors[:, :, self.ymin] = ycenter - half_h\n decoded_anchors[:, :, self.ymax] = ycenter + half_h\n decoded_anchors[:, :, self.xmin] = xcenter - half_w\n decoded_anchors[:, :, self.xmax] = xcenter + half_w\n return decoded_anchors\n",
"# Copyright (C) 2020 GreenWaves Technologies, SAS\n\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <https://www.gnu.org/licenses/>.\n\nimport operator\nfrom functools import reduce\n\nimport numpy as np\nfrom graph.dim import Dim\nfrom graph.types import ConstantInputParameters, NNEdge, ReshapeParameters\nfrom importer.common.constant_mixin import ConstantMixin\nfrom importer.common.provisional_dim import ProvisionalDim\nfrom importer.onnx.common import logger\n\nfrom ..backend_handler import BackendHandler\nfrom ..handler import onnx_op\n\n\ndef prod(iterable):\n return reduce(operator.mul, iterable, 1)\n\n@onnx_op(\"Tile\")\nclass Tile(ConstantMixin, BackendHandler):\n\n @classmethod\n def _common(cls, node, **kwargs):\n all_nodes = kwargs['all_nodes']\n G = kwargs['G']\n valid_name = kwargs['valid_name']\n inputs = [all_nodes[inp] for inp in node.input]\n if not all(cls.is_constant(inp) for inp in inputs):\n raise NotImplementedError(\"Tile is only implemented on constants\")\n\n inp_vals = [cls.get_constant(inp) for inp in inputs]\n out_val = np.tile(inp_vals[0], inp_vals[1])\n logger.info(\"reducing %s to a constant\", valid_name)\n params = ConstantInputParameters(\n valid_name,\n value=out_val\n )\n pshape = ProvisionalDim(out_val.shape)\n all_nodes[node.output[0]] = (params, 0, pshape, inputs[0][3])\n return params\n\n @classmethod\n def version_1(cls, node, **kwargs):\n return cls._common(node, **kwargs)\n\n @classmethod\n def version_6(cls, node, **kwargs):\n return cls._common(node, **kwargs)\n\n @classmethod\n def version_13(cls, node, **kwargs):\n return cls._common(node, **kwargs)\n"
] |
[
[
"numpy.array",
"numpy.stack"
],
[
"numpy.expand_dims",
"tensorflow.keras.datasets.mnist.load_data"
],
[
"numpy.exp",
"numpy.zeros_like",
"numpy.tile"
],
[
"numpy.tile"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
triwahyuu/CenterNet
|
[
"0bdfca453456909e6131e5ee10b2e1f897a54905"
] |
[
"src/lib/trains/ddd.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport numpy as np\n\nfrom models.losses import FocalLoss, L1Loss, BinRotLoss\nfrom models.decode import ddd_decode\nfrom models.utils import _sigmoid\nfrom utils.debugger import Debugger\nfrom utils.post_process import ddd_post_process\nfrom utils.oracle_utils import gen_oracle_map\nfrom .base_trainer import BaseTrainer\n\n\nclass DddLoss(torch.nn.Module):\n def __init__(self, opt):\n super(DddLoss, self).__init__()\n self.crit = torch.nn.MSELoss() if opt.mse_loss else FocalLoss()\n self.crit_reg = L1Loss()\n self.crit_rot = BinRotLoss()\n self.opt = opt\n\n def forward(self, outputs, batch):\n opt = self.opt\n\n hm_loss, dep_loss, rot_loss, dim_loss = 0, 0, 0, 0\n wh_loss, off_loss = 0, 0\n for s in range(opt.num_stacks):\n output = outputs[s]\n output['hm'] = _sigmoid(output['hm'])\n output['dep'] = 1. / (output['dep'].sigmoid() + 1e-6) - 1.\n\n if opt.eval_oracle_dep:\n output['dep'] = torch.from_numpy(gen_oracle_map(\n batch['dep'].detach().cpu().numpy(),\n batch['ind'].detach().cpu().numpy(),\n opt.output_w, opt.output_h)).to(opt.device)\n\n hm_loss += self.crit(output['hm'], batch['hm']) / opt.num_stacks\n if opt.dep_weight > 0:\n dep_loss += self.crit_reg(output['dep'], batch['reg_mask'],\n batch['ind'], batch['dep']) / opt.num_stacks\n if opt.dim_weight > 0:\n dim_loss += self.crit_reg(output['dim'], batch['reg_mask'],\n batch['ind'], batch['dim']) / opt.num_stacks\n if opt.rot_weight > 0:\n rot_loss += self.crit_rot(output['rot'], batch['rot_mask'],\n batch['ind'], batch['rotbin'],\n batch['rotres']) / opt.num_stacks\n if opt.reg_bbox and opt.wh_weight > 0:\n wh_loss += self.crit_reg(output['wh'], batch['rot_mask'],\n batch['ind'], batch['wh']) / opt.num_stacks\n if opt.reg_offset and opt.off_weight > 0:\n off_loss += self.crit_reg(output['reg'], batch['rot_mask'],\n batch['ind'], batch['reg']) / opt.num_stacks\n loss = opt.hm_weight * hm_loss + opt.dep_weight * dep_loss + \\\n opt.dim_weight * dim_loss + opt.rot_weight * rot_loss + \\\n opt.wh_weight * wh_loss + opt.off_weight * off_loss\n\n loss_stats = {'loss': loss, 'hm_loss': hm_loss, 'dep_loss': dep_loss,\n 'dim_loss': dim_loss, 'rot_loss': rot_loss,\n 'wh_loss': wh_loss, 'off_loss': off_loss}\n return loss, loss_stats\n\n\nclass DddTrainer(BaseTrainer):\n def __init__(self, opt, model, optimizer=None):\n super(DddTrainer, self).__init__(opt, model, optimizer=optimizer)\n\n def _get_losses(self, opt):\n loss_states = ['loss', 'hm_loss', 'dep_loss', 'dim_loss', 'rot_loss',\n 'wh_loss', 'off_loss']\n loss = DddLoss(opt)\n return loss_states, loss\n\n def debug(self, batch, output, iter_id):\n opt = self.opt\n wh = output['wh'] if opt.reg_bbox else None\n reg = output['reg'] if opt.reg_offset else None\n dets = ddd_decode(output['hm'], output['rot'], output['dep'],\n output['dim'], wh=wh, reg=reg, K=opt.K)\n\n # x, y, score, r1-r8, depth, dim1-dim3, cls\n dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])\n calib = batch['meta']['calib'].detach().numpy()\n # x, y, score, rot, depth, dim1, dim2, dim3\n # if opt.dataset == 'gta':\n # dets[:, 12:15] /= 3\n dets_pred = ddd_post_process(\n dets.copy(), batch['meta']['c'].detach().numpy(),\n batch['meta']['s'].detach().numpy(), calib, opt)\n dets_gt = ddd_post_process(\n batch['meta']['gt_det'].detach().numpy().copy(),\n batch['meta']['c'].detach().numpy(),\n batch['meta']['s'].detach().numpy(), calib, opt)\n # for i in 
range(input.size(0)):\n for i in range(1):\n debugger = Debugger(dataset=opt.dataset, ipynb=(opt.debug == 3),\n theme=opt.debugger_theme)\n img = batch['input'][i].detach().cpu().numpy().transpose(1, 2, 0)\n img = ((img * self.opt.std + self.opt.mean) * 255.).astype(np.uint8)\n pred = debugger.gen_colormap(\n output['hm'][i].detach().cpu().numpy())\n gt = debugger.gen_colormap(batch['hm'][i].detach().cpu().numpy())\n debugger.add_blend_img(img, pred, 'hm_pred')\n debugger.add_blend_img(img, gt, 'hm_gt')\n # decode\n debugger.add_ct_detection(\n img, dets[i], show_box=opt.reg_bbox, center_thresh=opt.center_thresh,\n img_id='det_pred')\n debugger.add_ct_detection(\n img, batch['meta']['gt_det'][i].cpu().numpy().copy(),\n show_box=opt.reg_bbox, img_id='det_gt')\n debugger.add_3d_detection(\n batch['meta']['image_path'][i], dets_pred[i], calib[i],\n center_thresh=opt.center_thresh, img_id='add_pred')\n debugger.add_3d_detection(\n batch['meta']['image_path'][i], dets_gt[i], calib[i],\n center_thresh=opt.center_thresh, img_id='add_gt')\n # debugger.add_bird_view(\n # dets_pred[i], center_thresh=opt.center_thresh, img_id='bird_pred')\n # debugger.add_bird_view(dets_gt[i], img_id='bird_gt')\n debugger.add_bird_views(\n dets_pred[i], dets_gt[i],\n center_thresh=opt.center_thresh, img_id='bird_pred_gt')\n\n # debugger.add_blend_img(img, pred, 'out', white=True)\n debugger.compose_vis_add(\n batch['meta']['image_path'][i], dets_pred[i], calib[i],\n opt.center_thresh, pred, 'bird_pred_gt', img_id='out')\n # debugger.add_img(img, img_id='out')\n if opt.debug == 4:\n debugger.save_all_imgs(\n opt.debug_dir, prefix='{}'.format(iter_id))\n else:\n debugger.show_all_imgs(pause=True)\n\n def save_result(self, output, batch, results):\n opt = self.opt\n wh = output['wh'] if opt.reg_bbox else None\n reg = output['reg'] if opt.reg_offset else None\n dets = ddd_decode(output['hm'], output['rot'], output['dep'],\n output['dim'], wh=wh, reg=reg, K=opt.K)\n\n # x, y, score, r1-r8, depth, dim1-dim3, cls\n dets = dets.detach().cpu().numpy().reshape(1, -1, dets.shape[2])\n calib = batch['meta']['calib'].detach().numpy()\n # x, y, score, rot, depth, dim1, dim2, dim3\n dets_pred = ddd_post_process(\n dets.copy(), batch['meta']['c'].detach().numpy(),\n batch['meta']['s'].detach().numpy(), calib, opt)\n img_id = batch['meta']['img_id'].detach().numpy()[0]\n results[img_id] = dets_pred[0]\n for j in range(1, opt.num_classes + 1):\n keep_inds = (results[img_id][j][:, -1] > opt.center_thresh)\n results[img_id][j] = results[img_id][j][keep_inds]\n"
] |
[
[
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
erasche/biopython
|
[
"fba3c8a926a07b8b6b821759db8c71d93c51be86"
] |
[
"setup.py"
] |
[
"\"\"\"Distutils based setup script for Biopython.\n\nThis uses Distutils (http://python.org/sigs/distutils-sig/) the standard\npython mechanism for installing packages. For the easiest installation\njust type the command:\n\npython setup.py install\n\nFor more in-depth instructions, see the installation section of the\nBiopython manual, linked to from:\n\nhttp://biopython.org/wiki/Documentation\n\nOr for more details about the options available from distutils, look at\nthe 'Installing Python Modules' distutils documentation, available from:\n\nhttp://python.org/sigs/distutils-sig/doc/\n\nOr, if all else fails, feel free to write to the sign up to the Biopython\nmailing list and ask for help. See:\n\nhttp://biopython.org/wiki/Mailing_lists\n\"\"\"\nfrom __future__ import print_function\n\nimport sys\nimport os\nimport shutil\n\nfrom distutils.core import setup\nfrom distutils.core import Command\nfrom distutils.command.install import install\nfrom distutils.command.build_py import build_py\nfrom distutils.command.build_ext import build_ext\nfrom distutils.extension import Extension\n\n_CHECKED = None\n\n\ndef osx_clang_fix():\n \"\"\"Add clang switch to ignore unused arguments to avoid OS X compile error.\n\n This is a hack to cope with Apple shipping a version of Python compiled\n with the -mno-fused-madd argument which clang from XCode 5.1 does not\n support::\n\n $ cc -v\n Apple LLVM version 5.1 (clang-503.0.40) (based on LLVM 3.4svn)\n Target: x86_64-apple-darwin13.2.0\n Thread model: posix\n\n $ which python-config\n /Library/Frameworks/Python.framework/Versions/Current/bin/python-config\n\n $ python-config --cflags\n -I/Library/Frameworks/Python.framework/Versions/2.5/include/python2.5\n -I/Library/Frameworks/Python.framework/Versions/2.5/include/python2.5\n -arch ppc -arch i386 -isysroot /Developer/SDKs/MacOSX10.4u.sdk\n -fno-strict-aliasing -Wno-long-double -no-cpp-precomp -mno-fused-madd\n -fno-common -dynamic -DNDEBUG -g -O3\n\n We can avoid the clang compilation error with -Qunused-arguments which is\n (currently) harmless if gcc is being used instead (e.g. 
compiling Biopython\n against a locally compiled Python rather than the Apple provided Python).\n \"\"\"\n # see http://lists.open-bio.org/pipermail/biopython-dev/2014-April/011240.html\n if sys.platform != \"darwin\":\n return\n # see also Bio/_py3k/__init__.py (which we can't use in setup.py)\n if sys.version_info[0] >= 3:\n from subprocess import getoutput\n else:\n from commands import getoutput\n cc = getoutput(\"cc -v\")\n if \"gcc\" in cc or \"clang\" not in cc:\n return\n for flag in [\"CFLAGS\", \"CPPFLAGS\"]:\n if flag not in os.environ:\n os.environ[flag] = \"-Qunused-arguments\"\n elif \"-Qunused-arguments\" not in os.environ[flag]:\n os.environ[flag] += \" -Qunused-arguments\"\n\nosx_clang_fix()\n\n\ndef is_pypy():\n import platform\n try:\n if platform.python_implementation() == 'PyPy':\n return True\n except AttributeError:\n # New in Python 2.6, not in Jython yet either\n pass\n return False\n\n\ndef is_ironpython():\n return sys.platform == \"cli\"\n # TODO - Use platform as in Pypy test?\n\n\ndef get_yes_or_no(question, default):\n if default:\n option_str = \"(Y/n)\"\n default_str = 'y'\n else:\n option_str = \"(y/N)\"\n default_str = 'n'\n\n while True:\n print(\"%s %s:\" % (question, option_str))\n if sys.version_info[0] == 3:\n response = input().lower()\n else:\n response = raw_input().lower()\n if not response:\n response = default_str\n if response[0] in ['y', 'n']:\n break\n print(\"Please answer y or n.\")\n return response[0] == 'y'\n\n\n# Make sure we have the right Python version.\nif sys.version_info[:2] < (2, 6):\n print(\"Biopython requires Python 2.6 or 2.7 (or Python 3.3 or later). \"\n \"Python %d.%d detected\" % sys.version_info[:2])\n sys.exit(1)\nelif is_pypy() and sys.version_info[0] == 3 and sys.version_info[:2] == (3, 2):\n # PyPy3 2.4.0 is compatibile with Python 3.2.5 plus unicode literals\n # so ought to work with Biopython\n pass\nelif sys.version_info[0] == 3 and sys.version_info[:2] < (3, 3):\n print(\"Biopython requires Python 3.3 or later (or Python 2.6 or 2.7). \"\n \"Python %d.%d detected\" % sys.version_info[:2])\n sys.exit(1)\n\n\ndef check_dependencies_once():\n # Call check_dependencies, but cache the result for subsequent\n # calls.\n global _CHECKED\n if _CHECKED is None:\n _CHECKED = check_dependencies()\n return _CHECKED\n\n\ndef is_automated():\n \"\"\"Check for installation with easy_install or pip.\n \"\"\"\n is_automated = False\n # easy_install: --dist-dir option passed\n try:\n dist_dir_i = sys.argv.index(\"--dist-dir\")\n except ValueError:\n dist_dir_i = None\n if dist_dir_i is not None:\n dist_dir = sys.argv[dist_dir_i + 1]\n if \"egg-dist-tmp\" in dist_dir:\n is_automated = True\n # pip -- calls from python directly with \"-c\"\n if sys.argv in [[\"-c\", \"develop\", \"--no-deps\"],\n [\"--no-deps\", \"-c\", \"develop\"],\n [\"-c\", \"egg_info\"]] \\\n or \"pip-egg-info\" in sys.argv \\\n or sys.argv[:3] == [\"-c\", \"install\", \"--record\"] \\\n or sys.argv[:4] == ['-c', 'install', '--single-version-externally-managed',\n '--record']:\n is_automated = True\n return is_automated\n\n\ndef check_dependencies():\n \"\"\"Return whether the installation should continue.\"\"\"\n # There should be some way for the user to tell specify not to\n # check dependencies. For example, it probably should not if\n # the user specified \"-q\". However, I'm not sure where\n # distutils stores that information. Also, install has a\n # --force option that gets saved in self.user_options. It\n # means overwrite previous installations. 
If the user has\n # forced an installation, should we also ignore dependencies?\n\n # We only check for NumPy, as this is a compile time dependency\n if is_Numpy_installed():\n return True\n if is_automated():\n return True # For automated builds go ahead with installed packages\n if os.name == 'java':\n return True # NumPy is not avaliable for Jython (for now)\n if is_pypy():\n return True # Full NumPy not available for PyPy (for now)\n if is_ironpython():\n return True # We're ignoring NumPy under IronPython (for now)\n\n print(\"\"\"\nNumerical Python (NumPy) is not installed.\n\nThis package is required for many Biopython features. Please install\nit before you install Biopython. You can install Biopython anyway, but\nanything dependent on NumPy will not work. If you do this, and later\ninstall NumPy, you should then re-install Biopython.\n\nYou can find NumPy at http://www.numpy.org\n\"\"\")\n # exit automatically if running as part of some script\n # (e.g. PyPM, ActiveState's Python Package Manager)\n if not sys.stdout.isatty():\n sys.exit(-1)\n # We can ask the user\n return get_yes_or_no(\"Do you want to continue this installation?\", False)\n\n\nclass install_biopython(install):\n \"\"\"Override the standard install to check for dependencies.\n\n This will just run the normal install, and then print warning messages\n if packages are missing.\n\n \"\"\"\n # Adds support for the single-version-externally-managed flag\n # which is present in setuptools but not distutils. pip requires it.\n # In setuptools this forces installation the \"old way\" which we\n # only support here, so we just make it a no-op.\n user_options = install.user_options + [\n ('single-version-externally-managed', None,\n \"used by system package builders to create 'flat' eggs\"),\n ]\n boolean_options = install.boolean_options + [\n 'single-version-externally-managed',\n ]\n\n def initialize_options(self):\n install.initialize_options(self)\n self.single_version_externally_managed = None\n\n def run(self):\n if check_dependencies_once():\n # Run the normal install.\n install.run(self)\n\n\nclass build_py_biopython(build_py):\n def run(self):\n if not check_dependencies_once():\n return\n # Add software that requires Numpy to be installed.\n if is_Numpy_installed():\n self.packages.extend(NUMPY_PACKAGES)\n build_py.run(self)\n\n\nclass build_ext_biopython(build_ext):\n def run(self):\n if not check_dependencies_once():\n return\n build_ext.run(self)\n\n\nclass test_biopython(Command):\n \"\"\"Run all of the tests for the package.\n\n This is a automatic test run class to make distutils kind of act like\n perl. 
With this you can do:\n\n python setup.py build\n python setup.py install\n python setup.py test\n\n \"\"\"\n description = \"Automatically run the test suite for Biopython.\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n this_dir = os.getcwd()\n\n # change to the test dir and run the tests\n os.chdir(\"Tests\")\n sys.path.insert(0, '')\n import run_tests\n run_tests.main([])\n\n # change back to the current directory\n os.chdir(this_dir)\n\n\ndef can_import(module_name):\n \"\"\"can_import(module_name) -> module or None\"\"\"\n try:\n return __import__(module_name)\n except ImportError:\n return None\n\n\ndef is_Numpy_installed():\n if is_pypy():\n return False\n return bool(can_import(\"numpy\"))\n\n# --- set up the packages we are going to install\n# standard biopython packages\nPACKAGES = [\n 'Bio',\n 'Bio.Align',\n 'Bio.Align.Applications',\n 'Bio.AlignIO',\n 'Bio.Alphabet',\n 'Bio.Application',\n 'Bio.Blast',\n 'Bio.CAPS',\n 'Bio.codonalign',\n 'Bio.Compass',\n 'Bio.Crystal',\n 'Bio.Data',\n 'Bio.Emboss',\n 'Bio.Entrez',\n 'Bio.ExPASy',\n 'Bio.FSSP',\n 'Bio.GA',\n 'Bio.GA.Crossover',\n 'Bio.GA.Mutation',\n 'Bio.GA.Repair',\n 'Bio.GA.Selection',\n 'Bio.GenBank',\n 'Bio.Geo',\n 'Bio.Graphics',\n 'Bio.Graphics.GenomeDiagram',\n 'Bio.HMM',\n 'Bio.KEGG',\n 'Bio.KEGG.Compound',\n 'Bio.KEGG.Enzyme',\n 'Bio.KEGG.Map',\n 'Bio.KEGG.KGML',\n 'Bio.Medline',\n 'Bio.Motif',\n 'Bio.Motif.Parsers',\n 'Bio.Motif.Applications',\n 'Bio.motifs',\n 'Bio.motifs.applications',\n 'Bio.motifs.jaspar',\n 'Bio.NeuralNetwork',\n 'Bio.NeuralNetwork.BackPropagation',\n 'Bio.NeuralNetwork.Gene',\n 'Bio.Nexus',\n 'Bio.NMR',\n 'Bio.Pathway',\n 'Bio.Pathway.Rep',\n 'Bio.PDB',\n 'Bio.PopGen',\n 'Bio.PopGen.Async',\n 'Bio.PopGen.FDist',\n 'Bio.PopGen.GenePop',\n 'Bio.PopGen.SimCoal',\n 'Bio.Restriction',\n 'Bio.SCOP',\n 'Bio.SearchIO',\n 'Bio.SearchIO._model',\n 'Bio.SearchIO.BlastIO',\n 'Bio.SearchIO.HmmerIO',\n 'Bio.SearchIO.ExonerateIO',\n 'Bio.SeqIO',\n 'Bio.SeqUtils',\n 'Bio.Sequencing',\n 'Bio.Sequencing.Applications',\n 'Bio.Statistics',\n 'Bio.SubsMat',\n 'Bio.SVDSuperimposer',\n 'Bio.SwissProt',\n 'Bio.TogoWS',\n 'Bio.Phylo',\n 'Bio.Phylo.Applications',\n 'Bio.Phylo.PAML',\n 'Bio.UniGene',\n 'Bio.UniProt',\n 'Bio.Wise',\n 'Bio._py3k',\n # Other top level packages,\n 'BioSQL',\n ]\n\n# packages that require Numeric Python\nNUMPY_PACKAGES = [\n 'Bio.Affy',\n 'Bio.Cluster',\n 'Bio.KDTree',\n]\n\nif os.name == 'java':\n # Jython doesn't support C extensions\n EXTENSIONS = []\nelif is_pypy() or is_ironpython():\n # Skip C extensions for now\n EXTENSIONS = []\nelse:\n EXTENSIONS = [\n Extension('Bio.cpairwise2',\n ['Bio/cpairwise2module.c'],\n ),\n Extension('Bio.trie',\n ['Bio/triemodule.c',\n 'Bio/trie.c'],\n include_dirs=[\"Bio\"]\n ),\n Extension('Bio.Nexus.cnexus',\n ['Bio/Nexus/cnexus.c']\n ),\n ]\n\n# Add extensions that requires NumPy to build\nif is_Numpy_installed():\n import numpy\n numpy_include_dir = numpy.get_include()\n EXTENSIONS.append(\n Extension('Bio.Cluster.cluster',\n ['Bio/Cluster/clustermodule.c',\n 'Bio/Cluster/cluster.c'],\n include_dirs=[numpy_include_dir],\n ))\n EXTENSIONS.append(\n Extension('Bio.KDTree._CKDTree',\n [\"Bio/KDTree/KDTree.c\",\n \"Bio/KDTree/KDTreemodule.c\"],\n include_dirs=[numpy_include_dir],\n ))\n EXTENSIONS.append(\n Extension('Bio.Motif._pwm',\n [\"Bio/Motif/_pwm.c\"],\n include_dirs=[numpy_include_dir],\n ))\n EXTENSIONS.append(\n Extension('Bio.motifs._pwm',\n [\"Bio/motifs/_pwm.c\"],\n 
include_dirs=[numpy_include_dir],\n ))\n\n\n# We now define the Biopython version number in Bio/__init__.py\n# Here we can't use \"import Bio\" then \"Bio.__version__\" as that would\n# tell us the version of Biopython already installed (if any).\n__version__ = \"Undefined\"\nfor line in open('Bio/__init__.py'):\n if (line.startswith('__version__')):\n exec(line.strip())\n\n# Simple trick to use the 2to3 converted source under Python 3,\n# change the current directory before/after running setup.\n# Note as a side effect there will be a build folder underneath\n# the python3_source folder.\nold_path = os.getcwd()\ntry:\n src_path = python3_source\nexcept NameError:\n src_path = os.path.dirname(os.path.abspath(sys.argv[0]))\nos.chdir(src_path)\nsys.path.insert(0, src_path)\n\nsetup_args = {\n \"name\": 'biopython',\n \"version\": __version__,\n \"author\": 'The Biopython Contributors',\n \"author_email\": '[email protected]',\n \"url\": 'http://www.biopython.org/',\n \"description\": 'Freely available tools for computational molecular biology.',\n \"download_url\": 'http://biopython.org/DIST/',\n \"cmdclass\": {\n \"install\": install_biopython,\n \"build_py\": build_py_biopython,\n \"build_ext\": build_ext_biopython,\n \"test\": test_biopython,\n },\n \"packages\": PACKAGES,\n \"ext_modules\": EXTENSIONS,\n \"package_data\": {\n 'Bio.Entrez': ['DTDs/*.dtd', 'DTDs/*.ent', 'DTDs/*.mod'],\n 'Bio.PopGen': ['SimCoal/data/*.par'],\n },\n }\n\ntry:\n setup(**setup_args)\nfinally:\n del sys.path[0]\n os.chdir(old_path)\n"
] |
[
[
"numpy.get_include"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xidianfushuai/DeepCTR
|
[
"66d173e5736eae2e19c32e28e6d656ef873461a5"
] |
[
"tests/models/DIEN_test.py"
] |
[
"import numpy as np\nimport pytest\nimport tensorflow as tf\nfrom packaging import version\n\nfrom deepctr.feature_column import SparseFeat, VarLenSparseFeat, DenseFeat, get_feature_names\nfrom deepctr.models import DIEN\nfrom ..utils import check_model\n\n\ndef get_xy_fd(use_neg=False, hash_flag=False):\n feature_columns = [SparseFeat('user', 3, hash_flag),\n SparseFeat('gender', 2, hash_flag),\n SparseFeat('item', 3 + 1, hash_flag),\n SparseFeat('item_gender', 2 + 1, hash_flag),\n DenseFeat('score', 1)]\n\n feature_columns += [\n VarLenSparseFeat(SparseFeat('hist_item', vocabulary_size=3 + 1, embedding_dim=8, embedding_name='item'),\n maxlen=4),\n VarLenSparseFeat(SparseFeat('hist_item_gender', 2 + 1, embedding_dim=4, embedding_name='item_gender'),\n maxlen=4)]\n\n behavior_feature_list = [\"item\", \"item_gender\"]\n uid = np.array([0, 1, 2])\n ugender = np.array([0, 1, 0])\n iid = np.array([1, 2, 3]) # 0 is mask value\n igender = np.array([1, 2, 1]) # 0 is mask value\n score = np.array([0.1, 0.2, 0.3])\n\n hist_iid = np.array([[1, 2, 3, 0], [1, 2, 3, 0], [1, 2, 0, 0]])\n hist_igender = np.array([[1, 1, 2, 0], [2, 1, 1, 0], [2, 1, 0, 0]])\n\n behavior_length = np.array([3, 3, 2])\n\n feature_dict = {'user': uid, 'gender': ugender, 'item': iid, 'item_gender': igender,\n 'hist_item': hist_iid, 'hist_item_gender': hist_igender,\n 'score': score}\n\n if use_neg:\n feature_dict['neg_hist_item'] = np.array([[1, 2, 3, 0], [1, 2, 3, 0], [1, 2, 0, 0]])\n feature_dict['neg_hist_item_gender'] = np.array([[1, 1, 2, 0], [2, 1, 1, 0], [2, 1, 0, 0]])\n feature_columns += [\n VarLenSparseFeat(SparseFeat('neg_hist_item', vocabulary_size=3 + 1, embedding_dim=8, embedding_name='item'),\n maxlen=4),\n VarLenSparseFeat(SparseFeat('neg_hist_item_gender', 2 + 1, embedding_dim=4, embedding_name='item_gender'),\n maxlen=4)]\n\n feature_names = get_feature_names(feature_columns)\n x = {name: feature_dict[name] for name in feature_names}\n x[\"seq_length\"] = behavior_length\n y = [1, 0, 1]\n return x, y, feature_columns, behavior_feature_list\n\n\n# @pytest.mark.xfail(reason=\"There is a bug when save model use Dice\")\n# @pytest.mark.skip(reason=\"misunderstood the API\")\n\[email protected](\n 'gru_type',\n ['GRU', 'AIGRU', 'AGRU' # ,'AUGRU',\n ]\n)\ndef test_DIEN(gru_type):\n if version.parse(tf.__version__) >= version.parse('2.0.0'):\n tf.compat.v1.disable_eager_execution() # todo\n model_name = \"DIEN_\" + gru_type\n\n x, y, feature_columns, behavior_feature_list = get_xy_fd(hash_flag=True)\n\n model = DIEN(feature_columns, behavior_feature_list,\n dnn_hidden_units=[4, 4, 4], dnn_dropout=0.5, gru_type=gru_type)\n\n check_model(model, model_name, x, y,\n check_model_io=(gru_type == \"GRU\")) # TODO:fix bugs when load model in other type\n\n\ndef test_DIEN_neg():\n model_name = \"DIEN_neg\"\n if version.parse(tf.__version__) >= version.parse(\"1.14.0\"):\n return\n\n x, y, feature_dim_dict, behavior_feature_list = get_xy_fd(use_neg=True)\n\n model = DIEN(feature_dim_dict, behavior_feature_list,\n dnn_hidden_units=[4, 4, 4], dnn_dropout=0.5, gru_type=\"AUGRU\", use_negsampling=True)\n check_model(model, model_name, x, y)\n\n\nif __name__ == \"__main__\":\n pass\n"
] |
[
[
"numpy.array",
"tensorflow.compat.v1.disable_eager_execution"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
calista-ai/calista-ml-engine
|
[
"6c3bfd31908af79d7ccdeb3bc9b7e65ca8fa95da"
] |
[
"CNN/src/preprocess.py"
] |
[
"import numpy as np\nimport cv2\n\nwidth = 256\nheight = 192\nchannels = 3\n\ndef prepare_image(imagePath):\n\n X = []\n\n X.append(cv2.resize(cv2.imread(imagePath, cv2.IMREAD_COLOR), (width, height), \\\n interpolation=cv2.INTER_AREA))\n return np.array(X)\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TheOnlyCryptoParadise/crypto_package
|
[
"6ed348712a2477babee16c2cfd87e1fb34584a86"
] |
[
"crypto_package/candles/get_candles.py"
] |
[
"from datetime import datetime\nfrom time import time\n\nimport pandas\nimport requests\n\nfrom crypto_package.conf import service_config as conf\n\n\ndef get_candles(exchange: str, currency_pair: str, timeframe: str, time_start=None, time_end=None, last_n_candles=None):\n \"\"\"Gets required candles.\n\n Parameters:\n exchange : str\n exchange name (Binance, Bitbay)\n currency_pair : str\n string performing name of the pair (eg. UDT/STH)\n timeframe : str\n size of candle\n time_start: datetime [OPTIONAL]\n from what time you want to get candles (time is a timestamp)\n time_end: datetime [OPTIONAL]\n to which time you want to get candles (time is a timestamp)\n IMPORTANT: If you pass time_start and don't pass time_end value, time_end would be current time\n last_n_candles: int [OPTIONAL]\n if you want to get last n candles pass a number of candles to get\n\n Returns:\n pandas DataFrame with candles data\n \"\"\"\n\n args = _make_get_candles_args(exchange, currency_pair, timeframe, time_start, time_end, last_n_candles)\n try:\n res = requests.get(conf.CANDLE_DATA_SERVICE + conf.EP_CANDLES, args)\n except requests.ConnectionError as e:\n print(\"CONNECTION ERROR OCCURRED \"+str(e))\n raise e\n\n if res.status_code != 200:\n print(\"Some exception occurred while connecting to server.\"+str(res))\n raise Exception(str(res) + \" \"+str(res.reason))\n\n clist = res.json()['data']\n candles = pandas.DataFrame(clist)\n\n return candles, clist\n\n\ndef _make_get_candles_args(exchange, currency_pair, ticker, time_start, time_end, last_n_candles):\n args = {\"exchange\": exchange,\n \"currency_pair\": currency_pair,\n \"candle_size\": ticker,\n }\n\n args = _add_time_values(args, time_start, time_end)\n args = _add_last_candles_values(args, last_n_candles)\n\n return args\n\n\ndef _add_time_values(args, time_start, time_end):\n if time_start is not None:\n if type(time_start) is not datetime: # do we check whether user gave int value or are we adults? xd\n raise TypeError(\"time_start has to be datetime\")\n if time_end is not None and type(time_end) is not datetime:\n raise TypeError(\"time_start has to be datetime\")\n\n args[\"time_start\"] = int(time_start.timestamp())\n args[\"time_end\"] = int(_check_time(time_start, time_end)) if time_end is not None else int(time())\n\n elif time_start is None and time_end is not None:\n raise ValueError(\"you cannot pass time_end without time_start\")\n\n return args\n\n\ndef _check_time(time_start, time_end):\n assert time_end.timestamp() > time_start.timestamp(), \"time_end has to be after time_start\"\n return time_end.timestamp()\n\n\ndef _add_last_candles_values(args, last_n_candles):\n if last_n_candles is not None:\n if type(last_n_candles) is not int:\n raise TypeError(\"last_n_candles has to be int\")\n args[\"last_n_candles\"] = last_n_candles\n\n return args\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
salesforce/QAConv
|
[
"3ab468c51b09fb5301c8bcedc109d451fd4b853d"
] |
[
"baseline/span_based/utils_qa.py"
] |
[
"# coding=utf-8\n# Copyright 2020 The HuggingFace Team All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPost-processing utilities for question answering.\n\"\"\"\nimport collections\nimport json\nimport logging\nimport os\nfrom typing import Optional, Tuple\n\nimport numpy as np\nfrom tqdm.auto import tqdm\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef postprocess_qa_predictions(\n examples,\n features,\n predictions: Tuple[np.ndarray, np.ndarray],\n version_2_with_negative: bool = False,\n n_best_size: int = 20,\n max_answer_length: int = 30,\n null_score_diff_threshold: float = 0.0,\n output_dir: Optional[str] = None,\n prefix: Optional[str] = None,\n is_world_process_zero: bool = True,\n):\n \"\"\"\n Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the\n original contexts. This is the base postprocessing functions for models that only return start and end logits.\n\n Args:\n examples: The non-preprocessed dataset (see the main script for more information).\n features: The processed dataset (see the main script for more information).\n predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):\n The predictions of the model: two arrays containing the start logits and the end logits respectively. Its\n first dimension must match the number of elements of :obj:`features`.\n version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not the underlying dataset contains examples with no answers.\n n_best_size (:obj:`int`, `optional`, defaults to 20):\n The total number of n-best predictions to generate when looking for an answer.\n max_answer_length (:obj:`int`, `optional`, defaults to 30):\n The maximum length of an answer that can be generated. 
This is needed because the start and end predictions\n are not conditioned on one another.\n null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):\n The threshold used to select the null answer: if the best answer has a score that is less than the score of\n the null answer minus this threshold, the null answer is selected for this example (note that the score of\n the null answer for an example giving several features is the minimum of the scores for the null answer on\n each feature: all features must be aligned on the fact they `want` to predict a null answer).\n\n Only useful when :obj:`version_2_with_negative` is :obj:`True`.\n output_dir (:obj:`str`, `optional`):\n If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if\n :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null\n answers, are saved in `output_dir`.\n prefix (:obj:`str`, `optional`):\n If provided, the dictionaries mentioned above are saved with `prefix` added to their names.\n is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether this process is the main process or not (used to determine if logging/saves should be done).\n \"\"\"\n assert len(predictions) == 2, \"`predictions` should be a tuple with two elements (start_logits, end_logits).\"\n all_start_logits, all_end_logits = predictions\n\n assert len(predictions[0]) == len(features), f\"Got {len(predictions[0])} predictions and {len(features)} features.\"\n\n # Build a map example to its corresponding features.\n example_id_to_index = {k: i for i, k in enumerate(examples[\"id\"])}\n features_per_example = collections.defaultdict(list)\n for i, feature in enumerate(features):\n features_per_example[example_id_to_index[feature[\"example_id\"]]].append(i)\n\n # The dictionaries we have to fill.\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n if version_2_with_negative:\n scores_diff_json = collections.OrderedDict()\n\n # Logging.\n logger.setLevel(logging.INFO if is_world_process_zero else logging.WARN)\n logger.info(f\"Post-processing {len(examples)} example predictions split into {len(features)} features.\")\n\n # Let's loop over all the examples!\n for example_index, example in enumerate(tqdm(examples)):\n # Those are the indices of the features associated to the current example.\n feature_indices = features_per_example[example_index]\n\n min_null_prediction = None\n prelim_predictions = []\n\n # Looping through all the features associated to the current example.\n for feature_index in feature_indices:\n # We grab the predictions of the model for this feature.\n start_logits = all_start_logits[feature_index]\n end_logits = all_end_logits[feature_index]\n # This is what will allow us to map some the positions in our logits to span of texts in the original\n # context.\n offset_mapping = features[feature_index][\"offset_mapping\"]\n # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context\n # available in the current feature.\n token_is_max_context = features[feature_index].get(\"token_is_max_context\", None)\n\n # Update minimum null prediction.\n feature_null_score = start_logits[0] + end_logits[0]\n if min_null_prediction is None or min_null_prediction[\"score\"] > feature_null_score:\n min_null_prediction = {\n \"offsets\": (0, 0),\n \"score\": feature_null_score,\n \"start_logit\": start_logits[0],\n \"end_logit\": end_logits[0],\n 
}\n\n # Go through all possibilities for the `n_best_size` greater start and end logits.\n start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist()\n end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist()\n for start_index in start_indexes:\n for end_index in end_indexes:\n # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond\n # to part of the input_ids that are not in the context.\n if (\n start_index >= len(offset_mapping)\n or end_index >= len(offset_mapping)\n or offset_mapping[start_index] is None\n or offset_mapping[end_index] is None\n ):\n continue\n # Don't consider answers with a length that is either < 0 or > max_answer_length.\n if end_index < start_index or end_index - start_index + 1 > max_answer_length:\n continue\n # Don't consider answer that don't have the maximum context available (if such information is\n # provided).\n if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):\n continue\n prelim_predictions.append(\n {\n \"offsets\": (offset_mapping[start_index][0], offset_mapping[end_index][1]),\n \"score\": start_logits[start_index] + end_logits[end_index],\n \"start_logit\": start_logits[start_index],\n \"end_logit\": end_logits[end_index],\n }\n )\n if version_2_with_negative:\n # Add the minimum null prediction\n prelim_predictions.append(min_null_prediction)\n null_score = min_null_prediction[\"score\"]\n\n # Only keep the best `n_best_size` predictions.\n predictions = sorted(prelim_predictions, key=lambda x: x[\"score\"], reverse=True)[:n_best_size]\n\n # Add back the minimum null prediction if it was removed because of its low score.\n if version_2_with_negative and not any(p[\"offsets\"] == (0, 0) for p in predictions):\n predictions.append(min_null_prediction)\n\n # Use the offsets to gather the answer text in the original context.\n context = example[\"context\"]\n for pred in predictions:\n offsets = pred.pop(\"offsets\")\n pred[\"text\"] = context[offsets[0] : offsets[1]]\n\n # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid\n # failure.\n if len(predictions) == 0 or (len(predictions) == 1 and predictions[0][\"text\"] == \"\"):\n predictions.insert(0, {\"text\": \"empty\", \"start_logit\": 0.0, \"end_logit\": 0.0, \"score\": 0.0})\n\n # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using\n # the LogSumExp trick).\n scores = np.array([pred.pop(\"score\") for pred in predictions])\n exp_scores = np.exp(scores - np.max(scores))\n probs = exp_scores / exp_scores.sum()\n\n # Include the probabilities in our predictions.\n for prob, pred in zip(probs, predictions):\n pred[\"probability\"] = prob\n\n # Pick the best prediction. 
If the null answer is not possible, this is easy.\n if not version_2_with_negative:\n all_predictions[example[\"id\"]] = predictions[0][\"text\"]\n else:\n # Otherwise we first need to find the best non-empty prediction.\n i = 0\n while predictions[i][\"text\"] == \"\":\n i += 1\n best_non_null_pred = predictions[i]\n\n # Then we compare to the null prediction using the threshold.\n score_diff = null_score - best_non_null_pred[\"start_logit\"] - best_non_null_pred[\"end_logit\"]\n scores_diff_json[example[\"id\"]] = float(score_diff) # To be JSON-serializable.\n if score_diff > null_score_diff_threshold:\n all_predictions[example[\"id\"]] = \"\"\n else:\n all_predictions[example[\"id\"]] = best_non_null_pred[\"text\"]\n\n # Make `predictions` JSON-serializable by casting np.float back to float.\n all_nbest_json[example[\"id\"]] = [\n {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}\n for pred in predictions\n ]\n\n # If we have an output_dir, let's save all those dicts.\n if output_dir is not None:\n assert os.path.isdir(output_dir), f\"{output_dir} is not a directory.\"\n\n prediction_file = os.path.join(\n output_dir, \"predictions.json\" if prefix is None else f\"{prefix}_predictions.json\"\n )\n nbest_file = os.path.join(\n output_dir, \"nbest_predictions.json\" if prefix is None else f\"{prefix}_nbest_predictions.json\"\n )\n if version_2_with_negative:\n null_odds_file = os.path.join(\n output_dir, \"null_odds.json\" if prefix is None else f\"{prefix}_null_odds.json\"\n )\n\n logger.info(f\"Saving predictions to {prediction_file}.\")\n with open(prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n logger.info(f\"Saving nbest_preds to {nbest_file}.\")\n with open(nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n if version_2_with_negative:\n logger.info(f\"Saving null_odds to {null_odds_file}.\")\n with open(null_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n\n return all_predictions\n\n\ndef postprocess_qa_predictions_with_beam_search(\n examples,\n features,\n predictions: Tuple[np.ndarray, np.ndarray],\n version_2_with_negative: bool = False,\n n_best_size: int = 20,\n max_answer_length: int = 30,\n start_n_top: int = 5,\n end_n_top: int = 5,\n output_dir: Optional[str] = None,\n prefix: Optional[str] = None,\n is_world_process_zero: bool = True,\n):\n \"\"\"\n Post-processes the predictions of a question-answering model with beam search to convert them to answers that are substrings of the\n original contexts. This is the postprocessing functions for models that return start and end logits, indices, as well as\n cls token predictions.\n\n Args:\n examples: The non-preprocessed dataset (see the main script for more information).\n features: The processed dataset (see the main script for more information).\n predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):\n The predictions of the model: two arrays containing the start logits and the end logits respectively. 
Its\n first dimension must match the number of elements of :obj:`features`.\n version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not the underlying dataset contains examples with no answers.\n n_best_size (:obj:`int`, `optional`, defaults to 20):\n The total number of n-best predictions to generate when looking for an answer.\n max_answer_length (:obj:`int`, `optional`, defaults to 30):\n The maximum length of an answer that can be generated. This is needed because the start and end predictions\n are not conditioned on one another.\n start_n_top (:obj:`int`, `optional`, defaults to 5):\n The number of top start logits too keep when searching for the :obj:`n_best_size` predictions.\n end_n_top (:obj:`int`, `optional`, defaults to 5):\n The number of top end logits too keep when searching for the :obj:`n_best_size` predictions.\n output_dir (:obj:`str`, `optional`):\n If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if\n :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null\n answers, are saved in `output_dir`.\n prefix (:obj:`str`, `optional`):\n If provided, the dictionaries mentioned above are saved with `prefix` added to their names.\n is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether this process is the main process or not (used to determine if logging/saves should be done).\n \"\"\"\n assert len(predictions) == 5, \"`predictions` should be a tuple with five elements.\"\n start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = predictions\n\n assert len(predictions[0]) == len(\n features\n ), f\"Got {len(predictions[0])} predicitions and {len(features)} features.\"\n\n # Build a map example to its corresponding features.\n example_id_to_index = {k: i for i, k in enumerate(examples[\"id\"])}\n features_per_example = collections.defaultdict(list)\n for i, feature in enumerate(features):\n features_per_example[example_id_to_index[feature[\"example_id\"]]].append(i)\n\n # The dictionaries we have to fill.\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict() if version_2_with_negative else None\n\n # Logging.\n logger.setLevel(logging.INFO if is_world_process_zero else logging.WARN)\n logger.info(f\"Post-processing {len(examples)} example predictions split into {len(features)} features.\")\n\n # Let's loop over all the examples!\n for example_index, example in enumerate(tqdm(examples)):\n # Those are the indices of the features associated to the current example.\n feature_indices = features_per_example[example_index]\n\n min_null_score = None\n prelim_predictions = []\n\n # Looping through all the features associated to the current example.\n for feature_index in feature_indices:\n # We grab the predictions of the model for this feature.\n start_log_prob = start_top_log_probs[feature_index]\n start_indexes = start_top_index[feature_index]\n end_log_prob = end_top_log_probs[feature_index]\n end_indexes = end_top_index[feature_index]\n feature_null_score = cls_logits[feature_index]\n # This is what will allow us to map some the positions in our logits to span of texts in the original\n # context.\n offset_mapping = features[feature_index][\"offset_mapping\"]\n # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context\n # available in the current feature.\n 
token_is_max_context = features[feature_index].get(\"token_is_max_context\", None)\n\n # Update minimum null prediction\n if min_null_score is None or feature_null_score < min_null_score:\n min_null_score = feature_null_score\n\n # Go through all possibilities for the `n_start_top`/`n_end_top` greater start and end logits.\n for i in range(start_n_top):\n for j in range(end_n_top):\n start_index = int(start_indexes[i])\n j_index = i * end_n_top + j\n end_index = int(end_indexes[j_index])\n # Don't consider out-of-scope answers (last part of the test should be unnecessary because of the\n # p_mask but let's not take any risk)\n if (\n start_index >= len(offset_mapping)\n or end_index >= len(offset_mapping)\n or offset_mapping[start_index] is None\n or offset_mapping[end_index] is None\n ):\n continue\n # Don't consider answers with a length negative or > max_answer_length.\n if end_index < start_index or end_index - start_index + 1 > max_answer_length:\n continue\n # Don't consider answer that don't have the maximum context available (if such information is\n # provided).\n if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False):\n continue\n prelim_predictions.append(\n {\n \"offsets\": (offset_mapping[start_index][0], offset_mapping[end_index][1]),\n \"score\": start_log_prob[i] + end_log_prob[j_index],\n \"start_log_prob\": start_log_prob[i],\n \"end_log_prob\": end_log_prob[j_index],\n }\n )\n\n # Only keep the best `n_best_size` predictions.\n predictions = sorted(prelim_predictions, key=lambda x: x[\"score\"], reverse=True)[:n_best_size]\n\n # Use the offsets to gather the answer text in the original context.\n context = example[\"context\"]\n for pred in predictions:\n offsets = pred.pop(\"offsets\")\n pred[\"text\"] = context[offsets[0] : offsets[1]]\n\n # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid\n # failure.\n if len(predictions) == 0:\n predictions.insert(0, {\"text\": \"\", \"start_logit\": -1e-6, \"end_logit\": -1e-6, \"score\": -2e-6})\n\n # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using\n # the LogSumExp trick).\n scores = np.array([pred.pop(\"score\") for pred in predictions])\n exp_scores = np.exp(scores - np.max(scores))\n probs = exp_scores / exp_scores.sum()\n\n # Include the probabilities in our predictions.\n for prob, pred in zip(probs, predictions):\n pred[\"probability\"] = prob\n\n # Pick the best prediction and set the probability for the null answer.\n all_predictions[example[\"id\"]] = predictions[0][\"text\"]\n if version_2_with_negative:\n scores_diff_json[example[\"id\"]] = float(min_null_score)\n\n # Make `predictions` JSON-serializable by casting np.float back to float.\n all_nbest_json[example[\"id\"]] = [\n {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()}\n for pred in predictions\n ]\n\n # If we have an output_dir, let's save all those dicts.\n if output_dir is not None:\n assert os.path.isdir(output_dir), f\"{output_dir} is not a directory.\"\n\n prediction_file = os.path.join(\n output_dir, \"predictions.json\" if prefix is None else f\"{prefix}_predictions.json\"\n )\n nbest_file = os.path.join(\n output_dir, \"nbest_predictions.json\" if prefix is None else f\"{prefix}_nbest_predictions.json\"\n )\n if version_2_with_negative:\n null_odds_file = os.path.join(\n output_dir, \"null_odds.json\" if prefix is None else 
f\"{prefix}_null_odds.json\"\n )\n\n print(f\"Saving predictions to {prediction_file}.\")\n with open(prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n print(f\"Saving nbest_preds to {nbest_file}.\")\n with open(nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n if version_2_with_negative:\n print(f\"Saving null_odds to {null_odds_file}.\")\n with open(null_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n\n return all_predictions, scores_diff_json"
] |
[
[
"numpy.argsort",
"numpy.max"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
travisyates81/object-detection
|
[
"931bebfa54798c08d2c401e9c1bad39015d8c832",
"931bebfa54798c08d2c401e9c1bad39015d8c832",
"931bebfa54798c08d2c401e9c1bad39015d8c832",
"931bebfa54798c08d2c401e9c1bad39015d8c832",
"931bebfa54798c08d2c401e9c1bad39015d8c832",
"931bebfa54798c08d2c401e9c1bad39015d8c832"
] |
[
"object_detection_app.py",
"object_detection/utils/learning_schedules_test.py",
"object_detection/utils/static_shape_test.py",
"object_detection/builders/preprocessor_builder_test.py",
"object_detection/box_coders/keypoint_box_coder.py",
"object_detection/core/batcher.py"
] |
[
"import os\nimport cv2\nimport time\nimport argparse\nimport multiprocessing\nimport numpy as np\nimport tensorflow as tf\n\nfrom utils.app_utils import FPS, WebcamVideoStream\nfrom multiprocessing import Queue, Pool\nfrom object_detection.utils import label_map_util\nfrom object_detection.utils import visualization_utils as vis_util\n\nCWD_PATH = os.getcwd()\n\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\nMODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'\nPATH_TO_CKPT = os.path.join(CWD_PATH, 'object_detection', MODEL_NAME, 'frozen_inference_graph.pb')\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join(CWD_PATH, 'object_detection', 'data', 'mscoco_label_map.pbtxt')\n\nNUM_CLASSES = 90\n\n# Loading label map\nlabel_map = label_map_util.load_labelmap(PATH_TO_LABELS)\ncategories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,\n use_display_name=True)\ncategory_index = label_map_util.create_category_index(categories)\n\n\ndef detect_objects(image_np, sess, detection_graph):\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = np.expand_dims(image_np, axis=0)\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n\n # Each box represents a part of the image where a particular object was detected.\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n\n # Each score represent how level of confidence for each of the objects.\n # Score is shown on the result image, together with the class label.\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n\n # Actual detection.\n (boxes, scores, classes, num_detections) = sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n\n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n np.squeeze(boxes),\n np.squeeze(classes).astype(np.int32),\n np.squeeze(scores),\n category_index,\n use_normalized_coordinates=True,\n line_thickness=8)\n return image_np\n\n\ndef worker(input_q, output_q):\n # Load a (frozen) Tensorflow model into memory.\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n sess = tf.Session(graph=detection_graph)\n\n fps = FPS().start()\n while True:\n fps.update()\n frame = input_q.get()\n frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n output_q.put(detect_objects(frame_rgb, sess, detection_graph))\n\n fps.stop()\n sess.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('-src', '--source', dest='video_source', type=int,\n default=0, help='Device index of the camera.')\n parser.add_argument('-wd', '--width', dest='width', type=int,\n default=480, help='Width of the frames in the video stream.')\n parser.add_argument('-ht', '--height', dest='height', type=int,\n default=360, help='Height of the frames in the video stream.')\n parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,\n default=2, help='Number of workers.')\n parser.add_argument('-q-size', 
'--queue-size', dest='queue_size', type=int,\n default=5, help='Size of the queue.')\n args = parser.parse_args()\n\n logger = multiprocessing.log_to_stderr()\n logger.setLevel(multiprocessing.SUBDEBUG)\n\n input_q = Queue(maxsize=args.queue_size)\n output_q = Queue(maxsize=args.queue_size)\n pool = Pool(args.num_workers, worker, (input_q, output_q))\n video_path = 'videoplayback.mp4'\n vidcap = cv2.VideoCapture(video_path)\n # video_capture = WebcamVideoStream(src=args.video_source,\n # width=args.width,\n # height=args.height).start()\n fps = FPS().start()\n success, img = vidcap.read()\n while success: # fps._numFrames < 120\n #frame = video_capture.read()\n success, frame = vidcap.read()\n input_q.put(frame)\n\n t = time.time()\n\n output_rgb = cv2.cvtColor(output_q.get(), cv2.COLOR_RGB2BGR)\n cv2.imshow('Video', output_rgb)\n fps.update()\n\n print('[INFO] elapsed time: {:.2f}'.format(time.time() - t))\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n fps.stop()\n print('[INFO] elapsed time (total): {:.2f}'.format(fps.elapsed()))\n print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))\n\n pool.terminate()\n # video_capture.stop()\n cv2.destroyAllWindows()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Travis Yates\n\n\"\"\"Tests for object_detection.utils.learning_schedules.\"\"\"\nimport tensorflow as tf\n\nfrom object_detection.utils import learning_schedules\n\n\nclass LearningSchedulesTest(tf.test.TestCase):\n\n def testExponentialDecayWithBurnin(self):\n global_step = tf.placeholder(tf.int32, [])\n learning_rate_base = 1.0\n learning_rate_decay_steps = 3\n learning_rate_decay_factor = .1\n burnin_learning_rate = .5\n burnin_steps = 2\n exp_rates = [.5, .5, 1, .1, .1, .1, .01, .01]\n learning_rate = learning_schedules.exponential_decay_with_burnin(\n global_step, learning_rate_base, learning_rate_decay_steps,\n learning_rate_decay_factor, burnin_learning_rate, burnin_steps)\n with self.test_session() as sess:\n output_rates = []\n for input_global_step in range(8):\n output_rate = sess.run(learning_rate,\n feed_dict={global_step: input_global_step})\n output_rates.append(output_rate)\n self.assertAllClose(output_rates, exp_rates)\n\n def testManualStepping(self):\n global_step = tf.placeholder(tf.int64, [])\n boundaries = [2, 3, 7]\n rates = [1.0, 2.0, 3.0, 4.0]\n exp_rates = [1.0, 1.0, 2.0, 3.0, 3.0, 3.0, 3.0, 4.0, 4.0, 4.0]\n learning_rate = learning_schedules.manual_stepping(global_step, boundaries,\n rates)\n with self.test_session() as sess:\n output_rates = []\n for input_global_step in range(10):\n output_rate = sess.run(learning_rate,\n feed_dict={global_step: input_global_step})\n output_rates.append(output_rate)\n self.assertAllClose(output_rates, exp_rates)\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Travis Yates\n\n\"\"\"Tests for object_detection.utils.static_shape.\"\"\"\n\nimport tensorflow as tf\n\nfrom object_detection.utils import static_shape\n\n\nclass StaticShapeTest(tf.test.TestCase):\n\n def test_return_correct_batchSize(self):\n tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])\n self.assertEqual(32, static_shape.get_batch_size(tensor_shape))\n\n def test_return_correct_height(self):\n tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])\n self.assertEqual(299, static_shape.get_height(tensor_shape))\n\n def test_return_correct_width(self):\n tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])\n self.assertEqual(384, static_shape.get_width(tensor_shape))\n\n def test_return_correct_depth(self):\n tensor_shape = tf.TensorShape(dims=[32, 299, 384, 3])\n self.assertEqual(3, static_shape.get_depth(tensor_shape))\n\n def test_die_on_tensor_shape_with_rank_three(self):\n tensor_shape = tf.TensorShape(dims=[32, 299, 384])\n with self.assertRaises(ValueError):\n static_shape.get_batch_size(tensor_shape)\n static_shape.get_height(tensor_shape)\n static_shape.get_width(tensor_shape)\n static_shape.get_depth(tensor_shape)\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Travis Yates\n\n\"\"\"Tests for preprocessor_builder.\"\"\"\n\nimport tensorflow as tf\n\nfrom google.protobuf import text_format\n\nfrom object_detection.builders import preprocessor_builder\nfrom object_detection.core import preprocessor\nfrom object_detection.protos import preprocessor_pb2\n\n\nclass PreprocessorBuilderTest(tf.test.TestCase):\n\n def assert_dictionary_close(self, dict1, dict2):\n \"\"\"Helper to check if two dicts with floatst or integers are close.\"\"\"\n self.assertEqual(sorted(dict1.keys()), sorted(dict2.keys()))\n for key in dict1:\n value = dict1[key]\n if isinstance(value, float):\n self.assertAlmostEqual(value, dict2[key])\n else:\n self.assertEqual(value, dict2[key])\n\n def test_build_normalize_image(self):\n preprocessor_text_proto = \"\"\"\n normalize_image {\n original_minval: 0.0\n original_maxval: 255.0\n target_minval: -1.0\n target_maxval: 1.0\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.normalize_image)\n self.assertEqual(args, {\n 'original_minval': 0.0,\n 'original_maxval': 255.0,\n 'target_minval': -1.0,\n 'target_maxval': 1.0,\n })\n\n def test_build_random_horizontal_flip(self):\n preprocessor_text_proto = \"\"\"\n random_horizontal_flip {\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_horizontal_flip)\n self.assertEqual(args, {})\n\n def test_build_random_pixel_value_scale(self):\n preprocessor_text_proto = \"\"\"\n random_pixel_value_scale {\n minval: 0.8\n maxval: 1.2\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_pixel_value_scale)\n self.assert_dictionary_close(args, {'minval': 0.8, 'maxval': 1.2})\n\n def test_build_random_image_scale(self):\n preprocessor_text_proto = \"\"\"\n random_image_scale {\n min_scale_ratio: 0.8\n max_scale_ratio: 2.2\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_image_scale)\n self.assert_dictionary_close(args, {'min_scale_ratio': 0.8,\n 'max_scale_ratio': 2.2})\n\n def test_build_random_rgb_to_gray(self):\n preprocessor_text_proto = \"\"\"\n random_rgb_to_gray {\n probability: 0.8\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_rgb_to_gray)\n self.assert_dictionary_close(args, {'probability': 0.8})\n\n def test_build_random_adjust_brightness(self):\n preprocessor_text_proto = \"\"\"\n random_adjust_brightness {\n max_delta: 0.2\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n 
self.assertEqual(function, preprocessor.random_adjust_brightness)\n self.assert_dictionary_close(args, {'max_delta': 0.2})\n\n def test_build_random_adjust_contrast(self):\n preprocessor_text_proto = \"\"\"\n random_adjust_contrast {\n min_delta: 0.7\n max_delta: 1.1\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_adjust_contrast)\n self.assert_dictionary_close(args, {'min_delta': 0.7, 'max_delta': 1.1})\n\n def test_build_random_adjust_hue(self):\n preprocessor_text_proto = \"\"\"\n random_adjust_hue {\n max_delta: 0.01\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_adjust_hue)\n self.assert_dictionary_close(args, {'max_delta': 0.01})\n\n def test_build_random_adjust_saturation(self):\n preprocessor_text_proto = \"\"\"\n random_adjust_saturation {\n min_delta: 0.75\n max_delta: 1.15\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_adjust_saturation)\n self.assert_dictionary_close(args, {'min_delta': 0.75, 'max_delta': 1.15})\n\n def test_build_random_distort_color(self):\n preprocessor_text_proto = \"\"\"\n random_distort_color {\n color_ordering: 1\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_distort_color)\n self.assertEqual(args, {'color_ordering': 1})\n\n def test_build_random_jitter_boxes(self):\n preprocessor_text_proto = \"\"\"\n random_jitter_boxes {\n ratio: 0.1\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_jitter_boxes)\n self.assert_dictionary_close(args, {'ratio': 0.1})\n\n def test_build_random_crop_image(self):\n preprocessor_text_proto = \"\"\"\n random_crop_image {\n min_object_covered: 0.75\n min_aspect_ratio: 0.75\n max_aspect_ratio: 1.5\n min_area: 0.25\n max_area: 0.875\n overlap_thresh: 0.5\n random_coef: 0.125\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_crop_image)\n self.assertEqual(args, {\n 'min_object_covered': 0.75,\n 'aspect_ratio_range': (0.75, 1.5),\n 'area_range': (0.25, 0.875),\n 'overlap_thresh': 0.5,\n 'random_coef': 0.125,\n })\n\n def test_build_random_pad_image(self):\n preprocessor_text_proto = \"\"\"\n random_pad_image {\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_pad_image)\n self.assertEqual(args, {\n 'min_image_size': None,\n 
'max_image_size': None,\n 'pad_color': None,\n })\n\n def test_build_random_crop_pad_image(self):\n preprocessor_text_proto = \"\"\"\n random_crop_pad_image {\n min_object_covered: 0.75\n min_aspect_ratio: 0.75\n max_aspect_ratio: 1.5\n min_area: 0.25\n max_area: 0.875\n overlap_thresh: 0.5\n random_coef: 0.125\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_crop_pad_image)\n self.assertEqual(args, {\n 'min_object_covered': 0.75,\n 'aspect_ratio_range': (0.75, 1.5),\n 'area_range': (0.25, 0.875),\n 'overlap_thresh': 0.5,\n 'random_coef': 0.125,\n 'min_padded_size_ratio': None,\n 'max_padded_size_ratio': None,\n 'pad_color': None,\n })\n\n def test_build_random_crop_to_aspect_ratio(self):\n preprocessor_text_proto = \"\"\"\n random_crop_to_aspect_ratio {\n aspect_ratio: 0.85\n overlap_thresh: 0.35\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_crop_to_aspect_ratio)\n self.assert_dictionary_close(args, {'aspect_ratio': 0.85,\n 'overlap_thresh': 0.35})\n\n def test_build_random_black_patches(self):\n preprocessor_text_proto = \"\"\"\n random_black_patches {\n max_black_patches: 20\n probability: 0.95\n size_to_image_ratio: 0.12\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_black_patches)\n self.assert_dictionary_close(args, {'max_black_patches': 20,\n 'probability': 0.95,\n 'size_to_image_ratio': 0.12})\n\n def test_build_random_resize_method(self):\n preprocessor_text_proto = \"\"\"\n random_resize_method {\n target_height: 75\n target_width: 100\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.random_resize_method)\n self.assert_dictionary_close(args, {'target_size': [75, 100]})\n\n def test_build_scale_boxes_to_pixel_coordinates(self):\n preprocessor_text_proto = \"\"\"\n scale_boxes_to_pixel_coordinates {}\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.scale_boxes_to_pixel_coordinates)\n self.assertEqual(args, {})\n\n def test_build_resize_image(self):\n preprocessor_text_proto = \"\"\"\n resize_image {\n new_height: 75\n new_width: 100\n method: BICUBIC\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.resize_image)\n self.assertEqual(args, {'new_height': 75,\n 'new_width': 100,\n 'method': tf.image.ResizeMethod.BICUBIC})\n\n def test_build_subtract_channel_mean(self):\n preprocessor_text_proto = \"\"\"\n subtract_channel_mean {\n means: [1.0, 2.0, 3.0]\n }\n \"\"\"\n preprocessor_proto = 
preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.subtract_channel_mean)\n self.assertEqual(args, {'means': [1.0, 2.0, 3.0]})\n\n def test_build_ssd_random_crop(self):\n preprocessor_text_proto = \"\"\"\n ssd_random_crop {\n operations {\n min_object_covered: 0.0\n min_aspect_ratio: 0.875\n max_aspect_ratio: 1.125\n min_area: 0.5\n max_area: 1.0\n overlap_thresh: 0.0\n random_coef: 0.375\n }\n operations {\n min_object_covered: 0.25\n min_aspect_ratio: 0.75\n max_aspect_ratio: 1.5\n min_area: 0.5\n max_area: 1.0\n overlap_thresh: 0.25\n random_coef: 0.375\n }\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.ssd_random_crop)\n self.assertEqual(args, {'min_object_covered': [0.0, 0.25],\n 'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],\n 'area_range': [(0.5, 1.0), (0.5, 1.0)],\n 'overlap_thresh': [0.0, 0.25],\n 'random_coef': [0.375, 0.375]})\n\n def test_build_ssd_random_crop_empty_operations(self):\n preprocessor_text_proto = \"\"\"\n ssd_random_crop {\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.ssd_random_crop)\n self.assertEqual(args, {})\n\n def test_build_ssd_random_crop_pad(self):\n preprocessor_text_proto = \"\"\"\n ssd_random_crop_pad {\n operations {\n min_object_covered: 0.0\n min_aspect_ratio: 0.875\n max_aspect_ratio: 1.125\n min_area: 0.5\n max_area: 1.0\n overlap_thresh: 0.0\n random_coef: 0.375\n min_padded_size_ratio: [0.0, 0.0]\n max_padded_size_ratio: [2.0, 2.0]\n pad_color_r: 0.5\n pad_color_g: 0.5\n pad_color_b: 0.5\n }\n operations {\n min_object_covered: 0.25\n min_aspect_ratio: 0.75\n max_aspect_ratio: 1.5\n min_area: 0.5\n max_area: 1.0\n overlap_thresh: 0.25\n random_coef: 0.375\n min_padded_size_ratio: [0.0, 0.0]\n max_padded_size_ratio: [2.0, 2.0]\n pad_color_r: 0.5\n pad_color_g: 0.5\n pad_color_b: 0.5\n }\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.ssd_random_crop_pad)\n self.assertEqual(args, {'min_object_covered': [0.0, 0.25],\n 'aspect_ratio_range': [(0.875, 1.125), (0.75, 1.5)],\n 'area_range': [(0.5, 1.0), (0.5, 1.0)],\n 'overlap_thresh': [0.0, 0.25],\n 'random_coef': [0.375, 0.375],\n 'min_padded_size_ratio': [(0.0, 0.0), (0.0, 0.0)],\n 'max_padded_size_ratio': [(2.0, 2.0), (2.0, 2.0)],\n 'pad_color': [(0.5, 0.5, 0.5), (0.5, 0.5, 0.5)]})\n\n def test_build_ssd_random_crop_fixed_aspect_ratio(self):\n preprocessor_text_proto = \"\"\"\n ssd_random_crop_fixed_aspect_ratio {\n operations {\n min_object_covered: 0.0\n min_area: 0.5\n max_area: 1.0\n overlap_thresh: 0.0\n random_coef: 0.375\n }\n operations {\n min_object_covered: 0.25\n min_area: 0.5\n max_area: 1.0\n overlap_thresh: 0.25\n random_coef: 0.375\n }\n aspect_ratio: 0.875\n }\n \"\"\"\n preprocessor_proto = preprocessor_pb2.PreprocessingStep()\n text_format.Merge(preprocessor_text_proto, preprocessor_proto)\n function, args = 
preprocessor_builder.build(preprocessor_proto)\n self.assertEqual(function, preprocessor.ssd_random_crop_fixed_aspect_ratio)\n self.assertEqual(args, {'min_object_covered': [0.0, 0.25],\n 'aspect_ratio': 0.875,\n 'area_range': [(0.5, 1.0), (0.5, 1.0)],\n 'overlap_thresh': [0.0, 0.25],\n 'random_coef': [0.375, 0.375]})\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
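The ssd_random_crop tests above expect `preprocessor_builder.build` to transpose the repeated `operations` field into per-argument lists (one entry per operation). A minimal sketch of that transposition, assuming plain dicts in place of the proto messages; `transpose_operations` is an illustrative name, not the builder's actual helper:

```python
def transpose_operations(operations):
    """Turn a list of per-operation configs into per-argument lists.

    Two operations with min_object_covered 0.0 and 0.25 become
    {'min_object_covered': [0.0, 0.25], ...}, matching the expected
    args in the ssd_random_crop tests above.
    """
    if not operations:
        return {}  # empty `operations` yields empty kwargs, as tested above
    return {
        'min_object_covered': [op['min_object_covered'] for op in operations],
        'aspect_ratio_range': [(op['min_aspect_ratio'], op['max_aspect_ratio'])
                               for op in operations],
        'area_range': [(op['min_area'], op['max_area']) for op in operations],
        'overlap_thresh': [op['overlap_thresh'] for op in operations],
        'random_coef': [op['random_coef'] for op in operations],
    }

ops = [
    dict(min_object_covered=0.0, min_aspect_ratio=0.875, max_aspect_ratio=1.125,
         min_area=0.5, max_area=1.0, overlap_thresh=0.0, random_coef=0.375),
    dict(min_object_covered=0.25, min_aspect_ratio=0.75, max_aspect_ratio=1.5,
         min_area=0.5, max_area=1.0, overlap_thresh=0.25, random_coef=0.375),
]
assert transpose_operations(ops)['aspect_ratio_range'] == [(0.875, 1.125), (0.75, 1.5)]
```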
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n\n# Travis Yates\n\"\"\"Keypoint box coder.\n\nThe keypoint box coder follows the coding schema described below (this is\nsimilar to the FasterRcnnBoxCoder, except that it encodes keypoints in addition\nto box coordinates):\n ty = (y - ya) / ha\n tx = (x - xa) / wa\n th = log(h / ha)\n tw = log(w / wa)\n tky0 = (ky0 - ya) / ha\n tkx0 = (kx0 - xa) / ha\n tky1 = (ky1 - ya) / ha\n tkx1 = (kx1 - xa) / ha\n ...\n where x, y, w, h denote the box's center coordinates, width and height\n respectively. Similarly, xa, ya, wa, ha denote the anchor's center\n coordinates, width and height. tx, ty, tw and th denote the anchor-encoded\n center, width and height respectively. ky0, kx0, ky1, kx1, ... denote the\n keypoints' coordinates, and tky0, tkx0, tky1, tkx1, ... denote the\n anchor-encoded keypoint coordinates.\n\"\"\"\n\nimport tensorflow as tf\n\nfrom object_detection.core import box_coder\nfrom object_detection.core import box_list\nfrom object_detection.core import standard_fields as fields\n\nEPSILON = 1e-8\n\n\nclass KeypointBoxCoder(box_coder.BoxCoder):\n \"\"\"Keypoint box coder.\"\"\"\n\n def __init__(self, num_keypoints, scale_factors=None):\n \"\"\"Constructor for KeypointBoxCoder.\n\n Args:\n num_keypoints: Number of keypoints to encode/decode.\n scale_factors: List of 4 positive scalars to scale ty, tx, th and tw.\n In addition to scaling ty and tx, the first 2 scalars are used to scale\n the y and x coordinates of the keypoints as well. If set to None, does\n not perform scaling.\n \"\"\"\n self._num_keypoints = num_keypoints\n\n if scale_factors:\n assert len(scale_factors) == 4\n for scalar in scale_factors:\n assert scalar > 0\n self._scale_factors = scale_factors\n self._keypoint_scale_factors = None\n if scale_factors is not None:\n self._keypoint_scale_factors = tf.expand_dims(tf.tile(\n [tf.to_float(scale_factors[0]), tf.to_float(scale_factors[1])],\n [num_keypoints]), 1)\n\n @property\n def code_size(self):\n return 4 + self._num_keypoints * 2\n\n def _encode(self, boxes, anchors):\n \"\"\"Encode a box and keypoint collection with respect to anchor collection.\n\n Args:\n boxes: BoxList holding N boxes and keypoints to be encoded. Boxes are\n tensors with the shape [N, 4], and keypoints are tensors with the shape\n [N, num_keypoints, 2].\n anchors: BoxList of anchors.\n\n Returns:\n a tensor representing N anchor-encoded boxes of the format\n [ty, tx, th, tw, tky0, tkx0, tky1, tkx1, ...] 
where tky0 and tkx0\n represent the y and x coordinates of the first keypoint, tky1 and tkx1\n represent the y and x coordinates of the second keypoint, and so on.\n \"\"\"\n # Convert anchors to the center coordinate representation.\n ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()\n ycenter, xcenter, h, w = boxes.get_center_coordinates_and_sizes()\n keypoints = boxes.get_field(fields.BoxListFields.keypoints)\n keypoints = tf.transpose(tf.reshape(keypoints,\n [-1, self._num_keypoints * 2]))\n num_boxes = boxes.num_boxes()\n\n # Avoid NaN in division and log below.\n ha += EPSILON\n wa += EPSILON\n h += EPSILON\n w += EPSILON\n\n tx = (xcenter - xcenter_a) / wa\n ty = (ycenter - ycenter_a) / ha\n tw = tf.log(w / wa)\n th = tf.log(h / ha)\n\n tiled_anchor_centers = tf.tile(\n tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1])\n tiled_anchor_sizes = tf.tile(\n tf.stack([ha, wa]), [self._num_keypoints, 1])\n tkeypoints = (keypoints - tiled_anchor_centers) / tiled_anchor_sizes\n\n # Scales location targets as used in paper for joint training.\n if self._scale_factors:\n ty *= self._scale_factors[0]\n tx *= self._scale_factors[1]\n th *= self._scale_factors[2]\n tw *= self._scale_factors[3]\n tkeypoints *= tf.tile(self._keypoint_scale_factors, [1, num_boxes])\n\n tboxes = tf.stack([ty, tx, th, tw])\n return tf.transpose(tf.concat([tboxes, tkeypoints], 0))\n\n def _decode(self, rel_codes, anchors):\n \"\"\"Decode relative codes to boxes and keypoints.\n\n Args:\n rel_codes: a tensor with shape [N, 4 + 2 * num_keypoints] representing N\n anchor-encoded boxes and keypoints\n anchors: BoxList of anchors.\n\n Returns:\n boxes: BoxList holding N bounding boxes and keypoints.\n \"\"\"\n ycenter_a, xcenter_a, ha, wa = anchors.get_center_coordinates_and_sizes()\n\n num_codes = tf.shape(rel_codes)[0]\n result = tf.unstack(tf.transpose(rel_codes))\n ty, tx, th, tw = result[:4]\n tkeypoints = result[4:]\n if self._scale_factors:\n ty /= self._scale_factors[0]\n tx /= self._scale_factors[1]\n th /= self._scale_factors[2]\n tw /= self._scale_factors[3]\n tkeypoints /= tf.tile(self._keypoint_scale_factors, [1, num_codes])\n\n w = tf.exp(tw) * wa\n h = tf.exp(th) * ha\n ycenter = ty * ha + ycenter_a\n xcenter = tx * wa + xcenter_a\n ymin = ycenter - h / 2.\n xmin = xcenter - w / 2.\n ymax = ycenter + h / 2.\n xmax = xcenter + w / 2.\n decoded_boxes_keypoints = box_list.BoxList(\n tf.transpose(tf.stack([ymin, xmin, ymax, xmax])))\n\n tiled_anchor_centers = tf.tile(\n tf.stack([ycenter_a, xcenter_a]), [self._num_keypoints, 1])\n tiled_anchor_sizes = tf.tile(\n tf.stack([ha, wa]), [self._num_keypoints, 1])\n keypoints = tkeypoints * tiled_anchor_sizes + tiled_anchor_centers\n keypoints = tf.reshape(tf.transpose(keypoints),\n [-1, self._num_keypoints, 2])\n decoded_boxes_keypoints.add_field(fields.BoxListFields.keypoints, keypoints)\n return decoded_boxes_keypoints\n",
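The encode/decode equations in the KeypointBoxCoder docstring are easy to sanity-check outside TensorFlow. A minimal NumPy sketch of the box terms, assuming a single box and anchor already in center-size form (the keypoint terms apply the same center/size normalization, y by ha and x by wa):

```python
import numpy as np

def encode(box, anchor):
    """box and anchor are (ycenter, xcenter, h, w); returns [ty, tx, th, tw]."""
    y, x, h, w = box
    ya, xa, ha, wa = anchor
    return np.array([(y - ya) / ha, (x - xa) / wa,
                     np.log(h / ha), np.log(w / wa)])

def decode(code, anchor):
    """Invert encode: recover (ycenter, xcenter, h, w) from [ty, tx, th, tw]."""
    ty, tx, th, tw = code
    ya, xa, ha, wa = anchor
    return np.array([ty * ha + ya, tx * wa + xa,
                     np.exp(th) * ha, np.exp(tw) * wa])

anchor = (0.5, 0.5, 0.4, 0.2)
box = (0.55, 0.48, 0.3, 0.25)
np.testing.assert_allclose(decode(encode(box, anchor), anchor), box)  # round trip
```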
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Travis Yates\n\n\"\"\"Provides functions to batch a dictionary of input tensors.\"\"\"\nimport collections\n\nimport tensorflow as tf\n\nfrom object_detection.core import prefetcher\n\n\nclass BatchQueue(object):\n \"\"\"BatchQueue class.\n\n This class creates a batch queue to asynchronously enqueue tensors_dict.\n It also adds a FIFO prefetcher so that the batches are readily available\n for the consumers. Dequeue ops for a BatchQueue object can be created via\n the Dequeue method which evaluates to a batch of tensor_dict.\n\n Example input pipeline with batching:\n ------------------------------------\n key, string_tensor = slim.parallel_reader.parallel_read(...)\n tensor_dict = decoder.decode(string_tensor)\n tensor_dict = preprocessor.preprocess(tensor_dict, ...)\n batch_queue = batcher.BatchQueue(tensor_dict,\n batch_size=32,\n batch_queue_capacity=2000,\n num_batch_queue_threads=8,\n prefetch_queue_capacity=20)\n tensor_dict = batch_queue.dequeue()\n outputs = Model(tensor_dict)\n ...\n -----------------------------------\n\n Notes:\n -----\n This class batches tensors of unequal sizes by zero padding and unpadding\n them after generating a batch. This can be computationally expensive when\n batching tensors (such as images) that are of vastly different sizes. So it is\n recommended that the shapes of such tensors be fully defined in tensor_dict\n while other lightweight tensors such as bounding box corners and class labels\n can be of varying sizes. Use either crop or resize operations to fully define\n the shape of an image in tensor_dict.\n\n It is also recommended to perform any preprocessing operations on tensors\n before passing to BatchQueue and subsequently calling the Dequeue method.\n\n Another caveat is that this class does not read the last batch if it is not\n full. The current implementation makes it hard to support that use case. 
So,\n for evaluation, when it is critical to run all the examples through your\n network use the input pipeline example mentioned in core/prefetcher.py.\n \"\"\"\n\n def __init__(self, tensor_dict, batch_size, batch_queue_capacity,\n num_batch_queue_threads, prefetch_queue_capacity):\n \"\"\"Constructs a batch queue holding tensor_dict.\n\n Args:\n tensor_dict: dictionary of tensors to batch.\n batch_size: batch size.\n batch_queue_capacity: max capacity of the queue from which the tensors are\n batched.\n num_batch_queue_threads: number of threads to use for batching.\n prefetch_queue_capacity: max capacity of the queue used to prefetch\n assembled batches.\n \"\"\"\n # Remember static shapes to set shapes of batched tensors.\n static_shapes = collections.OrderedDict(\n {key: tensor.get_shape() for key, tensor in tensor_dict.iteritems()})\n # Remember runtime shapes to unpad tensors after batching.\n runtime_shapes = collections.OrderedDict(\n {(key, 'runtime_shapes'): tf.shape(tensor)\n for key, tensor in tensor_dict.iteritems()})\n all_tensors = tensor_dict\n all_tensors.update(runtime_shapes)\n batched_tensors = tf.train.batch(\n all_tensors,\n capacity=batch_queue_capacity,\n batch_size=batch_size,\n dynamic_pad=True,\n num_threads=num_batch_queue_threads)\n\n self._queue = prefetcher.prefetch(batched_tensors,\n prefetch_queue_capacity)\n self._static_shapes = static_shapes\n self._batch_size = batch_size\n\n def dequeue(self):\n \"\"\"Dequeues a batch of tensor_dict from the BatchQueue.\n\n TODO: use allow_smaller_final_batch to allow running over the whole eval set\n\n Returns:\n A list of tensor_dicts of the requested batch_size.\n \"\"\"\n batched_tensors = self._queue.dequeue()\n # Separate input tensors from tensors containing their runtime shapes.\n tensors = {}\n shapes = {}\n for key, batched_tensor in batched_tensors.iteritems():\n unbatched_tensor_list = tf.unstack(batched_tensor)\n for i, unbatched_tensor in enumerate(unbatched_tensor_list):\n if isinstance(key, tuple) and key[1] == 'runtime_shapes':\n shapes[(key[0], i)] = unbatched_tensor\n else:\n tensors[(key, i)] = unbatched_tensor\n\n # Undo that padding using shapes and create a list of size `batch_size` that\n # contains tensor dictionaries.\n tensor_dict_list = []\n batch_size = self._batch_size\n for batch_id in range(batch_size):\n tensor_dict = {}\n for key in self._static_shapes:\n tensor_dict[key] = tf.slice(tensors[(key, batch_id)],\n tf.zeros_like(shapes[(key, batch_id)]),\n shapes[(key, batch_id)])\n tensor_dict[key].set_shape(self._static_shapes[key])\n tensor_dict_list.append(tensor_dict)\n\n return tensor_dict_list\n"
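BatchQueue's central trick is to zero-pad unequal tensors so they can be stacked into a batch, record each tensor's runtime shape alongside it, and slice the padding off again after dequeue. A minimal NumPy sketch of that pad/unpad round trip, assuming 1-D tensors (the dequeue method above does the same with tf.slice and the recorded runtime shapes):

```python
import numpy as np

tensors = [np.array([1, 2, 3]), np.array([4])]       # examples of unequal size
shapes = [t.shape for t in tensors]                  # the "runtime shapes"

# Zero-pad every example to the largest size so they stack into one batch.
max_len = max(s[0] for s in shapes)
batch = np.stack([np.pad(t, (0, max_len - t.shape[0])) for t in tensors])

# After "dequeue", the recorded shapes tell us how much padding to strip.
unpadded = [batch[i][:shapes[i][0]] for i in range(len(tensors))]
assert all(np.array_equal(a, b) for a, b in zip(unpadded, tensors))
```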
] |
[
[
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"tensorflow.gfile.GFile",
"numpy.squeeze",
"tensorflow.Session",
"tensorflow.GraphDef"
],
[
"tensorflow.placeholder",
"tensorflow.test.main"
],
[
"tensorflow.TensorShape",
"tensorflow.test.main"
],
[
"tensorflow.test.main"
],
[
"tensorflow.concat",
"tensorflow.transpose",
"tensorflow.shape",
"tensorflow.stack",
"tensorflow.reshape",
"tensorflow.exp",
"tensorflow.log",
"tensorflow.to_float",
"tensorflow.tile"
],
[
"tensorflow.zeros_like",
"tensorflow.train.batch",
"tensorflow.unstack",
"tensorflow.shape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
guoshengCS/PaddleNLP
|
[
"8c6d4fd7b926577a23a91dc977281758760c3c22",
"8c6d4fd7b926577a23a91dc977281758760c3c22"
] |
[
"examples/distill/distill_lstm/utils.py",
"benchmark/bert/run_pretrain_single.py"
] |
[
"# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport jieba\n\nimport numpy as np\n\n\ndef convert_small_example(example,\n task_name,\n vocab,\n is_tokenized=False,\n max_seq_length=128,\n is_test=False):\n input_ids = []\n if task_name == 'senta':\n for i, token in enumerate(jieba.cut(example[0])):\n if i == max_seq_length:\n break\n token_id = vocab[token]\n input_ids.append(token_id)\n else:\n if is_tokenized:\n tokens = example[0][:max_seq_length]\n else:\n tokens = vocab(example[0])[:max_seq_length]\n input_ids = vocab.convert_tokens_to_ids(tokens)\n\n valid_length = np.array(len(input_ids), dtype='int64')\n\n if not is_test:\n label = np.array(example[-1], dtype=\"int64\")\n return input_ids, valid_length, label\n else:\n return input_ids, valid_length\n\n\ndef convert_pair_example(example,\n task_name,\n vocab,\n is_tokenized=True,\n max_seq_length=128,\n is_test=False):\n is_tokenized &= (task_name != 'senta')\n seq1 = convert_small_example([example[0], example[2]], task_name, vocab,\n is_tokenized, max_seq_length, is_test)[:2]\n\n seq2 = convert_small_example([example[1], example[2]], task_name, vocab,\n is_tokenized, max_seq_length, is_test)\n pair_features = seq1 + seq2\n\n return pair_features\n\n\ndef convert_two_example(example,\n task_name,\n tokenizer,\n label_list,\n max_seq_length,\n vocab,\n is_tokenized=True,\n is_test=False):\n is_tokenized &= (task_name != 'senta')\n bert_features = convert_example(\n example,\n tokenizer=tokenizer,\n label_list=label_list,\n is_tokenized=is_tokenized,\n max_seq_length=max_seq_length,\n is_test=is_test)\n if task_name == 'qqp':\n small_features = convert_pair_example(\n example, task_name, vocab, is_tokenized, max_seq_length, is_test)\n else:\n small_features = convert_small_example(\n example, task_name, vocab, is_tokenized, max_seq_length, is_test)\n\n return bert_features[:2] + small_features\n\n\ndef convert_example(example,\n tokenizer,\n label_list,\n is_tokenized=False,\n max_seq_length=512,\n is_test=False):\n \"\"\"convert a glue example into necessary features\"\"\"\n\n def _truncate_seqs(seqs, max_seq_length):\n if len(seqs) == 1: # single sentence\n # Account for [CLS] and [SEP] with \"- 2\"\n seqs[0] = seqs[0][0:(max_seq_length - 2)]\n else: # Sentence pair\n # Account for [CLS], [SEP], [SEP] with \"- 3\"\n tokens_a, tokens_b = seqs\n max_seq_length -= 3\n while True: # Truncate with longest_first strategy\n total_length = len(tokens_a) + len(tokens_b)\n if total_length <= max_seq_length:\n break\n if len(tokens_a) > len(tokens_b):\n tokens_a.pop()\n else:\n tokens_b.pop()\n return seqs\n\n def _concat_seqs(seqs, separators, seq_mask=0, separator_mask=1):\n concat = sum((seq + sep for sep, seq in zip(separators, seqs)), [])\n segment_ids = sum(\n ([i] * (len(seq) + len(sep))\n for i, (sep, seq) in enumerate(zip(separators, seqs))), [])\n if isinstance(seq_mask, int):\n seq_mask = [[seq_mask] * len(seq) for seq in seqs]\n if isinstance(separator_mask, 
int):\n            separator_mask = [[separator_mask] * len(sep) for sep in separators]\n        p_mask = sum((s_mask + mask\n                      for sep, seq, s_mask, mask in zip(\n                          separators, seqs, seq_mask, separator_mask)), [])\n        return concat, segment_ids, p_mask\n\n    if not is_test:\n        # `label_list == None` is for regression task\n        label_dtype = \"int64\" if label_list else \"float32\"\n        # Get the label\n        label = example[-1]\n        example = example[:-1]\n        # Create label maps if classification task\n        if label_list:\n            label_map = {}\n            for (i, l) in enumerate(label_list):\n                label_map[l] = i\n            label = label_map[label]\n        label = np.array([label], dtype=label_dtype)\n\n    if is_tokenized:\n        tokens_raw = example\n    else:\n        # Tokenize raw text\n        tokens_raw = [tokenizer(l) for l in example]\n    # Truncate to the max sequence length\n    tokens_trun = _truncate_seqs(tokens_raw, max_seq_length)\n\n    # Concatenate the sequences with special tokens\n    tokens_trun[0] = [tokenizer.cls_token] + tokens_trun[0]\n    tokens, segment_ids, _ = _concat_seqs(tokens_trun, [[tokenizer.sep_token]] *\n                                          len(tokens_trun))\n    # Convert the tokens to ids\n    input_ids = tokenizer.convert_tokens_to_ids(tokens)\n    valid_length = len(input_ids)\n    # The mask has 1 for real tokens and 0 for padding tokens. Only real\n    # tokens are attended to.\n    # input_mask = [1] * len(input_ids)\n    if not is_test:\n        return input_ids, segment_ids, valid_length, label\n    else:\n        return input_ids, segment_ids, valid_length\n",
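The `_truncate_seqs` helper above implements the longest-first strategy for sentence pairs: tokens are popped from whichever sequence is currently longer until the pair fits in the budget left after the special tokens. A standalone sketch of that loop with a quick check:

```python
def truncate_pair(tokens_a, tokens_b, max_len):
    """Pop from the longer sequence until len(a) + len(b) <= max_len."""
    while len(tokens_a) + len(tokens_b) > max_len:
        if len(tokens_a) > len(tokens_b):
            tokens_a.pop()
        else:
            tokens_b.pop()
    return tokens_a, tokens_b

a, b = truncate_pair(list('abcdefgh'), list('xyz'), 8)
assert (len(a), len(b)) == (5, 3)  # only the longer sequence was trimmed
```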
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport collections\nimport itertools\nimport os\nimport random\nimport time\nimport h5py\nfrom functools import partial\nimport numpy as np\nimport distutils.util\n\nimport paddle\nfrom paddle.io import DataLoader, Dataset\nfrom paddlenlp.transformers import BertForPretraining, BertModel, BertPretrainingCriterion\nfrom paddlenlp.transformers import ErnieForPretraining, ErnieModel, ErniePretrainingCriterion\nfrom paddlenlp.transformers import BertTokenizer, ErnieTokenizer\nfrom paddlenlp.transformers import LinearDecayWithWarmup\nfrom data import create_data_holder, create_pretraining_dataset\n\nMODEL_CLASSES = {\n \"bert\": (BertForPretraining, BertTokenizer),\n \"ernie\": (ErnieForPretraining, ErnieTokenizer)\n}\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--select_device\",\n default=\"gpu\",\n type=str,\n help=\"The device that selecting for the training, must be gpu/xpu.\")\n parser.add_argument(\n \"--model_type\",\n default=None,\n type=str,\n required=True,\n help=\"Model type selected in the list: \" +\n \", \".join(MODEL_CLASSES.keys()), )\n parser.add_argument(\n \"--model_name_or_path\",\n default=None,\n type=str,\n required=True,\n help=\"Path to pre-trained model or shortcut name selected in the list: \"\n + \", \".join(\n sum([\n list(classes[-1].pretrained_init_configuration.keys())\n for classes in MODEL_CLASSES.values()\n ], [])), )\n parser.add_argument(\n \"--input_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The input directory where the data will be read from.\", )\n parser.add_argument(\n \"--output_dir\",\n default=None,\n type=str,\n required=True,\n help=\"The output directory where the model predictions and checkpoints will be written.\",\n )\n parser.add_argument(\n \"--max_predictions_per_seq\",\n default=80,\n type=int,\n help=\"The maximum total of masked tokens in input sequence\")\n\n parser.add_argument(\n \"--batch_size\",\n default=8,\n type=int,\n help=\"Batch size per GPU/CPU for training.\", )\n parser.add_argument(\n \"--learning_rate\",\n default=5e-5,\n type=float,\n help=\"The initial learning rate for Adam.\")\n parser.add_argument(\n \"--weight_decay\",\n default=0.0,\n type=float,\n help=\"Weight decay if we apply some.\")\n parser.add_argument(\n \"--adam_epsilon\",\n default=1e-8,\n type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\n \"--max_grad_norm\", default=1.0, type=float, help=\"Max gradient norm.\")\n parser.add_argument(\n \"--max_steps\",\n default=-1,\n type=int,\n help=\"If > 0: set total number of training steps to perform. 
Override num_train_epochs.\",\n )\n parser.add_argument(\n \"--warmup_steps\",\n default=0,\n type=int,\n help=\"Linear warmup over warmup_steps.\")\n parser.add_argument(\n \"--logging_steps\",\n type=int,\n default=500,\n help=\"Log every X updates steps.\")\n parser.add_argument(\n \"--save_steps\",\n type=int,\n default=500,\n help=\"Save checkpoint every X updates steps.\")\n parser.add_argument(\n \"--seed\", type=int, default=42, help=\"Random seed for initialization\")\n parser.add_argument(\n \"--use_amp\",\n type=distutils.util.strtobool,\n default=False,\n help=\"Enable mixed precision training.\")\n parser.add_argument(\n \"--enable_addto\",\n type=distutils.util.strtobool,\n default=False,\n help=\"Whether to enable the addto strategy for gradient accumulation or not. This is only used for AMP training.\"\n )\n parser.add_argument(\n \"--scale_loss\",\n type=float,\n default=1.0,\n help=\"The value of scale_loss for fp16.\")\n parser.add_argument(\n \"--use_pure_fp16\",\n type=distutils.util.strtobool,\n default=False,\n help=\"Whether to use pure fp16 training.\")\n args = parser.parse_args()\n return args\n\n\ndef build_compiled_program(args, main_program, loss):\n if args.select_device == \"xpu\":\n return main_program\n exec_strategy = paddle.static.ExecutionStrategy()\n exec_strategy.num_threads = 1\n exec_strategy.num_iteration_per_drop_scope = 10000\n build_strategy = paddle.static.BuildStrategy()\n build_strategy.enable_addto = args.enable_addto\n main_program = paddle.static.CompiledProgram(\n main_program).with_data_parallel(\n loss_name=loss.name,\n exec_strategy=exec_strategy,\n build_strategy=build_strategy)\n return main_program\n\n\ndef reset_program_state_dict(model, state_dict):\n scale = model.initializer_range if hasattr(model, \"initializer_range\")\\\n else model.bert.config[\"initializer_range\"]\n\n new_state_dict = dict()\n for n, p in state_dict.items():\n if \"layer_norm\" not in p.name:\n dtype_str = \"float32\"\n if str(p.dtype) == \"VarType.FP64\":\n dtype_str = \"float64\"\n new_state_dict[p.name] = np.random.normal(\n loc=0.0, scale=scale, size=p.shape).astype(dtype_str)\n return new_state_dict\n\n\ndef set_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n paddle.seed(seed)\n\n\ndef do_train(args):\n # Initialize the paddle execute enviroment\n paddle.enable_static()\n place = paddle.set_device(args.select_device)\n\n # Set the random seed\n set_seed(args.seed)\n\n # Define the input data in the static mode\n main_program = paddle.static.default_main_program()\n startup_program = paddle.static.default_startup_program()\n data_holders = create_data_holder(args)\n [\n input_ids, segment_ids, input_mask, masked_lm_positions,\n masked_lm_labels, next_sentence_labels, masked_lm_scale\n ] = data_holders\n\n # Define the model structure in static mode\n args.model_type = args.model_type.lower()\n model_class, tokenizer_class = MODEL_CLASSES[args.model_type]\n tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)\n config = model_class.pretrained_init_configuration[args.model_name_or_path]\n if config[\"vocab_size\"] % 8 != 0:\n config[\"vocab_size\"] += 8 - (config[\"vocab_size\"] % 8)\n model = BertForPretraining(BertModel(**config))\n criterion = BertPretrainingCriterion(model.bert.config[\"vocab_size\"])\n prediction_scores, seq_relationship_score = model(\n input_ids=input_ids,\n token_type_ids=segment_ids,\n attention_mask=input_mask,\n masked_positions=masked_lm_positions)\n loss = criterion(prediction_scores, 
seq_relationship_score,\n masked_lm_labels, next_sentence_labels, masked_lm_scale)\n\n # Define the dynamic learing_reate scheduler and optimizer\n num_training_steps = args.max_steps if args.max_steps > 0 else len(\n train_data_loader) * args.num_train_epochs\n\n lr_scheduler = LinearDecayWithWarmup(args.learning_rate, num_training_steps,\n args.warmup_steps)\n\n optimizer = paddle.optimizer.AdamW(\n learning_rate=lr_scheduler,\n epsilon=args.adam_epsilon,\n parameters=model.parameters(),\n weight_decay=args.weight_decay,\n apply_decay_param_fun=lambda x: x in [\n p.name for n, p in model.named_parameters()\n if not any(nd in n for nd in [\"bias\", \"norm\"])\n ],\n multi_precision=args.use_pure_fp16)\n if args.use_amp:\n custom_black_list = (['lookup_table', 'lookup_table_v2']\n if args.use_pure_fp16 else None)\n amp_list = paddle.static.amp.AutoMixedPrecisionLists(\n custom_white_list=['layer_norm', 'softmax', 'gelu'],\n custom_black_list=custom_black_list)\n optimizer = paddle.static.amp.decorate(\n optimizer,\n amp_list,\n init_loss_scaling=args.scale_loss,\n use_dynamic_loss_scaling=True,\n use_pure_fp16=args.use_pure_fp16)\n optimizer.minimize(loss)\n\n # Define the Executor for running the static model\n exe = paddle.static.Executor(place)\n exe.run(startup_program)\n state_dict = model.state_dict()\n\n # Use the state dict to update the parameter\n reset_state_dict = reset_program_state_dict(model, state_dict)\n paddle.static.set_program_state(main_program, reset_state_dict)\n if args.use_amp:\n optimizer.amp_init(place)\n # Construct the compiled program\n main_program = build_compiled_program(args, main_program, loss)\n global_step = 0\n tic_train = time.time()\n epoch = 0\n while True:\n files = [\n os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir)\n if os.path.isfile(os.path.join(args.input_dir, f)) and \"training\" in\n f\n ]\n files.sort()\n random.Random(args.seed + epoch).shuffle(files)\n\n for f_id in range(0, len(files)):\n train_data_loader, _ = create_pretraining_dataset(\n files[f_id], args.max_predictions_per_seq, args, data_holders)\n train_reader_cost = 0.0\n train_run_cost = 0.0\n total_samples = 0\n reader_start = time.time()\n for step, batch in enumerate(train_data_loader):\n train_reader_cost += time.time() - reader_start\n global_step += 1\n train_start = time.time()\n loss_return = exe.run(main_program,\\\n feed=batch,\n fetch_list=[loss])\n train_run_cost += time.time() - train_start\n total_samples += args.batch_size\n # In the new 2.0 api, must call this function to change the learning_rate\n lr_scheduler.step()\n if global_step % args.logging_steps == 0:\n print(\n \"global step: %d, epoch: %d, batch: %d, loss: %f, \"\n \"avg_reader_cost: %.5f sec, avg_batch_cost: %.5f sec, avg_samples: %.5f, ips: %.5f sequences/sec\"\n %\n (global_step, epoch, step, loss_return[0],\n train_reader_cost / args.logging_steps,\n (train_reader_cost + train_run_cost) /\n args.logging_steps, total_samples / args.logging_steps,\n total_samples / (train_reader_cost + train_run_cost)))\n train_reader_cost = 0.0\n train_run_cost = 0.0\n total_samples = 0\n if global_step % args.save_steps == 0:\n output_dir = os.path.join(args.output_dir,\n \"model_%d\" % global_step)\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n # TODO(fangzeyang): Udpate the save_params to paddle.static\n paddle.fluid.io.save_params(exe, output_dir)\n tokenizer.save_pretrained(output_dir)\n if global_step >= args.max_steps:\n reader_start = time.time()\n del 
train_data_loader\n return\n reader_start = time.time()\n del train_data_loader\n epoch += 1\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n do_train(args)\n"
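The LinearDecayWithWarmup scheduler used in do_train follows the usual BERT pretraining shape: the learning rate ramps up linearly over warmup_steps and then decays linearly to zero at the last training step. A pure-Python sketch of that formula (an assumption about the schedule's exact shape, not PaddleNLP's implementation):

```python
def linear_decay_with_warmup(base_lr, step, total_steps, warmup_steps):
    """Linear warmup to base_lr, then linear decay to 0 at total_steps."""
    if step < warmup_steps:
        return base_lr * step / max(1, warmup_steps)
    remaining = max(0, total_steps - step)
    return base_lr * remaining / max(1, total_steps - warmup_steps)

# With base_lr=5e-5, 10000 total steps and 1000 warmup steps:
assert linear_decay_with_warmup(5e-5, 500, 10000, 1000) == 2.5e-5  # mid-warmup
assert linear_decay_with_warmup(5e-5, 10000, 10000, 1000) == 0.0   # fully decayed
```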
] |
[
[
"numpy.array"
],
[
"numpy.random.normal",
"numpy.random.seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
svenbuder/TheGALAHPayne
|
[
"fc0b0a227c4f032c6cc26bd8b50c4dce7b50a4f8"
] |
[
"The_Payne/process_spectra.py"
] |
[
"'''\nCode for reading in combined spectra.\nAny way that you can get your hands on the spectra should be fine, as long as you \n\nHere we adopt APOGEE DR14. Edit os.environs below for a later version of APOGEE data release.\nSince our spectral model training set was normalized using the DR12 wavelength definition, \neven thought the spectra are from DR14, we will resample them into DR12 wavelength format.\n'''\n\nfrom __future__ import absolute_import, division, print_function # python2 compatibility\nimport numpy as np\nimport sys\nimport os\nimport subprocess\nimport astropy.io.fits as pyfits\n \nfrom . import utils\nfrom . import spectral_model\n\n# dr14\nmaster_path = \"data.sdss3.org/sas/dr14/apogee/spectro/redux/r8/stars/\" \ncatalog_path = \"l31c/l31c.2/\"\ncatalog_name = \"allStar-l31c.2.fits\"\n\n# download path\ndownload_path = \"apogee_download/\"\n\n# read in the list of pixels used for fitting the APOGEE continuum\ncont_pixels = utils.load_cannon_contpixels()\n\ndef read_apogee_catalog():\n '''\n read in the catalog of info for all stars in a data release. \n '''\n filepath = os.path.join(master_path, catalog_path, catalog_name) # dr14\n filename = os.path.join(download_path, catalog_name)\n \n try:\n os.makedirs(os.path.dirname(download_path))\n except OSError: pass\n if not os.path.exists(filename):\n print(\"Downloading : \" + catalog_name)\n subprocess.check_call([\"wget\", filepath, \"-O\", \"%s\"%filename])\n\n all_star_catalog = pyfits.getdata(filename)\n catalog_id = all_star_catalog['APOGEE_ID'].astype(\"str\")\n return all_star_catalog, catalog_id\n\n\n\ndef get_combined_spectrum_single_object(apogee_id, catalog = None, save_local = False):\n '''\n apogee_id should be a byte-like object; i.e b'2M13012770+5754582'\n This downloads a single combined spectrum and the associated error array,\n and it normalizes both. 
\n '''\n \n # read in the allStar catalog if you haven't already\n if catalog is None:\n catalog, catalog_id = read_apogee_catalog()\n \n _COMBINED_INDEX = 1\n \n msk = np.where(catalog_id == apogee_id)[0]\n if not len(msk):\n raise ValueError('the desired Apogee ID was not found in the allStar catalog.')\n\n field = catalog['FIELD'][msk[0]]\n loc_id = catalog['LOCATION_ID'][msk[0]]\n\n filename = 'apStar-r8-%s.fits' % apogee_id.strip()\n if loc_id == 1:\n filepath = os.path.join(master_path,'apo1m', field.strip(), filename)\n else:\n filepath = os.path.join(master_path,'apo25m', '%i' % loc_id, filename)\n filename = os.path.join(download_path, filename)\n\n # download spectrum\n try:\n os.makedirs(os.path.dirname(download_path))\n except OSError: pass\n if not os.path.exists(filename):\n subprocess.check_call([\"wget\", filepath, '-O', '%s'%filename])\n\n # read spectrum\n temp1 = pyfits.getdata(filename, ext = 1, header = False)\n temp2 = pyfits.getdata(filename, ext = 2, header = False)\n temp3 = pyfits.getdata(filename, ext = 3, header = False)\n \n if temp1.shape[0] > 6000:\n spec = temp1\n specerr = temp2\n mask = temp3\n else:\n spec = temp1[_COMBINED_INDEX]\n specerr = temp2[_COMBINED_INDEX]\n mask = temp3[_COMBINED_INDEX]\n\n # Inflate uncertainties for bad pixels \n #specerr[mask != 0] += 100*np.median(spec[np.isfinite(spec)])\n\n # Inflate pixels with high SNR to 0.5\n #highsnr = spec/specerr > 200.\n #specerr[highsnr] = 0.005*np.fabs(spec[highsnr])\n \n # convert ApStar grid to Aspcap grid\n spec = toAspcapGrid(spec) # dr12 wavelength format\n specerr = toAspcapGrid(specerr)\n \n # cull dead pixels\n choose = spec <= 0\n spec[choose] = 0.01\n specerr[choose] = np.max(np.abs(spec))*999.\n \n # continuum-normalize\n cont = utils.get_apogee_continuum(spec = spec, \n spec_err = specerr, cont_pixels = cont_pixels)\n spec /= cont\n specerr /= cont\n \n if save_local:\n np.savez(download_path + 'spectrum_ap_id_' + str(apogee_id) + '_.npz',\n spectrum = spec, spec_err = specerr)\n return spec, specerr\n \n\ndef toAspcapGrid(spec):\n \"\"\"\n Convert a spectrum from apStar grid to the ASPCAP grid (w/o the detector gaps)\n Adapted from Jo Bovy's APOGEE package\n \"\"\"\n \n apStarBlu_lo,apStarBlu_hi,apStarGre_lo,apStarGre_hi,apStarRed_lo,apStarRed_hi \\\n = 322, 3242, 3648, 6048, 6412, 8306 # dr12\n aspcapBlu_start = 0\n aspcapGre_start = apStarBlu_hi-apStarBlu_lo+aspcapBlu_start\n aspcapRed_start = apStarGre_hi-apStarGre_lo+aspcapGre_start\n aspcapTotal = apStarRed_hi-apStarRed_lo+aspcapRed_start\n\n out= np.zeros(aspcapTotal,dtype=spec.dtype)\n \n out[:aspcapGre_start]= spec[apStarBlu_lo:apStarBlu_hi]\n out[aspcapGre_start:aspcapRed_start]= spec[apStarGre_lo:apStarGre_hi]\n out[aspcapRed_start:]= spec[apStarRed_lo:apStarRed_hi]\n\n return out\n"
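The DR12 constants in toAspcapGrid fix the length of the ASPCAP output: the blue, green and red detector windows contribute 3242-322 = 2920, 6048-3648 = 2400 and 8306-6412 = 1894 pixels, 7214 in total. A quick NumPy check of that index arithmetic on a toy spectrum (the 8575-pixel apStar grid length is assumed here for illustration):

```python
import numpy as np

blu, gre, red = (322, 3242), (3648, 6048), (6412, 8306)  # DR12 apStar windows
aspcap_total = sum(hi - lo for lo, hi in (blu, gre, red))
assert aspcap_total == 7214  # 2920 + 2400 + 1894 pixels, detector gaps removed

spec = np.arange(8575, dtype=float)  # toy stand-in for an apStar spectrum
out = np.concatenate([spec[lo:hi] for lo, hi in (blu, gre, red)])
assert out.shape == (aspcap_total,)
assert out[0] == 322 and out[2920] == 3648  # green chip follows blue directly
```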
] |
[
[
"numpy.abs",
"numpy.zeros",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jay90099/model-analysis
|
[
"4389611ae476686d349bd6d16de39855d491cf0c",
"4389611ae476686d349bd6d16de39855d491cf0c"
] |
[
"tensorflow_model_analysis/api/model_eval_lib_test.py",
"tensorflow_model_analysis/addons/fairness/metrics/fairness_indicators_test.py"
] |
[
"# Lint as: python3\n# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test for using the model_eval_lib API.\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport json\nimport os\nimport tempfile\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\nimport pandas as pd\nimport tensorflow as tf\nfrom tensorflow_model_analysis import constants\nfrom tensorflow_model_analysis import types\nfrom tensorflow_model_analysis.api import model_eval_lib\nfrom tensorflow_model_analysis.eval_saved_model import testutil\nfrom tensorflow_model_analysis.eval_saved_model.example_trainers import csv_linear_classifier\nfrom tensorflow_model_analysis.eval_saved_model.example_trainers import fixed_prediction_estimator\nfrom tensorflow_model_analysis.eval_saved_model.example_trainers import linear_classifier\nfrom tensorflow_model_analysis.eval_saved_model.example_trainers import linear_regressor\nfrom tensorflow_model_analysis.evaluators import legacy_metrics_and_plots_evaluator\nfrom tensorflow_model_analysis.evaluators import legacy_query_based_metrics_evaluator\nfrom tensorflow_model_analysis.evaluators import metrics_plots_and_validations_evaluator\nfrom tensorflow_model_analysis.evaluators.query_metrics import ndcg as legacy_ndcg\nfrom tensorflow_model_analysis.evaluators.query_metrics import query_statistics\nfrom tensorflow_model_analysis.extractors import legacy_feature_extractor\nfrom tensorflow_model_analysis.extractors import legacy_predict_extractor\nfrom tensorflow_model_analysis.extractors import slice_key_extractor\nfrom tensorflow_model_analysis.metrics import calibration_plot\nfrom tensorflow_model_analysis.metrics import metric_specs\nfrom tensorflow_model_analysis.metrics import ndcg\nfrom tensorflow_model_analysis.post_export_metrics import metric_keys\nfrom tensorflow_model_analysis.post_export_metrics import post_export_metrics\nfrom tensorflow_model_analysis.proto import config_pb2\nfrom tensorflow_model_analysis.proto import validation_result_pb2\nfrom tensorflow_model_analysis.slicer import slicer_lib\nfrom tensorflow_model_analysis.view import view_types\n\nfrom google.protobuf import text_format\nfrom tensorflow_metadata.proto.v0 import schema_pb2\n\ntry:\n import tensorflow_ranking as tfr # pylint: disable=g-import-not-at-top\n _TFR_IMPORTED = True\nexcept (ImportError, tf.errors.NotFoundError):\n _TFR_IMPORTED = False\n\ntry:\n from tensorflowjs.converters import converter as tfjs_converter # pylint: disable=g-import-not-at-top\n _TFJS_IMPORTED = True\nexcept ModuleNotFoundError:\n _TFJS_IMPORTED = False\n\n_TEST_SEED = 982735\n\n_TF_MAJOR_VERSION = int(tf.version.VERSION.split('.')[0])\n\n\nclass EvaluateTest(testutil.TensorflowModelAnalysisTest,\n parameterized.TestCase):\n\n def setUp(self):\n super(EvaluateTest, self).setUp()\n self.longMessage = True # pylint: disable=invalid-name\n\n def _getTempDir(self):\n return tempfile.mkdtemp()\n\n def _exportEvalSavedModel(self, 
classifier):\n temp_eval_export_dir = os.path.join(self._getTempDir(), 'eval_export_dir')\n _, eval_export_dir = classifier(None, temp_eval_export_dir)\n return eval_export_dir\n\n def _writeTFExamplesToTFRecords(self, examples):\n data_location = os.path.join(self._getTempDir(), 'input_data.rio')\n with tf.io.TFRecordWriter(data_location) as writer:\n for example in examples:\n writer.write(example.SerializeToString())\n return data_location\n\n def _writeCSVToTextFile(self, examples):\n data_location = os.path.join(self._getTempDir(), 'input_data.csv')\n with open(data_location, 'w') as writer:\n for example in examples:\n writer.write(example + '\\n')\n return data_location\n\n def assertMetricsAlmostEqual(self,\n got_slicing_metrics,\n expected_slicing_metrics,\n output_name='',\n subkey=''):\n if got_slicing_metrics:\n for (s, m) in got_slicing_metrics:\n metrics = m[output_name][subkey]\n self.assertIn(s, expected_slicing_metrics)\n for metric_name in expected_slicing_metrics[s]:\n self.assertIn(metric_name, metrics)\n self.assertDictElementsAlmostEqual(\n metrics[metric_name], expected_slicing_metrics[s][metric_name])\n else:\n # Only pass if expected_slicing_metrics also evaluates to False.\n self.assertFalse(\n expected_slicing_metrics, msg='Actual slicing_metrics was empty.')\n\n def assertSliceMetricsEqual(self, expected_metrics, got_metrics):\n self.assertCountEqual(\n list(expected_metrics.keys()),\n list(got_metrics.keys()),\n msg='keys do not match. expected_metrics: %s, got_metrics: %s' %\n (expected_metrics, got_metrics))\n for key in expected_metrics.keys():\n self.assertProtoEquals(\n expected_metrics[key],\n got_metrics[key],\n msg='value for key %s does not match' % key)\n\n def assertSliceListEqual(self, expected_list, got_list, value_assert_fn):\n self.assertEqual(\n len(expected_list),\n len(got_list),\n msg='expected_list: %s, got_list: %s' % (expected_list, got_list))\n for index, (expected, got) in enumerate(zip(expected_list, got_list)):\n (expected_key, expected_value) = expected\n (got_key, got_value) = got\n self.assertEqual(\n expected_key, got_key, msg='key mismatch at index %d' % index)\n value_assert_fn(expected_value, got_value)\n\n def assertSlicePlotsListEqual(self, expected_list, got_list):\n self.assertSliceListEqual(expected_list, got_list, self.assertProtoEquals)\n\n def assertSliceMetricsListEqual(self, expected_list, got_list):\n self.assertSliceListEqual(expected_list, got_list,\n self.assertSliceMetricsEqual)\n\n def testNoConstructFn(self):\n model_location = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n examples = [self._makeExample(age=3.0, language='english', label=1.0)]\n data_location = self._writeTFExamplesToTFRecords(examples)\n eval_config = config_pb2.EvalConfig()\n # No construct_fn should fail when Beam attempts to call the construct_fn.\n eval_shared_model = types.EvalSharedModel(model_path=model_location)\n with self.assertRaisesRegex(AttributeError,\n '\\'NoneType\\' object has no attribute'):\n model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n data_location=data_location,\n output_path=self._getTempDir())\n\n # Using the default_eval_shared_model should pass as it has a construct_fn.\n eval_shared_model = model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location)\n model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n data_location=data_location,\n 
output_path=self._getTempDir())\n\n def testMixedEvalAndNonEvalSignatures(self):\n examples = [self._makeExample(age=3.0, language='english', label=1.0)]\n data_location = self._writeTFExamplesToTFRecords(examples)\n eval_config = config_pb2.EvalConfig(model_specs=[\n config_pb2.ModelSpec(name='model1'),\n config_pb2.ModelSpec(name='model2', signature_name='eval')\n ])\n eval_shared_models = [\n model_eval_lib.default_eval_shared_model(\n model_name='model1',\n eval_saved_model_path='/model1/path',\n eval_config=eval_config),\n model_eval_lib.default_eval_shared_model(\n model_name='model2',\n eval_saved_model_path='/model2/path',\n eval_config=eval_config),\n ]\n with self.assertRaisesRegex(\n NotImplementedError,\n 'support for mixing eval and non-eval estimator models is not '\n 'implemented'):\n model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=eval_shared_models,\n data_location=data_location,\n output_path=self._getTempDir())\n\n @parameterized.named_parameters(('tflite', constants.TF_LITE),\n ('tfjs', constants.TF_JS))\n def testMixedModelTypes(self, model_type):\n examples = [self._makeExample(age=3.0, language='english', label=1.0)]\n data_location = self._writeTFExamplesToTFRecords(examples)\n eval_config = config_pb2.EvalConfig(model_specs=[\n config_pb2.ModelSpec(name='model1'),\n config_pb2.ModelSpec(name='model2', model_type=model_type)\n ])\n eval_shared_models = [\n model_eval_lib.default_eval_shared_model(\n model_name='model1',\n eval_saved_model_path='/model1/path',\n eval_config=eval_config),\n model_eval_lib.default_eval_shared_model(\n model_name='model2',\n eval_saved_model_path='/model2/path',\n eval_config=eval_config)\n ]\n with self.assertRaisesRegex(\n NotImplementedError, 'support for mixing .* models is not implemented'):\n model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=eval_shared_models,\n data_location=data_location,\n output_path=self._getTempDir())\n\n def testRunModelAnalysisExtraFieldsPlusFeatureExtraction(self):\n model_location = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n examples = [\n self._makeExample(age=3.0, language='english', label=1.0, my_slice='a'),\n self._makeExample(age=3.0, language='chinese', label=0.0, my_slice='a'),\n self._makeExample(age=4.0, language='english', label=1.0, my_slice='b'),\n self._makeExample(age=5.0, language='chinese', label=1.0, my_slice='c'),\n self._makeExample(age=5.0, language='hindi', label=1.0)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n slicing_specs = [slicer_lib.SingleSliceSpec(columns=['my_slice'])]\n eval_shared_model = model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location, example_weight_key='age')\n extractors_with_feature_extraction = [\n legacy_predict_extractor.PredictExtractor(\n eval_shared_model, desired_batch_size=3, materialize=False),\n legacy_feature_extractor.FeatureExtractor(\n extract_source=constants.INPUT_KEY,\n extract_dest=constants.FEATURES_PREDICTIONS_LABELS_KEY),\n slice_key_extractor.SliceKeyExtractor(\n slice_spec=slicing_specs, materialize=False)\n ]\n eval_result = model_eval_lib.run_model_analysis(\n eval_shared_model=model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location, example_weight_key='age'),\n data_location=data_location,\n output_path=self._getTempDir(),\n extractors=extractors_with_feature_extraction,\n slice_spec=slicing_specs)\n # We only check some of the metrics to ensure that the 
end-to-end\n # pipeline works.\n expected = {\n (('my_slice', 'a'),): {\n 'accuracy': {\n 'doubleValue': 1.0\n },\n 'my_mean_label': {\n 'doubleValue': 0.5\n },\n metric_keys.EXAMPLE_WEIGHT: {\n 'doubleValue': 6.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n },\n (('my_slice', 'b'),): {\n 'accuracy': {\n 'doubleValue': 1.0\n },\n 'my_mean_label': {\n 'doubleValue': 1.0\n },\n metric_keys.EXAMPLE_WEIGHT: {\n 'doubleValue': 4.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 1.0\n },\n },\n (('my_slice', 'c'),): {\n 'accuracy': {\n 'doubleValue': 0.0\n },\n 'my_mean_label': {\n 'doubleValue': 1.0\n },\n metric_keys.EXAMPLE_WEIGHT: {\n 'doubleValue': 5.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 1.0\n },\n },\n }\n self.assertEqual(eval_result.model_location, model_location.decode())\n self.assertEqual(eval_result.data_location, data_location)\n self.assertEqual(eval_result.config.slicing_specs[0],\n config_pb2.SlicingSpec(feature_keys=['my_slice']))\n self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)\n self.assertFalse(eval_result.plots)\n\n def testRunModelAnalysis(self):\n model_location = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n examples = [\n self._makeExample(age=3.0, language='english', label=1.0),\n self._makeExample(age=3.0, language='chinese', label=0.0),\n self._makeExample(age=4.0, language='english', label=1.0),\n self._makeExample(age=5.0, language='chinese', label=1.0),\n self._makeExample(age=5.0, language='hindi', label=1.0)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n slicing_specs = [slicer_lib.SingleSliceSpec(columns=['language'])]\n eval_result = model_eval_lib.run_model_analysis(\n eval_shared_model=model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location, example_weight_key='age'),\n data_location=data_location,\n output_path=self._getTempDir(),\n slice_spec=slicing_specs,\n min_slice_size=2)\n # We only check some of the metrics to ensure that the end-to-end\n # pipeline works.\n expected = {\n (('language', 'hindi'),): {\n u'__ERROR__': {\n 'debugMessage':\n u'Example count for this slice key is lower than the '\n u'minimum required value: 2. 
No data is aggregated for '\n u'this slice.'\n },\n },\n (('language', 'chinese'),): {\n 'accuracy': {\n 'doubleValue': 0.5\n },\n 'my_mean_label': {\n 'doubleValue': 0.5\n },\n metric_keys.EXAMPLE_WEIGHT: {\n 'doubleValue': 8.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n },\n (('language', 'english'),): {\n 'accuracy': {\n 'doubleValue': 1.0\n },\n 'my_mean_label': {\n 'doubleValue': 1.0\n },\n metric_keys.EXAMPLE_WEIGHT: {\n 'doubleValue': 7.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n }\n }\n self.assertEqual(eval_result.model_location, model_location.decode())\n self.assertEqual(eval_result.data_location, data_location)\n self.assertEqual(eval_result.config.slicing_specs[0],\n config_pb2.SlicingSpec(feature_keys=['language']))\n self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)\n self.assertFalse(eval_result.plots)\n\n def testRunModelAnalysisWithCustomizations(self):\n model_location = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n examples = [\n self._makeExample(age=3.0, language='english', label=1.0),\n self._makeExample(age=3.0, language='chinese', label=0.0),\n self._makeExample(age=4.0, language='english', label=1.0),\n self._makeExample(age=5.0, language='chinese', label=1.0),\n self._makeExample(age=5.0, language='hindi', label=1.0)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n slicing_specs = [config_pb2.SlicingSpec(feature_keys=['language'])]\n options = config_pb2.Options()\n options.min_slice_size.value = 2\n eval_config = config_pb2.EvalConfig(\n model_specs=[config_pb2.ModelSpec(model_type='my_model_type')],\n slicing_specs=slicing_specs,\n options=options)\n # Use default model_loader for testing passing custom_model_loader\n model_loader = model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location,\n example_weight_key='age').model_loader\n eval_shared_model = model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location, custom_model_loader=model_loader)\n # Use PredictExtractor for testing passing custom_predict_extractor\n extractors = model_eval_lib.default_extractors(\n eval_shared_model=eval_shared_model,\n eval_config=eval_config,\n custom_predict_extractor=legacy_predict_extractor.PredictExtractor(\n eval_shared_model=eval_shared_model, eval_config=eval_config))\n eval_result = model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n data_location=data_location,\n output_path=self._getTempDir(),\n extractors=extractors)\n # We only check some of the metrics to ensure that the end-to-end\n # pipeline works.\n expected = {\n (('language', 'hindi'),): {\n u'__ERROR__': {\n 'debugMessage':\n u'Example count for this slice key is lower than the '\n u'minimum required value: 2. 
No data is aggregated for '\n u'this slice.'\n },\n },\n (('language', 'chinese'),): {\n 'accuracy': {\n 'doubleValue': 0.5\n },\n 'my_mean_label': {\n 'doubleValue': 0.5\n },\n metric_keys.EXAMPLE_WEIGHT: {\n 'doubleValue': 8.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n },\n (('language', 'english'),): {\n 'accuracy': {\n 'doubleValue': 1.0\n },\n 'my_mean_label': {\n 'doubleValue': 1.0\n },\n metric_keys.EXAMPLE_WEIGHT: {\n 'doubleValue': 7.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n }\n }\n self.assertEqual(eval_result.model_location, model_location.decode())\n self.assertEqual(eval_result.data_location, data_location)\n self.assertEqual(eval_result.config.slicing_specs[0],\n config_pb2.SlicingSpec(feature_keys=['language']))\n self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)\n\n def testRunModelAnalysisMultipleModels(self):\n examples = [\n self._makeExample(age=3.0, language='english', label=1.0),\n self._makeExample(age=3.0, language='chinese', label=0.0),\n self._makeExample(age=4.0, language='english', label=1.0),\n self._makeExample(age=5.0, language='chinese', label=1.0)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n model_specs = [\n config_pb2.ModelSpec(\n name='model1', signature_name='eval', example_weight_key='age'),\n config_pb2.ModelSpec(\n name='model2', signature_name='eval', example_weight_key='age')\n ]\n metrics_specs = [\n config_pb2.MetricsSpec(\n metrics=[config_pb2.MetricConfig(class_name='ExampleCount')],\n model_names=['model1', 'model2'],\n example_weights=config_pb2.ExampleWeightOptions(unweighted=True)),\n config_pb2.MetricsSpec(\n metrics=[\n config_pb2.MetricConfig(class_name='WeightedExampleCount')\n ],\n model_names=['model1', 'model2'],\n example_weights=config_pb2.ExampleWeightOptions(weighted=True)),\n ]\n slicing_specs = [\n config_pb2.SlicingSpec(feature_values={'language': 'english'})\n ]\n options = config_pb2.Options()\n eval_config = config_pb2.EvalConfig(\n model_specs=model_specs,\n metrics_specs=metrics_specs,\n slicing_specs=slicing_specs,\n options=options)\n model_location1 = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n model1 = model_eval_lib.default_eval_shared_model(\n model_name='model1',\n eval_saved_model_path=model_location1,\n eval_config=eval_config)\n model_location2 = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n model2 = model_eval_lib.default_eval_shared_model(\n model_name='model2',\n eval_saved_model_path=model_location2,\n eval_config=eval_config)\n eval_shared_models = [model1, model2]\n eval_results = model_eval_lib.run_model_analysis(\n eval_shared_model=eval_shared_models,\n eval_config=eval_config,\n data_location=data_location,\n output_path=self._getTempDir())\n # We only check some of the metrics to ensure that the end-to-end\n # pipeline works.\n expected_result_1 = {\n (('language', 'english'),): {\n 'example_count': {\n 'doubleValue': 2.0\n },\n 'weighted_example_count': {\n 'doubleValue': 7.0\n },\n 'my_mean_label': {\n 'doubleValue': 1.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n }\n }\n expected_result_2 = {\n (('language', 'english'),): {\n 'example_count': {\n 'doubleValue': 2.0\n },\n 'weighted_example_count': {\n 'doubleValue': 7.0\n },\n 'my_mean_label': {\n 'doubleValue': 1.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n }\n }\n self.assertLen(eval_results._results, 2)\n eval_result_1 = eval_results._results[0]\n 
eval_result_2 = eval_results._results[1]\n self.assertEqual(eval_result_1.model_location, model_location1.decode())\n self.assertEqual(eval_result_2.model_location, model_location2.decode())\n self.assertEqual(eval_result_1.data_location, data_location)\n self.assertEqual(eval_result_2.data_location, data_location)\n self.assertEqual(\n eval_result_1.config.slicing_specs[0],\n config_pb2.SlicingSpec(feature_values={'language': 'english'}))\n self.assertEqual(\n eval_result_2.config.slicing_specs[0],\n config_pb2.SlicingSpec(feature_values={'language': 'english'}))\n self.assertMetricsAlmostEqual(eval_result_1.slicing_metrics,\n expected_result_1)\n self.assertMetricsAlmostEqual(eval_result_2.slicing_metrics,\n expected_result_2)\n\n def testRunModelAnalysisWithModelAgnosticPredictions(self):\n examples = [\n self._makeExample(\n age=3.0, language='english', label=1.0, prediction=0.9),\n self._makeExample(\n age=3.0, language='chinese', label=0.0, prediction=0.4),\n self._makeExample(\n age=4.0, language='english', label=1.0, prediction=0.7),\n self._makeExample(\n age=5.0, language='chinese', label=1.0, prediction=0.2)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n model_specs = [\n config_pb2.ModelSpec(\n prediction_key='prediction',\n label_key='label',\n example_weight_key='age')\n ]\n metrics_specs = [\n config_pb2.MetricsSpec(\n metrics=[config_pb2.MetricConfig(class_name='ExampleCount')],\n example_weights=config_pb2.ExampleWeightOptions(unweighted=True)),\n config_pb2.MetricsSpec(\n metrics=[\n config_pb2.MetricConfig(class_name='WeightedExampleCount')\n ],\n example_weights=config_pb2.ExampleWeightOptions(weighted=True)),\n config_pb2.MetricsSpec(\n metrics=[config_pb2.MetricConfig(class_name='BinaryAccuracy')],\n example_weights=config_pb2.ExampleWeightOptions(weighted=True))\n ]\n slicing_specs = [config_pb2.SlicingSpec(feature_keys=['language'])]\n eval_config = config_pb2.EvalConfig(\n model_specs=model_specs,\n metrics_specs=metrics_specs,\n slicing_specs=slicing_specs)\n eval_result = model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n data_location=data_location,\n output_path=self._getTempDir())\n expected = {\n (('language', 'chinese'),): {\n 'binary_accuracy': {\n 'doubleValue': 0.375\n },\n 'weighted_example_count': {\n 'doubleValue': 8.0\n },\n 'example_count': {\n 'doubleValue': 2.0\n },\n },\n (('language', 'english'),): {\n 'binary_accuracy': {\n 'doubleValue': 1.0\n },\n 'weighted_example_count': {\n 'doubleValue': 7.0\n },\n 'example_count': {\n 'doubleValue': 2.0\n },\n }\n }\n self.assertEqual(eval_result.data_location, data_location)\n self.assertEqual(eval_result.config.slicing_specs[0],\n config_pb2.SlicingSpec(feature_keys=['language']))\n self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)\n\n @parameterized.named_parameters(\n ('tf_keras', constants.TF_KERAS),\n ('tf_lite', constants.TF_LITE),\n ('tf_js', constants.TF_JS),\n ('baseline_missing', constants.TF_KERAS, True),\n ('rubber_stamp', constants.TF_KERAS, True, True),\n ('tf_keras_custom_metrics', constants.TF_KERAS, False, False, True),\n )\n def testRunModelAnalysisWithKerasModel(self,\n model_type,\n remove_baseline=False,\n rubber_stamp=False,\n add_custom_metrics=False):\n if model_type == constants.TF_JS and not _TFJS_IMPORTED:\n self.skipTest('This test requires TensorFlow JS.')\n\n # Custom metrics not supported in TFv1\n if _TF_MAJOR_VERSION < 2:\n add_custom_metrics = False\n\n def _build_keras_model(eval_config,\n export_name='export_dir',\n 
rubber_stamp=False):\n input_layer = tf.keras.layers.Input(shape=(28 * 28,), name='data')\n output_layer = tf.keras.layers.Dense(\n 10, activation=tf.nn.softmax)(\n input_layer)\n model = tf.keras.models.Model(input_layer, output_layer)\n model.compile(\n optimizer=tf.keras.optimizers.Adam(lr=.001),\n loss=tf.keras.losses.categorical_crossentropy)\n if add_custom_metrics:\n model.add_metric(tf.reduce_sum(input_layer), 'custom')\n model_location = os.path.join(self._getTempDir(), export_name)\n if model_type == constants.TF_LITE:\n converter = tf.compat.v2.lite.TFLiteConverter.from_keras_model(model)\n tflite_model = converter.convert()\n tf.io.gfile.makedirs(model_location)\n with tf.io.gfile.GFile(os.path.join(model_location, 'tflite'),\n 'wb') as f:\n f.write(tflite_model)\n elif model_type == constants.TF_JS:\n src_model_path = tempfile.mkdtemp()\n model.save(src_model_path, save_format='tf')\n\n tfjs_converter.convert([\n '--input_format=tf_saved_model',\n '--saved_model_tags=serve',\n '--signature_name=serving_default',\n src_model_path,\n model_location,\n ])\n else:\n model.save(model_location, save_format='tf')\n return model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location,\n eval_config=eval_config,\n rubber_stamp=rubber_stamp)\n\n examples = [\n self._makeExample(\n data=[0.0] * 28 * 28,\n label=[0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]),\n self._makeExample(\n data=[1.0] * 28 * 28,\n label=[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0]),\n self._makeExample(\n data=[1.0] * 28 * 28,\n label=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]),\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n\n schema = text_format.Parse(\n \"\"\"\n tensor_representation_group {\n key: \"\"\n value {\n tensor_representation {\n key: \"data\"\n value {\n dense_tensor {\n column_name: \"data\"\n shape { dim { size: 784 } }\n }\n }\n }\n }\n }\n feature {\n name: \"data\"\n type: FLOAT\n }\n feature {\n name: \"label\"\n type: FLOAT\n }\n \"\"\", schema_pb2.Schema())\n\n metrics_spec = config_pb2.MetricsSpec()\n for metric in (tf.keras.metrics.AUC(),):\n cfg = tf.keras.utils.serialize_keras_object(metric)\n metrics_spec.metrics.append(\n config_pb2.MetricConfig(\n class_name=cfg['class_name'], config=json.dumps(cfg['config'])))\n tf.keras.backend.clear_session()\n slicing_specs = [\n config_pb2.SlicingSpec(),\n config_pb2.SlicingSpec(feature_keys=['non_existent_slice'])\n ]\n metrics_spec.metrics.append(\n config_pb2.MetricConfig(\n class_name='ExampleCount',\n per_slice_thresholds=[\n config_pb2.PerSliceMetricThreshold(\n slicing_specs=slicing_specs,\n threshold=config_pb2.MetricThreshold(\n value_threshold=config_pb2.GenericValueThreshold(\n lower_bound={'value': 1}))),\n # Change thresholds would be ignored when rubber stamp is true.\n config_pb2.PerSliceMetricThreshold(\n slicing_specs=slicing_specs,\n threshold=config_pb2.MetricThreshold(\n change_threshold=config_pb2.GenericChangeThreshold(\n direction=config_pb2.MetricDirection\n .HIGHER_IS_BETTER,\n absolute={'value': 1})))\n ]))\n for class_id in (0, 5):\n metrics_spec.binarize.class_ids.values.append(class_id)\n eval_config = config_pb2.EvalConfig(\n model_specs=[config_pb2.ModelSpec(label_key='label')],\n metrics_specs=[metrics_spec])\n if model_type != constants.TF_KERAS:\n for s in eval_config.model_specs:\n s.model_type = model_type\n\n model = _build_keras_model(eval_config, rubber_stamp=rubber_stamp)\n baseline = _build_keras_model(eval_config, 'baseline_export')\n if 
remove_baseline:\n eval_shared_model = model\n else:\n eval_shared_model = {'candidate': model, 'baseline': baseline}\n output_path = self._getTempDir()\n # Raise RuntimeError for missing baseline with change thresholds.\n if not rubber_stamp and remove_baseline:\n with self.assertRaises(RuntimeError):\n model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n data_location=data_location,\n output_path=output_path,\n schema=schema)\n # Will not have any result since the pipeline didn't run.\n return\n else:\n eval_results = model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n data_location=data_location,\n output_path=output_path,\n schema=schema)\n\n # Directly check validaton file since it is not in EvalResult.\n validations_file = os.path.join(output_path, constants.VALIDATIONS_KEY)\n self.assertTrue(os.path.exists(validations_file))\n validation_records = []\n for record in tf.compat.v1.python_io.tf_record_iterator(validations_file):\n validation_records.append(\n validation_result_pb2.ValidationResult.FromString(record))\n self.assertLen(validation_records, 1)\n # Change thresholds ignored when rubber stamping\n expected_result = text_format.Parse(\n \"\"\"\n validation_ok: false\n rubber_stamp: %s\n missing_slices: {\n feature_keys: \"non_existent_slice\"\n }\n validation_details {\n slicing_details {\n slicing_spec {\n }\n num_matching_slices: 1\n }\n }\"\"\" % rubber_stamp, validation_result_pb2.ValidationResult())\n # Normal run with change threshold not satisfied.\n if not rubber_stamp and not remove_baseline:\n text_format.Parse(\n \"\"\"\n metric_validations_per_slice {\n slice_key {}\n failures {\n metric_key {\n name: \"example_count\"\n sub_key { class_id {} }\n model_name: \"candidate\"\n is_diff: true\n example_weighted { }\n }\n metric_threshold {\n change_threshold {\n absolute { value: 1 }\n direction: HIGHER_IS_BETTER\n }\n }\n metric_value { double_value {} }\n }\n failures {\n metric_key {\n name: \"example_count\"\n sub_key {\n class_id {\n value: 5\n }\n }\n model_name: \"candidate\"\n is_diff: true\n example_weighted { }\n }\n metric_threshold {\n change_threshold {\n absolute { value: 1}\n direction: HIGHER_IS_BETTER\n }\n }\n metric_value { double_value {} }\n }\n }\"\"\", expected_result)\n self.assertProtoEquals(expected_result, validation_records[0])\n\n def check_eval_result(eval_result, model_location):\n self.assertEqual(eval_result.model_location, model_location)\n self.assertEqual(eval_result.data_location, data_location)\n self.assertLen(eval_result.slicing_metrics, 1)\n got_slice_key, got_metrics = eval_result.slicing_metrics[0]\n self.assertEqual(got_slice_key, ())\n self.assertIn('', got_metrics) # output_name\n got_metrics = got_metrics['']\n expected_metrics = {\n 'classId:0': {\n 'auc': True,\n },\n 'classId:5': {\n 'auc': True,\n },\n }\n if (model_type not in (constants.TF_LITE, constants.TF_JS) and\n _TF_MAJOR_VERSION >= 2):\n expected_metrics[''] = {'loss': True}\n if add_custom_metrics:\n expected_metrics['']['custom'] = True\n for class_id in expected_metrics:\n self.assertIn(class_id, got_metrics)\n for k in expected_metrics[class_id]:\n self.assertIn(k, got_metrics[class_id])\n\n # TODO(b/173657964): assert exception for the missing baseline but non\n # rubber stamping test.\n if rubber_stamp or remove_baseline:\n self.assertIsInstance(eval_results, view_types.EvalResult)\n check_eval_result(eval_results, model.model_path)\n else:\n 
self.assertLen(eval_results._results, 2)\n eval_result_0, eval_result_1 = eval_results._results\n check_eval_result(eval_result_0, model.model_path)\n check_eval_result(eval_result_1, baseline.model_path)\n\n def testRunModelAnalysisWithKerasMultiOutputModel(self):\n\n def _build_keras_model(eval_config, export_name='export_dir'):\n layers_per_output = {}\n for output_name in ('output_1', 'output_2'):\n layers_per_output[output_name] = tf.keras.layers.Input(\n shape=(1,), name=output_name)\n model = tf.keras.models.Model(layers_per_output, layers_per_output)\n model.compile(loss=tf.keras.losses.categorical_crossentropy)\n model_location = os.path.join(self._getTempDir(), export_name)\n model.save(model_location, save_format='tf')\n return model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location,\n eval_config=eval_config,\n rubber_stamp=False)\n\n examples = [\n self._makeExample(output_1=1.0, output_2=0.0, label_1=0.0, label_2=0.0),\n self._makeExample(output_1=0.7, output_2=0.3, label_1=1.0, label_2=1.0),\n self._makeExample(output_1=0.5, output_2=0.8, label_1=0.0, label_2=1.0),\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n\n metrics_spec = config_pb2.MetricsSpec(\n output_names=['output_1', 'output_2'],\n output_weights={\n 'output_1': 1.0,\n 'output_2': 1.0\n })\n for metric in (tf.keras.metrics.AUC(),):\n cfg = tf.keras.utils.serialize_keras_object(metric)\n metrics_spec.metrics.append(\n config_pb2.MetricConfig(\n class_name=cfg['class_name'], config=json.dumps(cfg['config'])))\n slicing_specs = [\n config_pb2.SlicingSpec(),\n config_pb2.SlicingSpec(feature_keys=['non_existent_slice'])\n ]\n metrics_spec.metrics.append(\n config_pb2.MetricConfig(\n class_name='ExampleCount',\n per_slice_thresholds=[\n config_pb2.PerSliceMetricThreshold(\n slicing_specs=slicing_specs,\n threshold=config_pb2.MetricThreshold(\n value_threshold=config_pb2.GenericValueThreshold(\n lower_bound={'value': 1}))),\n # Change thresholds would be ignored when rubber stamp is true.\n config_pb2.PerSliceMetricThreshold(\n slicing_specs=slicing_specs,\n threshold=config_pb2.MetricThreshold(\n change_threshold=config_pb2.GenericChangeThreshold(\n direction=config_pb2.MetricDirection\n .HIGHER_IS_BETTER,\n absolute={'value': 1})))\n ]))\n eval_config = config_pb2.EvalConfig(\n model_specs=[\n config_pb2.ModelSpec(label_keys={\n 'output_1': 'label_1',\n 'output_2': 'label_2'\n })\n ],\n metrics_specs=[metrics_spec])\n\n model = _build_keras_model(eval_config)\n baseline = _build_keras_model(eval_config, 'baseline_export')\n eval_shared_model = {'candidate': model, 'baseline': baseline}\n output_path = self._getTempDir()\n eval_results = model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n data_location=data_location,\n output_path=output_path)\n\n # Directly check validaton file since it is not in EvalResult.\n validations_file = os.path.join(output_path, constants.VALIDATIONS_KEY)\n self.assertTrue(os.path.exists(validations_file))\n validation_records = []\n for record in tf.compat.v1.python_io.tf_record_iterator(validations_file):\n validation_records.append(\n validation_result_pb2.ValidationResult.FromString(record))\n self.assertLen(validation_records, 1)\n expected_result = text_format.Parse(\n \"\"\"\n metric_validations_per_slice {\n slice_key {}\n failures {\n metric_key {\n name: \"example_count\"\n model_name: \"candidate\"\n output_name: \"output_1\"\n is_diff: true\n example_weighted { }\n }\n metric_threshold 
{\n change_threshold {\n absolute { value: 1 }\n direction: HIGHER_IS_BETTER\n }\n }\n metric_value { double_value {} }\n }\n failures {\n metric_key {\n name: \"example_count\"\n model_name: \"candidate\"\n output_name: \"output_2\"\n is_diff: true\n example_weighted { }\n }\n metric_threshold {\n change_threshold {\n absolute { value: 1}\n direction: HIGHER_IS_BETTER\n }\n }\n metric_value { double_value {} }\n }\n }\n missing_slices {\n feature_keys: \"non_existent_slice\"\n }\n validation_details {\n slicing_details {\n slicing_spec {}\n num_matching_slices: 1\n }\n }\"\"\", validation_result_pb2.ValidationResult())\n self.assertProtoEquals(expected_result, validation_records[0])\n\n def check_eval_result(eval_result, model_location):\n self.assertEqual(eval_result.model_location, model_location)\n self.assertEqual(eval_result.data_location, data_location)\n self.assertLen(eval_result.slicing_metrics, 1)\n got_slice_key, got_metrics = eval_result.slicing_metrics[0]\n self.assertEqual(got_slice_key, ())\n self.assertIn('output_1', got_metrics)\n self.assertIn('auc', got_metrics['output_1'][''])\n self.assertIn('output_2', got_metrics)\n self.assertIn('auc', got_metrics['output_2'][''])\n # Aggregate metrics\n self.assertIn('', got_metrics)\n self.assertIn('auc', got_metrics[''][''])\n\n # TODO(b/173657964): assert exception for the missing baseline but non\n # rubber stamping test.\n self.assertLen(eval_results._results, 2)\n eval_result_0, eval_result_1 = eval_results._results\n check_eval_result(eval_result_0, model.model_path)\n check_eval_result(eval_result_1, baseline.model_path)\n\n def testRunModelAnalysisWithQueryBasedMetrics(self):\n input_layer = tf.keras.layers.Input(shape=(1,), name='age')\n output_layer = tf.keras.layers.Dense(\n 1, activation=tf.nn.sigmoid)(\n input_layer)\n model = tf.keras.models.Model(input_layer, output_layer)\n model.compile(\n optimizer=tf.keras.optimizers.Adam(lr=.001),\n loss=tf.keras.losses.binary_crossentropy)\n\n features = {'age': [[20.0]]}\n labels = [[1]]\n example_weights = [1.0]\n dataset = tf.data.Dataset.from_tensor_slices(\n (features, labels, example_weights))\n dataset = dataset.shuffle(buffer_size=1).repeat().batch(1)\n model.fit(dataset, steps_per_epoch=1)\n\n model_location = os.path.join(self._getTempDir(), 'export_dir')\n model.save(model_location, save_format='tf')\n\n schema = text_format.Parse(\n \"\"\"\n tensor_representation_group {\n key: \"\"\n value {\n tensor_representation {\n key: \"age\"\n value {\n dense_tensor {\n column_name: \"age\"\n shape { dim { size: 1 } }\n }\n }\n }\n tensor_representation {\n key: \"language\"\n value {\n dense_tensor {\n column_name: \"language\"\n shape { dim { size: 1 } }\n }\n }\n }\n }\n }\n feature {\n name: \"age\"\n type: FLOAT\n }\n feature {\n name: \"language\"\n type: BYTES\n }\n feature {\n name: \"label\"\n type: FLOAT\n }\n \"\"\", schema_pb2.Schema())\n examples = [\n self._makeExample(age=3.0, language='english', label=1.0),\n self._makeExample(age=5.0, language='chinese', label=0.0),\n self._makeExample(age=3.0, language='english', label=0.0),\n self._makeExample(age=5.0, language='chinese', label=1.0)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n slicing_specs = [config_pb2.SlicingSpec()]\n # Test with both a TFMA metric (NDCG), a keras metric (Recall).\n metrics = [\n ndcg.NDCG(gain_key='age', name='ndcg', top_k_list=[1, 2]),\n tf.keras.metrics.Recall(top_k=1),\n ]\n # If tensorflow-ranking imported add MRRMetric.\n if _TFR_IMPORTED:\n 
metrics.append(tfr.keras.metrics.MRRMetric())\n metrics_specs = metric_specs.specs_from_metrics(\n metrics, query_key='language', include_weighted_example_count=True)\n metrics_specs.append(\n config_pb2.MetricsSpec(metrics=[\n config_pb2.MetricConfig(\n class_name='ExampleCount',\n threshold=config_pb2.MetricThreshold(\n value_threshold=config_pb2.GenericValueThreshold(\n lower_bound={'value': 0})))\n ]))\n eval_config = config_pb2.EvalConfig(\n model_specs=[config_pb2.ModelSpec(label_key='label')],\n slicing_specs=slicing_specs,\n metrics_specs=metrics_specs)\n eval_shared_model = model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location, eval_config=eval_config)\n output_path = self._getTempDir()\n eval_result = model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=eval_shared_model,\n data_location=data_location,\n output_path=output_path,\n evaluators=[\n metrics_plots_and_validations_evaluator\n .MetricsPlotsAndValidationsEvaluator(\n eval_config=eval_config, eval_shared_model=eval_shared_model)\n ],\n schema=schema)\n\n # Directly check validaton file since it is not in EvalResult.\n validations_file = os.path.join(output_path, constants.VALIDATIONS_KEY)\n self.assertTrue(os.path.exists(validations_file))\n validation_records = []\n for record in tf.compat.v1.python_io.tf_record_iterator(validations_file):\n validation_records.append(\n validation_result_pb2.ValidationResult.FromString(record))\n self.assertLen(validation_records, 1)\n self.assertTrue(validation_records[0].validation_ok)\n\n self.assertEqual(eval_result.model_location, model_location)\n self.assertEqual(eval_result.data_location, data_location)\n self.assertLen(eval_result.slicing_metrics, 1)\n got_slice_key, got_metrics = eval_result.slicing_metrics[0]\n self.assertEqual(got_slice_key, ())\n self.assertIn('', got_metrics) # output_name\n got_metrics = got_metrics['']\n expected_metrics = {\n '': {\n 'example_count': True,\n 'weighted_example_count': True,\n },\n 'topK:1': {\n 'ndcg': True,\n 'recall': True,\n },\n 'topK:2': {\n 'ndcg': True,\n },\n }\n if _TFR_IMPORTED:\n expected_metrics['']['mrr_metric'] = True\n for group in expected_metrics:\n self.assertIn(group, got_metrics)\n for k in expected_metrics[group]:\n self.assertIn(k, got_metrics[group])\n\n def testRunModelAnalysisWithLegacyQueryExtractor(self):\n model_location = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n examples = [\n self._makeExample(age=3.0, language='english', label=1.0),\n self._makeExample(age=3.0, language='chinese', label=0.0),\n self._makeExample(age=4.0, language='english', label=0.0),\n self._makeExample(age=5.0, language='chinese', label=1.0)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n slicing_specs = [slicer_lib.SingleSliceSpec()]\n eval_shared_model = model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location, example_weight_key='age')\n eval_result = model_eval_lib.run_model_analysis(\n eval_shared_model=eval_shared_model,\n data_location=data_location,\n output_path=self._getTempDir(),\n evaluators=[\n legacy_metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(\n eval_shared_model),\n legacy_query_based_metrics_evaluator.QueryBasedMetricsEvaluator(\n query_id='language',\n prediction_key='logistic',\n combine_fns=[\n query_statistics.QueryStatisticsCombineFn(),\n legacy_ndcg.NdcgMetricCombineFn(\n at_vals=[1], gain_key='label', weight_key='')\n ]),\n ],\n slice_spec=slicing_specs)\n # We only check 
some of the metrics to ensure that the end-to-end\n # pipeline works.\n expected = {\n (): {\n 'post_export_metrics/total_queries': {\n 'doubleValue': 2.0\n },\n 'post_export_metrics/min_documents': {\n 'doubleValue': 2.0\n },\n 'post_export_metrics/max_documents': {\n 'doubleValue': 2.0\n },\n 'post_export_metrics/total_documents': {\n 'doubleValue': 4.0\n },\n 'post_export_metrics/ndcg@1': {\n 'doubleValue': 0.5\n },\n 'post_export_metrics/example_weight': {\n 'doubleValue': 15.0\n },\n 'post_export_metrics/example_count': {\n 'doubleValue': 4.0\n },\n }\n }\n self.assertEqual(eval_result.model_location, model_location.decode())\n self.assertEqual(eval_result.data_location, data_location)\n self.assertEqual(eval_result.config.slicing_specs[0],\n config_pb2.SlicingSpec())\n self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)\n self.assertFalse(eval_result.plots)\n\n def testRunModelAnalysisWithUncertainty(self):\n model_location = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n examples = [\n self._makeExample(age=3.0, language='english', label=1.0),\n self._makeExample(age=3.0, language='chinese', label=0.0),\n self._makeExample(age=4.0, language='english', label=1.0),\n self._makeExample(age=5.0, language='chinese', label=1.0),\n self._makeExample(age=5.0, language='hindi', label=1.0)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n slicing_specs = [slicer_lib.SingleSliceSpec(columns=['language'])]\n eval_result = model_eval_lib.run_model_analysis(\n eval_shared_model=model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location, example_weight_key='age'),\n data_location=data_location,\n output_path=self._getTempDir(),\n slice_spec=slicing_specs,\n compute_confidence_intervals=True,\n min_slice_size=2)\n # We only check some of the metrics to ensure that the end-to-end\n # pipeline works.\n expected = {\n (('language', 'hindi'),): {\n u'__ERROR__': {\n 'debugMessage':\n u'Example count for this slice key is lower than the '\n u'minimum required value: 2. 
No data is aggregated for '\n u'this slice.'\n },\n },\n (('language', 'chinese'),): {\n metric_keys.EXAMPLE_WEIGHT: {\n 'doubleValue': 8.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n },\n (('language', 'english'),): {\n 'accuracy': {\n 'boundedValue': {\n 'value': 1.0,\n 'lowerBound': 1.0,\n 'upperBound': 1.0,\n 'methodology': 'POISSON_BOOTSTRAP'\n }\n },\n 'my_mean_label': {\n 'boundedValue': {\n 'value': 1.0,\n 'lowerBound': 1.0,\n 'upperBound': 1.0,\n 'methodology': 'POISSON_BOOTSTRAP'\n }\n },\n metric_keys.EXAMPLE_WEIGHT: {\n 'doubleValue': 7.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n }\n }\n self.assertEqual(eval_result.model_location, model_location.decode())\n self.assertEqual(eval_result.data_location, data_location)\n self.assertEqual(eval_result.config.slicing_specs[0],\n config_pb2.SlicingSpec(feature_keys=['language']))\n self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)\n self.assertFalse(eval_result.plots)\n\n def testRunModelAnalysisWithDeterministicConfidenceIntervals(self):\n model_location = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n examples = [\n self._makeExample(age=3.0, language='english', label=1.0),\n self._makeExample(age=3.0, language='chinese', label=0.0),\n self._makeExample(age=4.0, language='english', label=1.0),\n self._makeExample(age=5.0, language='chinese', label=1.0),\n self._makeExample(age=5.0, language='hindi', label=1.0)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n slicing_specs = [slicer_lib.SingleSliceSpec(columns=['language'])]\n eval_result = model_eval_lib.run_model_analysis(\n eval_shared_model=model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location, example_weight_key='age'),\n data_location=data_location,\n output_path=self._getTempDir(),\n slice_spec=slicing_specs,\n compute_confidence_intervals=True,\n min_slice_size=2,\n random_seed_for_testing=_TEST_SEED)\n # We only check some of the metrics to ensure that the end-to-end\n # pipeline works.\n expected = {\n (('language', 'hindi'),): {\n u'__ERROR__': {\n 'debugMessage':\n u'Example count for this slice key is lower than the '\n u'minimum required value: 2. 
No data is aggregated for '\n u'this slice.'\n },\n },\n (('language', 'chinese'),): {\n metric_keys.EXAMPLE_WEIGHT: {\n 'doubleValue': 8.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n },\n (('language', 'english'),): {\n 'accuracy': {\n 'boundedValue': {\n 'value': 1.0,\n 'lowerBound': 1.0,\n 'upperBound': 1.0,\n 'methodology': 'POISSON_BOOTSTRAP'\n }\n },\n 'my_mean_label': {\n 'boundedValue': {\n 'value': 1.0,\n 'lowerBound': 1.0,\n 'upperBound': 1.0,\n 'methodology': 'POISSON_BOOTSTRAP'\n }\n },\n metric_keys.EXAMPLE_WEIGHT: {\n 'doubleValue': 7.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n }\n }\n self.assertEqual(eval_result.model_location, model_location.decode())\n self.assertEqual(eval_result.data_location, data_location)\n self.assertEqual(eval_result.config.slicing_specs[0],\n config_pb2.SlicingSpec(feature_keys=['language']))\n self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)\n\n for key, value in eval_result.slicing_metrics:\n if (('language', 'english'),) == key:\n metric = value['']['']['average_loss']\n self.assertAlmostEqual(\n 0.171768754720, metric['boundedValue']['value'], delta=0.1)\n\n metric = value['']['']['auc_precision_recall']\n self.assertAlmostEqual(\n 0.99999940395, metric['boundedValue']['value'], delta=0.1)\n\n self.assertFalse(eval_result.plots)\n\n def testRunModelAnalysisWithSchema(self):\n model_location = self._exportEvalSavedModel(\n linear_regressor.simple_linear_regressor)\n examples = [\n self._makeExample(age=3.0, language='english', label=2.0),\n self._makeExample(age=3.0, language='chinese', label=1.0),\n self._makeExample(age=4.0, language='english', label=2.0),\n self._makeExample(age=5.0, language='chinese', label=2.0),\n self._makeExample(age=5.0, language='hindi', label=2.0)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n eval_config = config_pb2.EvalConfig(\n model_specs=[config_pb2.ModelSpec(label_key='label')],\n metrics_specs=metric_specs.specs_from_metrics(\n [calibration_plot.CalibrationPlot(num_buckets=4)]))\n schema = text_format.Parse(\n \"\"\"\n feature {\n name: \"label\"\n type: INT\n int_domain {\n min: 1\n max: 2\n }\n }\n \"\"\", schema_pb2.Schema())\n eval_result = model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n schema=schema,\n eval_shared_model=model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location, example_weight_key='age'),\n data_location=data_location,\n output_path=self._getTempDir())\n\n expected_metrics = {(): {metric_keys.EXAMPLE_COUNT: {'doubleValue': 5.0},}}\n self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected_metrics)\n self.assertLen(eval_result.plots, 1)\n slice_key, plots = eval_result.plots[0]\n self.assertEqual((), slice_key)\n got_buckets = plots['']['']['calibrationHistogramBuckets']['buckets']\n # buckets include (-inf, left) and (right, inf) by default, but we are\n # interested in the values of left and right\n self.assertEqual(1.0, got_buckets[1]['lowerThresholdInclusive'])\n self.assertEqual(2.0, got_buckets[-2]['upperThresholdExclusive'])\n\n def testRunModelAnalysisWithPlots(self):\n model_location = self._exportEvalSavedModel(\n fixed_prediction_estimator.simple_fixed_prediction_estimator)\n examples = [\n self._makeExample(prediction=0.0, label=1.0),\n self._makeExample(prediction=0.7, label=0.0),\n self._makeExample(prediction=0.8, label=1.0),\n self._makeExample(prediction=1.0, label=1.0),\n self._makeExample(prediction=1.0, label=1.0)\n ]\n data_location = 
self._writeTFExamplesToTFRecords(examples)\n eval_shared_model = model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location,\n add_metrics_callbacks=[post_export_metrics.auc_plots()])\n eval_result = model_eval_lib.run_model_analysis(\n eval_shared_model=eval_shared_model,\n data_location=data_location,\n output_path=self._getTempDir())\n # We only check some of the metrics to ensure that the end-to-end\n # pipeline works.\n expected_metrics = {(): {metric_keys.EXAMPLE_COUNT: {'doubleValue': 5.0},}}\n expected_matrix = {\n 'threshold': 0.8,\n 'falseNegatives': 2.0,\n 'trueNegatives': 1.0,\n 'truePositives': 2.0,\n 'precision': 1.0,\n 'recall': 0.5\n }\n self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected_metrics)\n self.assertLen(eval_result.plots, 1)\n slice_key, plots = eval_result.plots[0]\n self.assertEqual((), slice_key)\n self.assertDictElementsAlmostEqual(\n plots['']['']['confusionMatrixAtThresholds']['matrices'][8001],\n expected_matrix)\n\n def testRunModelAnalysisWithMultiplePlots(self):\n model_location = self._exportEvalSavedModel(\n fixed_prediction_estimator.simple_fixed_prediction_estimator)\n examples = [\n self._makeExample(prediction=0.0, label=1.0),\n self._makeExample(prediction=0.7, label=0.0),\n self._makeExample(prediction=0.8, label=1.0),\n self._makeExample(prediction=1.0, label=1.0),\n self._makeExample(prediction=1.0, label=1.0)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n eval_shared_model = model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location,\n add_metrics_callbacks=[\n post_export_metrics.auc_plots(),\n post_export_metrics.auc_plots(metric_tag='test')\n ])\n eval_result = model_eval_lib.run_model_analysis(\n eval_shared_model=eval_shared_model,\n data_location=data_location,\n output_path=self._getTempDir())\n\n # pipeline works.\n expected_metrics = {(): {metric_keys.EXAMPLE_COUNT: {'doubleValue': 5.0},}}\n expected_matrix = {\n 'threshold': 0.8,\n 'falseNegatives': 2.0,\n 'trueNegatives': 1.0,\n 'truePositives': 2.0,\n 'precision': 1.0,\n 'recall': 0.5\n }\n self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected_metrics)\n self.assertLen(eval_result.plots, 1)\n slice_key, plots = eval_result.plots[0]\n self.assertEqual((), slice_key)\n self.assertDictElementsAlmostEqual(\n plots['']['']['post_export_metrics']['confusionMatrixAtThresholds']\n ['matrices'][8001], expected_matrix)\n self.assertDictElementsAlmostEqual(\n plots['']['']['post_export_metrics/test']['confusionMatrixAtThresholds']\n ['matrices'][8001], expected_matrix)\n\n def testRunModelAnalysisForCSVText(self):\n model_location = self._exportEvalSavedModel(\n csv_linear_classifier.simple_csv_linear_classifier)\n examples = [\n '3.0,english,1.0', '3.0,chinese,0.0', '4.0,english,1.0',\n '5.0,chinese,1.0'\n ]\n data_location = self._writeCSVToTextFile(examples)\n eval_config = config_pb2.EvalConfig()\n eval_result = model_eval_lib.run_model_analysis(\n eval_config=eval_config,\n eval_shared_model=model_eval_lib.default_eval_shared_model(\n eval_saved_model_path=model_location),\n data_location=data_location,\n file_format='text',\n output_path=self._getTempDir())\n # We only check some of the metrics to ensure that the end-to-end\n # pipeline works.\n expected = {\n (): {\n 'accuracy': {\n 'doubleValue': 0.75\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 4.0\n }\n }\n }\n self.assertMetricsAlmostEqual(eval_result.slicing_metrics, expected)\n\n def testMultipleModelAnalysis(self):\n 
model_location_1 = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n model_location_2 = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n examples = [\n self._makeExample(age=3.0, language='english', label=1.0),\n self._makeExample(age=3.0, language='chinese', label=0.0),\n self._makeExample(age=4.0, language='english', label=1.0),\n self._makeExample(age=5.0, language='chinese', label=1.0)\n ]\n data_location = self._writeTFExamplesToTFRecords(examples)\n eval_config = config_pb2.EvalConfig(slicing_specs=[\n config_pb2.SlicingSpec(feature_values={'language': 'english'})\n ])\n eval_results = model_eval_lib.multiple_model_analysis(\n [model_location_1, model_location_2],\n data_location,\n eval_config=eval_config)\n # We only check some of the metrics to ensure that the end-to-end\n # pipeline works.\n self.assertLen(eval_results._results, 2)\n expected_result_1 = {\n (('language', 'english'),): {\n 'my_mean_label': {\n 'doubleValue': 1.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n }\n }\n expected_result_2 = {\n (('language', 'english'),): {\n 'my_mean_label': {\n 'doubleValue': 1.0\n },\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n }\n }\n self.assertMetricsAlmostEqual(eval_results._results[0].slicing_metrics,\n expected_result_1)\n self.assertMetricsAlmostEqual(eval_results._results[1].slicing_metrics,\n expected_result_2)\n\n def testMultipleDataAnalysis(self):\n model_location = self._exportEvalSavedModel(\n linear_classifier.simple_linear_classifier)\n data_location_1 = self._writeTFExamplesToTFRecords([\n self._makeExample(age=3.0, language='english', label=1.0),\n self._makeExample(age=3.0, language='english', label=0.0),\n self._makeExample(age=5.0, language='chinese', label=1.0)\n ])\n data_location_2 = self._writeTFExamplesToTFRecords(\n [self._makeExample(age=4.0, language='english', label=1.0)])\n eval_config = config_pb2.EvalConfig(slicing_specs=[\n config_pb2.SlicingSpec(feature_values={'language': 'english'})\n ])\n eval_results = model_eval_lib.multiple_data_analysis(\n model_location, [data_location_1, data_location_2],\n eval_config=eval_config)\n self.assertLen(eval_results._results, 2)\n # We only check some of the metrics to ensure that the end-to-end\n # pipeline works.\n expected_result_1 = {\n (('language', 'english'),): {\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 2.0\n },\n }\n }\n expected_result_2 = {\n (('language', 'english'),): {\n metric_keys.EXAMPLE_COUNT: {\n 'doubleValue': 1.0\n },\n }\n }\n self.assertMetricsAlmostEqual(eval_results._results[0].slicing_metrics,\n expected_result_1)\n self.assertMetricsAlmostEqual(eval_results._results[1].slicing_metrics,\n expected_result_2)\n\n def testLoadValidationResult(self):\n result = validation_result_pb2.ValidationResult(validation_ok=True)\n path = os.path.join(absltest.get_default_test_tmpdir(), 'results.tfrecord')\n with tf.io.TFRecordWriter(path) as writer:\n writer.write(result.SerializeToString())\n loaded_result = model_eval_lib.load_validation_result(path)\n self.assertTrue(loaded_result.validation_ok)\n\n def testLoadValidationResultDir(self):\n result = validation_result_pb2.ValidationResult(validation_ok=True)\n path = os.path.join(absltest.get_default_test_tmpdir(),\n constants.VALIDATIONS_KEY)\n with tf.io.TFRecordWriter(path) as writer:\n writer.write(result.SerializeToString())\n loaded_result = model_eval_lib.load_validation_result(os.path.dirname(path))\n self.assertTrue(loaded_result.validation_ok)\n\n 
def testLoadValidationResultEmptyFile(self):\n path = os.path.join(absltest.get_default_test_tmpdir(),\n constants.VALIDATIONS_KEY)\n with tf.io.TFRecordWriter(path):\n pass\n with self.assertRaises(AssertionError):\n model_eval_lib.load_validation_result(path)\n\n def testAnalyzeRawData(self):\n\n # Data\n # age language label prediction\n # 17 english 0 0\n # 30 spanish 1 1\n dict_data = [{\n 'age': 17,\n 'language': 'english',\n 'prediction': 0,\n 'label': 0\n }, {\n 'age': 30,\n 'language': 'spanish',\n 'prediction': 1,\n 'label': 1\n }]\n df_data = pd.DataFrame(dict_data)\n\n # Expected Output\n expected_slicing_metrics = {\n (('language', 'english'),): {\n '': {\n '': {\n 'accuracy': {\n 'doubleValue': 1.0\n },\n 'example_count': {\n 'doubleValue': 1.0\n }\n }\n }\n },\n (('language', 'spanish'),): {\n '': {\n '': {\n 'accuracy': {\n 'doubleValue': 1.0\n },\n 'example_count': {\n 'doubleValue': 1.0\n }\n }\n }\n },\n (): {\n '': {\n '': {\n 'accuracy': {\n 'doubleValue': 1.0\n },\n 'example_count': {\n 'doubleValue': 2.0\n }\n }\n }\n }\n }\n\n # Actual Output\n eval_config = text_format.Parse(\n \"\"\"\n model_specs {\n label_key: 'label'\n prediction_key: 'prediction'\n }\n metrics_specs {\n metrics { class_name: \"Accuracy\" }\n metrics { class_name: \"ExampleCount\" }\n }\n slicing_specs {}\n slicing_specs {\n feature_keys: 'language'\n }\n \"\"\", config_pb2.EvalConfig())\n eval_result = model_eval_lib.analyze_raw_data(df_data, eval_config)\n\n # Compare Actual and Expected\n self.assertEqual(\n len(eval_result.slicing_metrics), len(expected_slicing_metrics))\n for slicing_metric in eval_result.slicing_metrics:\n slice_key, slice_val = slicing_metric\n self.assertIn(slice_key, expected_slicing_metrics)\n self.assertDictEqual(slice_val, expected_slicing_metrics[slice_key])\n\n def testAnalyzeRawDataWithoutPrediction(self):\n model_specs = [\n config_pb2.ModelSpec(prediction_key='nonexistent_prediction_key')\n ]\n metrics_specs = [\n config_pb2.MetricsSpec(\n metrics=[config_pb2.MetricConfig(class_name='Accuracy')])\n ]\n eval_config = config_pb2.EvalConfig(\n model_specs=model_specs, metrics_specs=metrics_specs)\n df_data = pd.DataFrame([{\n 'prediction': 0,\n 'label': 0,\n }])\n with self.assertRaises(KeyError):\n model_eval_lib.analyze_raw_data(df_data, eval_config)\n\n def testAnalyzeRawDataWithoutLabel(self):\n model_specs = [config_pb2.ModelSpec(prediction_key='nonexistent_label_key')]\n metrics_specs = [\n config_pb2.MetricsSpec(\n metrics=[config_pb2.MetricConfig(class_name='Accuracy')])\n ]\n eval_config = config_pb2.EvalConfig(\n model_specs=model_specs, metrics_specs=metrics_specs)\n df_data = pd.DataFrame([{\n 'prediction': 0,\n 'label': 0,\n }])\n with self.assertRaises(KeyError):\n model_eval_lib.analyze_raw_data(df_data, eval_config)\n\n\nif __name__ == '__main__':\n tf.compat.v1.enable_v2_behavior()\n tf.test.main()\n",
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for fairness indicators metrics.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\n# Standard __future__ imports\nfrom __future__ import print_function\n\nimport math\nfrom absl.testing import parameterized\nimport apache_beam as beam\nfrom apache_beam.testing import util\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow_model_analysis.addons.fairness.metrics import fairness_indicators\nfrom tensorflow_model_analysis.eval_saved_model import testutil\nfrom tensorflow_model_analysis.metrics import metric_types\nfrom tensorflow_model_analysis.metrics import metric_util\n\n\nclass FairnessIndicatorsTest(testutil.TensorflowModelAnalysisTest,\n parameterized.TestCase):\n\n def testFairessIndicatorsMetricsGeneral(self):\n computations = fairness_indicators.FairnessIndicators(\n thresholds=[0.3, 0.7]).computations()\n histogram = computations[0]\n matrices = computations[1]\n metrics = computations[2]\n examples = [{\n 'labels': np.array([0.0]),\n 'predictions': np.array([0.1]),\n 'example_weights': np.array([1.0]),\n }, {\n 'labels': np.array([0.0]),\n 'predictions': np.array([0.5]),\n 'example_weights': np.array([1.0]),\n }, {\n 'labels': np.array([1.0]),\n 'predictions': np.array([0.5]),\n 'example_weights': np.array([1.0]),\n }, {\n 'labels': np.array([1.0]),\n 'predictions': np.array([0.9]),\n 'example_weights': np.array([1.0]),\n }]\n\n with beam.Pipeline() as pipeline:\n # pylint: disable=no-value-for-parameter\n result = (\n pipeline\n | 'Create' >> beam.Create(examples)\n | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)\n | 'AddSlice' >> beam.Map(lambda x: ((), x))\n | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)\n | 'ComputeMatrices' >> beam.Map(\n lambda x: (x[0], matrices.result(x[1]))) # pyformat: ignore\n | 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1])))\n ) # pyformat: ignore\n\n # pylint: enable=no-value-for-parameter\n\n def check_result(got):\n try:\n self.assertLen(got, 1)\n got_slice_key, got_metrics = got[0]\n self.assertEqual(got_slice_key, ())\n self.assertLen(got_metrics, 16) # 2 thresholds * 8 metrics\n self.assertDictElementsAlmostEqual(\n got_metrics, {\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 0.5,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 0.0,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 1.0,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 0.5,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'):\n 0.75,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'):\n 0.25,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 1.0 / 3.0,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 0.0,\n 
metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 0.0,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 0.5,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 0.5,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 1.0,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'):\n 0.25,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'):\n 0.75,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 0.0,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]'\n ):\n 1.0 / 3.0\n })\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(result, check_result, label='result')\n\n def testFairessIndicatorsMetricsWithNanValue(self):\n computations = fairness_indicators.FairnessIndicators(\n thresholds=[0.5]).computations()\n histogram = computations[0]\n matrices = computations[1]\n metrics = computations[2]\n examples = [{\n 'labels': np.array([0.0]),\n 'predictions': np.array([0.1]),\n 'example_weights': np.array([1.0]),\n }, {\n 'labels': np.array([0.0]),\n 'predictions': np.array([0.7]),\n 'example_weights': np.array([1.0]),\n }]\n\n with beam.Pipeline() as pipeline:\n # pylint: disable=no-value-for-parameter\n result = (\n pipeline\n | 'Create' >> beam.Create(examples)\n | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)\n | 'AddSlice' >> beam.Map(lambda x: ((), x))\n | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)\n | 'ComputeMatrices' >> beam.Map(\n lambda x: (x[0], matrices.result(x[1]))) # pyformat: ignore\n | 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1])))\n ) # pyformat: ignore\n # pylint: enable=no-value-for-parameter\n\n def check_result(got):\n try:\n self.assertLen(got, 1)\n got_slice_key, got_metrics = got[0]\n self.assertEqual(got_slice_key, ())\n self.assertLen(got_metrics, 8) # 1 threshold * 8 metrics\n self.assertTrue(\n math.isnan(got_metrics[metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]')]))\n self.assertTrue(\n math.isnan(got_metrics[metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]')]))\n\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(result, check_result, label='result')\n\n @parameterized.named_parameters(\n ('_default_threshold', {}, 72, ()),\n ('_thresholds_with_different_digits', {\n 'thresholds': [0.1, 0.22, 0.333]\n }, 24, (metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n example_weighted=True),\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n example_weighted=True),\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n example_weighted=True))))\n def testFairessIndicatorsMetricsWithThresholds(self, kwargs,\n expected_metrics_nums,\n expected_metrics_keys):\n # This is a parameterized test with following parameters.\n # - metric parameters like thresholds.\n # - expected number of metrics computed\n # - expected list of metrics keys\n\n computations = fairness_indicators.FairnessIndicators(\n **kwargs).computations(example_weighted=True)\n histogram = computations[0]\n matrices = computations[1]\n metrics = computations[2]\n examples = [{\n 'labels': np.array([0.0]),\n 'predictions': np.array([0.1]),\n 'example_weights': 
np.array([1.0]),\n }, {\n 'labels': np.array([0.0]),\n 'predictions': np.array([0.7]),\n 'example_weights': np.array([3.0]),\n }]\n\n with beam.Pipeline() as pipeline:\n # pylint: disable=no-value-for-parameter\n result = (\n pipeline\n | 'Create' >> beam.Create(examples)\n | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)\n | 'AddSlice' >> beam.Map(lambda x: ((), x))\n | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)\n | 'ComputeMatrices' >> beam.Map(\n lambda x: (x[0], matrices.result(x[1]))) # pyformat: ignore\n | 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1])))\n ) # pyformat: ignore\n\n # pylint: enable=no-value-for-parameter\n\n def check_result(got):\n try:\n self.assertLen(got, 1)\n got_slice_key, got_metrics = got[0]\n self.assertEqual(got_slice_key, ())\n self.assertLen(got_metrics, expected_metrics_nums)\n for metrics_key in expected_metrics_keys:\n self.assertIn(metrics_key, got_metrics)\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(result, check_result, label='result')\n\n @parameterized.named_parameters(('_has_weight', [{\n 'labels': np.array([0.0]),\n 'predictions': np.array([0.1]),\n 'example_weights': np.array([1.0]),\n }, {\n 'labels': np.array([0.0]),\n 'predictions': np.array([0.7]),\n 'example_weights': np.array([3.0]),\n }], {\n 'example_weighted': True\n }, {\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n example_weighted=True):\n 0.25,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n example_weighted=True):\n 0.75,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n example_weighted=True):\n 0.25,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n example_weighted=True):\n 0.75,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n example_weighted=True):\n 1.0,\n }), ('_has_model_name', [{\n 'labels': np.array([0.0]),\n 'predictions': {\n 'model1': np.array([0.1]),\n },\n 'example_weights': np.array([1.0])\n }, {\n 'labels': np.array([0.0]),\n 'predictions': {\n 'model1': np.array([0.7]),\n },\n 'example_weights': np.array([3.0]),\n }], {\n 'model_names': ['model1'],\n 'example_weighted': True\n }, {\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n model_name='model1',\n example_weighted=True):\n 0.25,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n model_name='model1',\n example_weighted=True):\n 0.75,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n model_name='model1',\n example_weighted=True):\n 0.25,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n model_name='model1',\n example_weighted=True):\n 0.75,\n metric_types.MetricKey(\n name='fairness_indicators_metrics/[email protected]',\n model_name='model1',\n example_weighted=True):\n 1.0,\n }))\n def testFairessIndicatorsMetricsWithInput(self, input_examples,\n computations_kwargs,\n expected_result):\n # This is a parameterized test with following parameters.\n # - input examples to be used in the test\n # - parameters like model name etc.\n # - expected result to assert on\n\n computations = fairness_indicators.FairnessIndicators(\n thresholds=[0.5]).computations(**computations_kwargs)\n histogram = computations[0]\n matrices = computations[1]\n metrics = computations[2]\n\n with beam.Pipeline() as pipeline:\n # 
pylint: disable=no-value-for-parameter\n result = (\n pipeline\n | 'Create' >> beam.Create(input_examples)\n | 'Process' >> beam.Map(metric_util.to_standard_metric_inputs)\n | 'AddSlice' >> beam.Map(lambda x: ((), x))\n | 'ComputeHistogram' >> beam.CombinePerKey(histogram.combiner)\n | 'ComputeMatrices' >> beam.Map(\n lambda x: (x[0], matrices.result(x[1]))) # pyformat: ignore\n | 'ComputeMetrics' >> beam.Map(lambda x: (x[0], metrics.result(x[1])))\n ) # pyformat: ignore\n\n # pylint: enable=no-value-for-parameter\n\n def check_result(got):\n try:\n self.assertLen(got, 1)\n got_slice_key, got_metrics = got[0]\n self.assertEqual(got_slice_key, ())\n self.assertLen(got_metrics, 8) # 1 threshold * 8 metrics\n for metrics_key in expected_result:\n self.assertEqual(got_metrics[metrics_key],\n expected_result[metrics_key])\n except AssertionError as err:\n raise util.BeamAssertException(err)\n\n util.assert_that(result, check_result, label='result')\n\n\n# Todo(b/147497357): Add counter test once we have counter setup.\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.io.TFRecordWriter",
"tensorflow.compat.v1.enable_v2_behavior",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.metrics.AUC",
"tensorflow.compat.v2.lite.TFLiteConverter.from_keras_model",
"tensorflow.test.main",
"pandas.DataFrame",
"tensorflow.io.gfile.makedirs",
"tensorflow.reduce_sum",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.backend.clear_session",
"tensorflow.compat.v1.python_io.tf_record_iterator",
"tensorflow.keras.metrics.Recall",
"tensorflow.version.VERSION.split",
"tensorflow.keras.utils.serialize_keras_object",
"tensorflow.keras.layers.Input"
],
[
"numpy.array",
"tensorflow.test.main"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
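Note on the record above: every fairness-indicators test in its `code` cell drives the same Beam shape, keying each standardized example by its slice and aggregating with `CombinePerKey` before mapping the metric computations over the combined state. A minimal standalone sketch of just that keying-and-combining step, with hypothetical toy `((), value)` pairs and plain `sum` standing in for the histogram combiner:

import apache_beam as beam

# Each element is (slice_key, value); the empty tuple () is the overall slice.
examples = [((), 1.0), ((), 2.0), ((), 3.0)]

with beam.Pipeline() as pipeline:
    _ = (
        pipeline
        | 'Create' >> beam.Create(examples)
        | 'CombinePerSlice' >> beam.CombinePerKey(sum)  # yields ((), 6.0)
        | 'Print' >> beam.Map(print))

The tests in the record substitute the `FairnessIndicators` histogram combiner for `sum` and chain two further `Map` stages to turn histograms into confusion matrices and then metrics.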
MahmutOsmanovic/machine-learning-mooc-caltech
|
[
"deca978e13f6d6950f06417c4d520e71904962d7"
] |
[
"lfd_hw4/hw4_7_d.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 24 07:01:07 2021\n\n@author: Mahmu\n\"\"\"\n\n# HYPOTHESIS: h(x) = ax^2\n\nimport numpy as np\n\ndef problem4():\n \n RUNS = 1000\n a_total = 0\n N = 2 # size of data set\n \n for _ in range(RUNS):\n # two random points\n x_rnd = np.random.uniform(-1, 1, N)\n y_rnd = np.sin(np.pi * x_rnd)\n\n # linear regression for model y = ax^2\n X = np.array([x_rnd * x_rnd]).T\n w = np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y_rnd)\n a = w[0]\n\n a_total += a\n \n a_avg = a_total / RUNS\n return a_avg\n\nprint(\"h(x) = ax^2\")\nprint(\"solution problem 7: a_avg = \", problem4())\n\n\n#-------------------------------------------------------------------------\n\n\ndef problem5():\n N_test = 1000\n x_test = np.random.uniform(-1,1,N_test)\n\n y_f = np.sin(np.pi * x_test)\n a_avg = problem4()\n y_g_bar = a_avg * (x_test * x_test)\n\n bias = sum((y_f - y_g_bar)**2) / N_test\n return bias\n \n\nprint(\"\\nSolution to problem 7: bias = \", problem5())\n\n#--------------------------------------------------------------------------\n\ndef problem6():\n a_avg = problem4()\n expectation_over_X = 0\n \n RUNS_D = 100\n RUNS_X = 1000\n # variance: Compare each g to g_bar\n \n for i in range(RUNS_X):\n N = 2\n x_test = np.random.uniform(-1,1)\n expectation_over_D = 0\n \n for _ in range(RUNS_D):\n # two random points as data set D\n x_rnd = np.random.uniform(-1, 1, N)\n y_rnd = np.sin(np.pi * x_rnd)\n\n # linear regression for model y = ax^2\n # get a particular g^(D)\n X = np.array([x_rnd * x_rnd]).T\n w = np.dot(np.dot(np.linalg.inv(np.dot(X.T, X)), X.T), y_rnd)\n a = w[0]\n \n # calculate difference on test point\n y_g = a * x_test**2\n y_g_bar = a_avg * x_test**2\n expectation_over_D += (y_g - y_g_bar)**2 / RUNS_D\n\n expectation_over_X += expectation_over_D / RUNS_X\n \n variance = expectation_over_X\n return variance\n\n\nprint(\"\\nSolution to problem 7, variance = \", problem6())"
] |
[
[
"numpy.random.uniform",
"numpy.dot",
"numpy.array",
"numpy.sin"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
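The `hw4_7_d.py` record above estimates the hypothesis h(x) = ax^2 by solving the normal equations w = (X^T X)^{-1} X^T y directly with `np.linalg.inv`. A minimal sketch of the numerically safer equivalent via `np.linalg.lstsq` (variable names here are illustrative, not from the record):

import numpy as np

x = np.random.uniform(-1, 1, 2)      # two random points, as in the homework
y = np.sin(np.pi * x)                # target f(x) = sin(pi * x)

X = (x * x).reshape(-1, 1)           # one-column design matrix for a * x^2
w, *_ = np.linalg.lstsq(X, y, rcond=None)  # least-squares solve, no explicit inverse
a = w[0]
print(a)

For a single-parameter fit both routes give the same a; `lstsq` simply avoids forming (X^T X)^{-1}, which matters once the design matrix is ill-conditioned.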
thesinepainter/similarity
|
[
"a9f675b8761c2886ca3aa9ea1215ab7f693ab07d"
] |
[
"tensorflow_similarity/callbacks.py"
] |
[
"# Copyright 2021 The TensorFlow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Specialized callbacks that track similarity metrics during training\"\"\"\nfrom typing import Dict, List, Optional, Sequence, Union\nfrom pathlib import Path\nimport math\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import Callback\n\nfrom .classification_metrics import ClassificationMetric\nfrom .classification_metrics import make_classification_metric # noqa\nfrom .matchers import ClassificationMatch\nfrom .evaluators import Evaluator, MemoryEvaluator\nfrom .models import SimilarityModel\nfrom .types import Tensor, FloatTensor, IntTensor\nfrom .utils import unpack_lookup_distances, unpack_lookup_labels\n\n\nclass EvalCallback(Callback):\n \"\"\"Epoch end evaluation callback that build a test index and evaluate\n model performance on it.\n\n This evaluation only run at epoch_end as it is computationally very\n expensive.\n\n \"\"\"\n\n def __init__(\n self,\n queries: Tensor,\n query_labels: Sequence[int],\n targets: Tensor,\n target_labels: Sequence[int],\n distance: str = \"cosine\",\n metrics: Sequence[Union[str, ClassificationMetric]] = [\n \"binary_accuracy\",\n \"f1score\",\n ], # noqa\n tb_logdir: str = None,\n k: int = 1,\n matcher: Union[str, ClassificationMatch] = \"match_nearest\",\n distance_thresholds: Optional[FloatTensor] = None,\n ):\n \"\"\"Evaluate model matching quality against a validation dataset at\n epoch end.\n\n Args:\n queries: Test examples that will be tested against the built index.\n\n query_labels: Queries nearest neighbors expected labels.\n\n targets: Examples that are indexed.\n\n target_labels: Target examples labels.\n\n distance: Distance function used to compute pairwise distance\n between examples embeddings.\n\n metrics: List of\n 'tf.similarity.classification_metrics.ClassificationMetric()` to\n compute during the evaluation. Defaults to ['binary_accuracy',\n 'f1score'].\n\n tb_logdir: Where to write TensorBoard logs. Defaults to None.\n\n k: The number of nearest neighbors to return for each query.\n\n matcher: {'match_nearest', 'match_majority_vote'} or\n ClassificationMatch object. Defines the classification matching,\n e.g., match_nearest will count a True Positive if the query_label is\n equal to the label of the nearest neighbor and the distance is less\n than or equal to the distance threshold.\n\n distance_thresholds: A 1D tensor denoting the distances points at\n which we compute the metrics. 
If None, distance_thresholds is set to\n tf.constant([math.inf])\n \"\"\"\n super().__init__()\n self.queries = queries\n if not tf.is_tensor(query_labels):\n query_labels = tf.convert_to_tensor(np.array(query_labels))\n self.query_labels: IntTensor = tf.cast(query_labels, dtype='int32')\n self.targets = targets\n self.target_labels = target_labels\n self.distance = distance\n self.evaluator = MemoryEvaluator()\n # typing requires this weird formulation of creating a new list\n self.metrics: List[ClassificationMetric] = [\n make_classification_metric(m) for m in metrics\n ]\n self.k = k\n self.matcher = matcher\n if distance_thresholds is not None:\n self.distance_thresholds = distance_thresholds\n else:\n self.distance_thresholds = tf.constant([math.inf])\n\n if tb_logdir:\n tb_logdir = str(Path(tb_logdir) / \"index/\")\n self.tb_writer = tf.summary.create_file_writer(tb_logdir)\n print(\"TensorBoard logging enable in %s\" % tb_logdir)\n else:\n self.tb_writer = None\n\n def on_epoch_end(self, epoch: int, logs: dict = None):\n \"\"\"Computes the eval metrics at the end of each epoch.\n\n NOTE: This method resets the index and batch adds the target embeddings\n to the index using the new embeddings generated by the current version\n of the model.\n \"\"\"\n if logs is None:\n logs = {}\n\n # reset the index\n self.model.reset_index()\n\n # rebuild the index\n self.model.index(self.targets, self.target_labels, verbose=0)\n\n results = _compute_classification_metrics(\n queries=self.queries,\n query_labels=self.query_labels,\n model=self.model,\n evaluator=self.evaluator,\n metrics=self.metrics,\n k=self.k,\n matcher=self.matcher,\n distance_thresholds=self.distance_thresholds,\n )\n\n mstr = []\n for metric_name, vals in results.items():\n float_val = vals[0]\n logs[metric_name] = float_val\n mstr.append(f\"{metric_name}: {float_val:.4f}\")\n if self.tb_writer:\n with self.tb_writer.as_default():\n tf.summary.scalar(metric_name, float_val, step=epoch)\n\n # reset the index to prevent users from accidently using this after the\n # callback\n self.model.reset_index()\n\n print(\" - \".join(mstr))\n\n\nclass SplitValidationLoss(Callback):\n \"\"\"A split validation callback.\n\n This callback will split the validation data into two sets.\n\n 1) The set of classes seen during training.\n 2) The set of classes not seen during training.\n\n The callback will then compute a separate validation for each split.\n\n This is useful for separately tracking the validation loss on the seen and\n unseen classes and may provide insight into how well the embedding will\n generalize to new classes.\n \"\"\"\n\n def __init__(\n self,\n queries: Tensor,\n query_labels: Sequence[int],\n targets: Tensor,\n target_labels: Sequence[int],\n known_classes: IntTensor,\n distance: str = \"cosine\",\n metrics: Sequence[Union[str, ClassificationMetric]] = [\n \"binary_accuracy\",\n \"f1score\",\n ], # noqa\n tb_logdir: str = None,\n k: int = 1,\n matcher: Union[str, ClassificationMatch] = \"match_nearest\",\n distance_thresholds: Optional[FloatTensor] = None,\n ):\n \"\"\"Creates the validation callbacks.\n\n Args:\n queries: Test examples that will be tested against the built index.\n\n query_labels: Queries nearest neighbors expected labels.\n\n targets: Examples that are indexed.\n\n target_labels: Target examples labels.\n\n known_classes: The set of classes seen during training.\n\n distance: Distance function used to compute pairwise distance\n between examples embeddings.\n\n metrics: List of\n 
`tf.similarity.classification_metrics.ClassificationMetric()` to\n            compute during the evaluation. Defaults to ['binary_accuracy',\n            'f1score'].\n\n            tb_logdir: Where to write TensorBoard logs. Defaults to None.\n\n            k: The number of nearest neighbors to return for each query. The\n            lookups are consumed by the Matching Strategy and used to derive the\n            matching label and distance.\n\n            matcher: {'match_nearest', 'match_majority_vote'} or\n            ClassificationMatch object. Defines the classification matching,\n            e.g., match_nearest will count a True Positive if the query_label\n            is equal to the label of the nearest neighbor and the distance is\n            less than or equal to the distance threshold.\n\n            distance_thresholds: A 1D tensor denoting the distances at\n            which we compute the metrics. If None, distance_thresholds is set to\n            tf.constant([math.inf])\n        \"\"\"\n        super().__init__()\n        self.targets = targets\n        self.target_labels = target_labels\n        self.distance = distance\n        self.evaluator = MemoryEvaluator()\n        # typing requires this weird formulation of creating a new list\n        self.metrics: List[ClassificationMetric] = [\n            make_classification_metric(m) for m in metrics\n        ]\n        self.k = k\n        self.matcher = matcher\n        if distance_thresholds is not None:\n            self.distance_thresholds = distance_thresholds\n        else:\n            self.distance_thresholds = tf.constant([math.inf])\n\n        if tb_logdir:\n            tb_logdir = str(Path(tb_logdir) / \"index/\")\n            self.tb_writer = tf.summary.create_file_writer(tb_logdir)\n            print(\"TensorBoard logging enabled in %s\" % tb_logdir)\n        else:\n            self.tb_writer = None\n\n        if not tf.is_tensor(query_labels):\n            query_labels = tf.convert_to_tensor(np.array(query_labels))\n        query_labels = tf.cast(query_labels, dtype='int32')\n\n        # Create separate validation sets for the known and unknown classes\n        known_classes = tf.cast(known_classes, dtype=\"int32\")\n        known_classes = tf.reshape(known_classes, (-1))\n\n        # Use broadcasting to do a y X known_classes equality check.
By adding\n        # a dim to the start of known_classes and a dim to the end of y, this\n        # essentially checks `for ck in known_classes: for cy in y: ck == cy`.\n        # We then reduce_any to find all rows in y that match at least one\n        # class in known_classes.\n        # See https://numpy.org/doc/stable/user/basics.broadcasting.html\n        broadcast_classes = tf.expand_dims(known_classes, axis=0)\n        broadcast_labels = tf.expand_dims(query_labels, axis=-1)\n        known_mask = tf.math.reduce_any(\n            broadcast_classes == broadcast_labels, axis=1\n        )\n        known_idxs = tf.squeeze(tf.where(known_mask))\n        unknown_idxs = tf.squeeze(tf.where(~known_mask))\n\n        with tf.device(\"/cpu:0\"):\n            self.queries_known = tf.gather(queries, indices=known_idxs)\n            self.query_labels_known = tf.gather(\n                query_labels, indices=known_idxs\n            )\n            # Expand to 2D if we only have a single example\n            if tf.rank(self.queries_known) == 1:\n                self.queries_known = tf.expand_dims(self.queries_known, axis=0)\n                self.query_labels_known = tf.expand_dims(\n                    self.query_labels_known, axis=0\n                )\n\n            self.queries_unknown = tf.gather(queries, indices=unknown_idxs)\n            self.query_labels_unknown = tf.gather(\n                query_labels, indices=unknown_idxs\n            )\n            # Expand to 2D if we only have a single example\n            if tf.rank(self.queries_unknown) == 1:\n                self.queries_unknown = tf.expand_dims(\n                    self.queries_unknown, axis=0\n                )\n                self.query_labels_unknown = tf.expand_dims(\n                    self.query_labels_unknown, axis=0\n                )\n\n    def on_epoch_end(self, epoch: int, logs: dict = None):\n        \"\"\"Computes the eval metrics at the end of each epoch.\n\n        NOTE: This method resets the index and batch adds the target embeddings\n        to the index using the new embeddings generated by the current version\n        of the model.\n        \"\"\"\n        _ = epoch\n        if logs is None:\n            logs = {}\n\n        # reset the index\n        self.model.reset_index()\n\n        # rebuild the index\n        self.model.index(self.targets, self.target_labels, verbose=0)\n\n        known_results = _compute_classification_metrics(\n            queries=self.queries_known,\n            query_labels=self.query_labels_known,\n            model=self.model,\n            evaluator=self.evaluator,\n            metrics=self.metrics,\n            k=self.k,\n            matcher=self.matcher,\n            distance_thresholds=self.distance_thresholds,\n        )\n\n        unknown_results = _compute_classification_metrics(\n            queries=self.queries_unknown,\n            query_labels=self.query_labels_unknown,\n            model=self.model,\n            evaluator=self.evaluator,\n            metrics=self.metrics,\n            k=self.k,\n            matcher=self.matcher,\n            distance_thresholds=self.distance_thresholds,\n        )\n\n        mstr = []\n        for metric_name, vals in known_results.items():\n            float_val = vals[0]\n            full_metric_name = f\"{metric_name}_known_classes\"\n            logs[full_metric_name] = float_val\n            mstr.append(f\"{full_metric_name}: {float_val:0.4f}\")\n            if self.tb_writer:\n                with self.tb_writer.as_default():\n                    tf.summary.scalar(full_metric_name, float_val, step=epoch)\n\n        for metric_name, vals in unknown_results.items():\n            float_val = vals[0]\n            full_metric_name = f\"{metric_name}_unknown_classes\"\n            logs[full_metric_name] = float_val\n            mstr.append(f\"{full_metric_name}: {float_val:0.4f}\")\n            if self.tb_writer:\n                with self.tb_writer.as_default():\n                    tf.summary.scalar(full_metric_name, float_val, step=epoch)\n\n        # reset the index to prevent users from accidentally using this after\n        # the callback\n        self.model.reset_index()\n\n        print(\" - \".join(mstr))\n\n\ndef _compute_classification_metrics(\n    queries: Tensor,\n    query_labels: IntTensor,\n    model: SimilarityModel,\n    evaluator: Evaluator,\n    metrics: Sequence[ClassificationMetric],\n    k: int,\n    matcher: Union[str,
ClassificationMatch],\n    distance_thresholds: FloatTensor,\n) -> Dict[str, np.ndarray]:\n    \"\"\"Compute the classification metrics.\n\n    Args:\n        queries: A Tensor of embeddings representing the queries.\n\n        query_labels: An IntTensor representing the class ids associated with\n        the queries.\n\n        model: The current similarity model.\n\n        evaluator: An Evaluator object for evaluating the index performance.\n\n        metrics: A list of classification metrics objects.\n\n        k: The number of nearest neighbors to return for each query.\n\n        matcher: {'match_nearest', 'match_majority_vote'} or ClassificationMatch\n        object. Defines the classification matching, e.g., match_nearest will\n        count a True Positive if the query_label is equal to the label of the\n        nearest neighbor and the distance is less than or equal to the distance\n        threshold.\n\n        distance_thresholds: A 1D tensor denoting the distances at which\n        we compute the metrics.\n\n    Returns:\n        A Python dict mapping the metric name to the computed value.\n    \"\"\"\n    lookups = model.lookup(queries, k=k, verbose=0)\n    lookup_distances = unpack_lookup_distances(lookups)\n    lookup_labels = unpack_lookup_labels(lookups)\n\n    # TODO(ovallis): Support passing other matchers. Currently we are using\n    # match_nearest.\n    results = evaluator.evaluate_classification(\n        query_labels=query_labels,\n        lookup_labels=lookup_labels,\n        lookup_distances=lookup_distances,\n        distance_thresholds=distance_thresholds,\n        metrics=metrics,\n        matcher=matcher,\n        verbose=0,\n    )\n\n    # The callbacks don't set a distance threshold so we remove it here.\n    results.pop(\"distance\")\n\n    return results\n"
] |
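A minimal usage sketch for the EvalCallback defined above — not the library's documented example; the arrays are random filler and the SimilarityModel setup is assumed to exist elsewhere:

import math
import numpy as np
import tensorflow as tf

rng = np.random.default_rng(0)
x_index = rng.normal(size=(32, 16)).astype("float32")  # examples to index
y_index = rng.integers(0, 4, size=32)                  # their class ids
x_val = rng.normal(size=(8, 16)).astype("float32")     # query examples
y_val = rng.integers(0, 4, size=8)                     # expected labels

eval_cb = EvalCallback(
    queries=x_val,
    query_labels=y_val,
    targets=x_index,
    target_labels=y_index,
    metrics=["binary_accuracy", "f1score"],
    k=1,                                          # match the single nearest neighbor
    distance_thresholds=tf.constant([math.inf]),  # accept any distance
)
# `model` is assumed to be a compiled tensorflow_similarity SimilarityModel:
# model.fit(x_train, y_train, epochs=10, callbacks=[eval_cb])

At each epoch end the callback re-indexes the targets with the current embeddings, looks up the queries, and writes one scalar per metric into `logs` (and into TensorBoard, if tb_logdir was given).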
[
[
"tensorflow.device",
"tensorflow.is_tensor",
"tensorflow.constant",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.expand_dims",
"tensorflow.math.reduce_any",
"tensorflow.gather",
"tensorflow.where",
"tensorflow.rank",
"numpy.array",
"tensorflow.summary.scalar",
"tensorflow.summary.create_file_writer"
]
] |
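Several of the tf.* calls listed above combine into the broadcast membership test that SplitValidationLoss uses to separate known from unknown classes. A standalone sketch of that pattern, with illustrative values:

import tensorflow as tf

query_labels = tf.constant([0, 3, 1, 4, 2], dtype=tf.int32)
known_classes = tf.reshape(tf.constant([[0, 1, 2]], dtype=tf.int32), (-1,))

# (1, C) == (N, 1) broadcasts to an (N, C) equality matrix; reduce_any over
# the class axis flags queries whose label appears anywhere in known_classes.
known_mask = tf.math.reduce_any(
    tf.expand_dims(known_classes, axis=0) == tf.expand_dims(query_labels, axis=-1),
    axis=1,
)
known_idxs = tf.squeeze(tf.where(known_mask))       # -> [0, 2, 4]
unknown_idxs = tf.squeeze(tf.where(~known_mask))    # -> [1, 3]
known_labels = tf.gather(query_labels, known_idxs)  # -> [0, 1, 2]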
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
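SplitValidationLoss from the same file is wired up the same way as EvalCallback; the only extra argument is the set of classes seen during training. A hedged sketch, reusing the placeholder arrays from the EvalCallback example above:

import tensorflow as tf

split_cb = SplitValidationLoss(
    queries=x_val,
    query_labels=y_val,
    targets=x_index,
    target_labels=y_index,
    known_classes=tf.constant([0, 1, 2], dtype=tf.int32),  # classes in train
)
# Each metric is then logged twice per epoch, suffixed with
# _known_classes and _unknown_classes:
# model.fit(x_train, y_train, epochs=10, callbacks=[split_cb])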
biplavc/dqn-multi-agent-rl
|
[
"0611fec4e1f075d2c642e0aeccc10bdaecb52854"
] |
[
"sum_tree.py"
] |
[
"import numpy\n\n\nclass SumTree(object):\n\n def __init__(self, capacity):\n self.write = 0\n self.capacity = capacity\n self.tree = numpy.zeros(2*capacity - 1)\n self.data = numpy.zeros(capacity, dtype=object)\n\n def _propagate(self, idx, change):\n parent = (idx - 1) // 2\n\n self.tree[parent] += change\n\n if parent != 0:\n self._propagate(parent, change)\n\n def _retrieve(self, idx, s):\n left = 2 * idx + 1\n right = left + 1\n\n if left >= len(self.tree):\n return idx\n\n if s <= self.tree[left]:\n return self._retrieve(left, s)\n else:\n return self._retrieve(right, s-self.tree[left])\n\n def total(self):\n return self.tree[0]\n\n def add(self, p, data):\n idx = self.write + self.capacity - 1\n\n self.data[self.write] = data\n self.update(idx, p)\n\n self.write += 1\n if self.write >= self.capacity:\n self.write = 0\n\n def update(self, idx, p):\n change = p - self.tree[idx]\n\n self.tree[idx] = p\n self._propagate(idx, change)\n\n # def get_real_idx(self, data_idx):\n #\n # tempIdx = data_idx - self.write\n # if tempIdx >= 0:\n # return tempIdx\n # else:\n # return tempIdx + self.capacity\n\n def get(self, s):\n idx = self._retrieve(0, s)\n dataIdx = idx - self.capacity + 1\n # realIdx = self.get_real_idx(dataIdx)\n\n return idx, self.tree[idx], self.data[dataIdx]\n"
] |
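A small usage sketch for the SumTree above, showing the proportional (priority-weighted) sampling it is built for, as used in prioritized experience replay; the payload strings are placeholders:

import random

tree = SumTree(capacity=4)
tree.add(p=1.0, data="transition-a")   # low priority
tree.add(p=3.0, data="transition-b")
tree.add(p=6.0, data="transition-c")   # drawn ~6x as often as "transition-a"

# Draw a uniform point in [0, total priority mass) and walk down the tree.
s = random.uniform(0, tree.total())    # tree.total() == 10.0 here
idx, priority, data = tree.get(s)

# After replaying `data`, its priority can be revised in place:
tree.update(idx, p=0.5)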
[
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nikhil-garg/VDSP_ocl
|
[
"906867f8cd8a899a1ce309c5ec843fa1ce865373",
"906867f8cd8a899a1ce309c5ec843fa1ce865373",
"906867f8cd8a899a1ce309c5ec843fa1ce865373",
"906867f8cd8a899a1ce309c5ec843fa1ce865373",
"906867f8cd8a899a1ce309c5ec843fa1ce865373",
"906867f8cd8a899a1ce309c5ec843fa1ce865373",
"906867f8cd8a899a1ce309c5ec843fa1ce865373",
"906867f8cd8a899a1ce309c5ec843fa1ce865373",
"906867f8cd8a899a1ce309c5ec843fa1ce865373",
"906867f8cd8a899a1ce309c5ec843fa1ce865373"
] |
[
"mnist_stdp_multiple_exploration_v17.py",
"boise/mnist_multiple_exploration_var_amp_v4.py",
"mnist_multiple_exploration_tio2_v19.py",
"mnist_stdp_multiple_exploration_v16.py",
"mnist_multiple_exploration_baseline_v40.py",
"mnist_vdsp_multiple_baseline_lite.py",
"mnist_multiple_exploration_tio2_v22.py",
"mnist_multiple_exploration_baseline_v16.py",
"boise/mnist_multiple_exploration_var_g_v6.py",
"mnist_multiple_exploration_baseline_v11.py"
] |
[
"import itertools\nimport random\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom mnist_stdp_multiple_baseline import *\nfrom utilis import *\nfrom args_mnist import args as my_args\n# from ax import optimize\nimport pandas as pd\nfrom itertools import product\nimport time\n\n\nif __name__ == '__main__':\n\n\targs = my_args()\n\tprint(args.__dict__)\n\n\tlogging.basicConfig(level=logging.DEBUG)\n\tlogger = logging.getLogger(__name__)\n\n\t# Fix the seed of all random number generator\n\tseed = 50\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\tpwd = os.getcwd()\n\n\n\tdf = pd.DataFrame({\t\n\t\t\t\t\t\t\"amp_neuron\":[],\n\t\t\t\t\t\t\"input_nbr\":[],\n\t\t\t\t\t\t\"tau_in\" :[],\n\t\t\t\t\t\t\"tau_out\":[], \n \"alpha_p\":[],\n \"alpha_n\":[],\n \"beta_p\":[],\n \"beta_n\":[],\n \"tau_pre\":[],\n \"tau_post\":[],\n \"iterations\":[],\n \"presentation_time\":[],\n \"pause_time\":[],\n \"dt\":[],\n \"n_neurons\":[],\n \"inhibition_time\":[],\n \"tau_ref_in\":[],\n \"tau_ref_out\":[],\n \"inc_n\":[],\n \"tau_n\":[],\n \"synapse_layer_1\":[], \n \"gain_in\":[],\n \"gain_out\":[],\n \"accuracy\":[],\n \"accuracy_2\":[]\n })\n\n\tif args.log_file_path is None:\n\t\tlog_dir = pwd+'/log_dir/'\n\telse : \n\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+'test.csv', index=False)\n\n\tparameters = dict(\n\t\t amp_neuron=[0.05]\n\t\t,input_nbr=[60000]\n\t\t,tau_in = [0.03]\n\t\t,tau_out = [0.03]\n\t\t, alpha_p= [0.7,0.8,0.9]\n\t\t, alpha_n= [0.01,0.005,0.02]\n\t\t, beta_p= [1.4,0.8,1.2]\n\t\t, beta_n= [1,0.8,1.2]\n\t\t, tau_pre= [0.1]\n\t\t, tau_post= [0.1]\n\t\t, iterations=[1]\n\t\t, presentation_time = [0.20]\n\t\t, pause_time = [0.1]\n\t\t, dt = [0.005]\n\t\t, n_neurons = [10]\n\t\t, inhibition_time = [10]\n\t\t, tau_ref_in = [0.005]\n\t\t, tau_ref_out = [0.005]\n\t\t, inc_n = [0.01]\n\t\t, tau_n = [1]\n\t\t, synapse_layer_1=[0.005]\n\t\t, gain_in = [4]\n\t\t, gain_out = [2]\n\t\t, seed =[100]\n )\n\tparam_values = [v for v in parameters.values()]\n\n\tnow = time.strftime(\"%Y%m%d-%H%M%S\")\n\tfolder = os.getcwd()+\"/MNIST_VDSP_explorartion\"+now\n\tos.mkdir(folder)\n\n\tfor args.amp_neuron,args.input_nbr,args.tau_in,args.tau_out,args.alpha_p,args.alpha_n,args.beta_p,args.beta_n,args.tau_pre,args.tau_post,args.iterations,args.presentation_time,args.pause_time, args.dt,args.n_neurons,args.inhibition_time,args.tau_ref_in,args.tau_ref_out,args.inc_n,args.tau_n,args.synapse_layer_1,args.gain_in,args.gain_out,args.seed in product(*param_values):\n\n\n\t\t# args.pause_time = 0\n\n\t\t# args.filename = 'vprog-'+str(args.vprog)+'-g_max-'+str(args.g_max)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)\n\t\targs.filename = 'stdp-'+str(args.amp_neuron)+str(args.input_nbr)+str(args.tau_in)+str(args.tau_out)+str(args.alpha_p)+str(args.alpha_n)+str(args.beta_p)+str(args.beta_n)+str(args.tau_pre)+str(args.tau_post)\n\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\taccuracy, accuracy_2,weights = evaluate_mnist_multiple_baseline(args)\n\n\t\tdf = df.append({ \n\t\t\t\t\t\t\"amp_neuron\":args.amp_neuron,\n\t\t\t\t\t\t\n\t\t\t\t\t\t \"input_nbr\":args.input_nbr,\n\t\t\t\t\t\t \"tau_in\":args.tau_in,\n\t\t\t\t\t\t \"tau_out\": args.tau_out,\n\t\t\t\t\t \n\t\t\t\t\t\t \"alpha_p\": args.alpha_p,\n\t\t\t\t\t\t \"alpha_n\": args.alpha_n,\n\t\t\t\t\t\t \"beta_p\":args.beta_p,\n\t\t\t\t\t\t \"beta_n\": args.beta_n,\n\t\t\t\t\t\t 
\"tau_pre\": args.tau_pre,\n\t\t\t\t\t\t \"tau_post\": args.tau_post,\n\t\t\t\t\t\t \n\t\t\t\t\t\t \"iterations\":args.iterations,\n\t\t \"presentation_time\":args.presentation_time,\n\t\t \"pause_time\":args.pause_time,\n\t\t \"dt\":args.dt,\n\t\t \"n_neurons\":args.n_neurons,\n\t\t \"seed\":args.seed,\n\t\t \"inhibition_time\":args.inhibition_time,\n\t\t \"tau_ref_in\":args.tau_ref_in,\n\t\t \"tau_ref_out\":args.tau_ref_out,\n\t\t \"inc_n\":args.inc_n,\n\t\t \"tau_n\":args.tau_n,\n\t\t \"synapse_layer_1\":args.synapse_layer_1,\n\t\t \n\t\t \"gain_in\":args.gain_in,\n\t\t \"bias_out\":args.bias_out,\n\n\t\t \"accuracy\":accuracy,\n\t\t \"accuracy_2\":accuracy_2\n\t\t },ignore_index=True)\n\t\t\n\n\t\tplot = True\n\t\tif plot : \t\n\t\t\tprint('accuracy', accuracy)\n\t\t\tprint(args.filename)\n\t\t\t# weights = weights[-1]#Taking only the last weight for plotting\n\n\t\t\tcolumns = int(args.n_neurons/5)\n\t\t\trows = int(args.n_neurons/columns)\n\n\t\t\tfig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(columns*5,rows*5))\n\n\t\t\tfor i in range(0,(args.n_neurons)):\n\t\t\t\taxes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[i],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t\taxes[int(i/columns)][int(i%columns)].get_xaxis().set_visible(False)\n\t\t\t\taxes[int(i/columns)][int(i%columns)].get_yaxis().set_visible(False)\n\t\t\tplt.tight_layout() \n\t\t\tplt.axis('off')\n\n\t \n\t\t\t# fig, axes = plt.subplots(1,1, figsize=(3,3))\n\t\t\t# fig = plt.figure()\n\t\t\t# ax1 = fig.add_subplot()\n\t\t\t# cax = ax1.matshow(np.reshape(weights[0],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t# fig.colorbar(cax)\n\t\t\t# plt.tight_layout() \n\n\t\t\tif args.log_file_path is None:\n\t\t\t\tlog_dir = pwd+'/log_dir/'\n\t\t\telse : \n\t\t\t\tlog_dir = args.log_file_path\n\t\t\tdf.to_csv(log_dir+log_file_name, index=False) \n\n\t\t\tfig.savefig(log_dir+args.filename+'weights.png')\n\t\t\tplt.close()\n\n\t\t\tplt.clf()\n\t\t\tplt.hist(weights.flatten())\n\n\t\t\tplt.tight_layout() \n\t\t\tplt.savefig(log_dir+args.filename+'histogram.png')\n\n\n\t\t\t# plt.figure(figsize=(12,10))\n\n\t\t\t# plt.subplot(2, 1, 1)\n\t\t\t# plt.title('Input neurons')\n\t\t\t# rasterplot(time_points, p_input_layer)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.subplot(2, 1, 2)\n\t\t\t# plt.title('Output neurons')\n\t\t\t# rasterplot(time_points, p_layer_1)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.tight_layout()\n\n\t\t\t# plt.savefig(folder+'/raster'+str(args.filename)+'.png')\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\tif args.log_file_path is None:\n\t\t\tlog_dir = pwd+'/log_dir/'\n\t\telse : \n\t\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+log_file_name, index=False)\n\n\tdf.to_csv(log_file_name, index=False)\n\n\n\tlogger.info('All done.')",
"\nimport itertools\nimport random\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom mnist_vdsp_multiple_var_amp import *\nfrom utilis import *\nfrom args_mnist import args as my_args\n# from ax import optimize\nimport pandas as pd\nfrom itertools import product\nimport time\n\n\nif __name__ == '__main__':\n\n\targs = my_args()\n\tprint(args.__dict__)\n\n\tlogging.basicConfig(level=logging.DEBUG)\n\tlogger = logging.getLogger(__name__)\n\n\t# Fix the seed of all random number generator\n\tseed = 50\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\n\tdf = pd.DataFrame({\t\"vprog\":[],\n\t\t\t\t\t\t\"input_nbr\":[],\n\t\t\t\t\t\t\"tau_in\" :[],\n\t\t\t\t\t\t\"tau_out\":[],\n\t\t\t\t\t\t\"gain_in\":[],\n\t\t\t\t\t\t\"gain_out\":[],\n\t\t\t\t\t\t\"bias_in\":[],\n\t\t\t\t\t\t\"bias_out\":[],\n\t\t\t\t\t\t\"thr_out\":[],\n\t\t\t\t\t\t\"inhibition_time\":[],\n \"lr\":[],\n \"presentation_time\":[],\n \"amp_var\":[],\n \"seed\":[],\n \"accuracy\":[],\n })\n\n\tif args.log_file_path is None:\n\t\tpwd = os.getcwd()\n\t\tlog_dir = pwd+'/log_dir/'\n\telse : \n\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+'test.csv', index=False)\n\n\n\tparameters = dict(\n\t\tvprog = [-0.75]\n\t\t,input_nbr=[60000]\n\t\t,tau_in = [0.06]\n\t\t,tau_out = [0.06]\n\t\t,gain_in = [2]\n\t\t,gain_out = [2]\n\t\t,bias_in = [0]\n\t\t,bias_out = [0]\n\t\t,thr_out = [1]\n\t\t,inhibition_time = [10]\n\t\t, lr = [0.1]\n\t\t, presentation_time = [0.35]\n\t\t, amp_var = [0,0.1,0.2,0.3]\n\t\t, seed = [700]\n )\n\tparam_values = [v for v in parameters.values()]\n\n\tnow = time.strftime(\"%Y%m%d-%H%M%S\")\n\tfolder = os.getcwd()+\"/MNIST_VDSP_explorartion\"+now\n\tos.mkdir(folder)\n\n\tfor args.vprog,args.input_nbr,args.tau_in,args.tau_out,args.gain_in,args.gain_out,args.bias_in,args.bias_out,args.thr_out,args.inhibition_time,args.lr,args.presentation_time,args.amp_var,args.seed in product(*param_values):\n\n\t\t# args.filename = 'vprog-'+str(args.vprog)+'-g_max-'+str(args.g_max)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)\n\t\t\n\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+str(timestr)+'.csv'\n\t\tpwd = os.getcwd()\n\t\t# args.vthn = args.vthp\n\t\taccuracy, weights = evaluate_mnist_multiple_var_amp(args)\n\n\n\t\t\n\t\tdf = df.append({ \"vprog\":args.vprog,\n\t\t\t\t\t\t \"input_nbr\":args.input_nbr,\n\t\t\t\t\t\t \"tau_in\":args.tau_in,\n\t\t\t\t\t\t \"tau_out\": args.tau_out,\n\t\t\t\t\t\t \"gain_in\":args.gain_in,\n\t\t\t\t\t\t \"gain_out\":args.gain_out,\n\t\t\t\t\t\t \"bias_in\":args.bias_in,\n\t\t\t\t\t\t \"bias_out\":args.bias_out,\n\t\t\t\t\t\t \"thr_out\":args.thr_out,\n\t\t\t\t\t\t \"inhibition_time\":args.inhibition_time,\n\t\t\t\t\t\t \"lr\": args.lr,\n\t\t \"presentation_time\":args.presentation_time,\n\t\t \"amp_var\":args.amp_var,\n\t\t \"seed\":args.seed,\n\t\t \"accuracy\":accuracy\n\t\t },ignore_index=True)\n\t\t\n\n\n\t\tplot = False\n\t\tif plot : \t\n\t\t\tprint('accuracy', accuracy)\n\t\t\tprint(args.filename)\n\t\t\t# weights = weights[-1]#Taking only the last weight for plotting\n\n\t\t\tcolumns = int(args.n_neurons/5)\n\n\t\t\tfig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(20,25))\n\n\t\t\tfor i in range(0,(args.n_neurons)):\n\t\t\t\taxes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[i],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\n\t\t\tplt.tight_layout() \n\n\t \n\t\t\t# fig, axes = 
plt.subplots(1,1, figsize=(3,3))\n\t\t\t# fig = plt.figure()\n\t\t\t# ax1 = fig.add_subplot()\n\t\t\t# cax = ax1.matshow(np.reshape(weights[0],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t# fig.colorbar(cax)\n\t\t\t# plt.tight_layout() \n\n\t\t\tfig.savefig(folder+'/weights'+str(args.filename)+'.png')\n\t\t\tplt.close()\n\n\n\t\t\t# plt.figure(figsize=(12,10))\n\n\t\t\t# plt.subplot(2, 1, 1)\n\t\t\t# plt.title('Input neurons')\n\t\t\t# rasterplot(time_points, p_input_layer)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.subplot(2, 1, 2)\n\t\t\t# plt.title('Output neurons')\n\t\t\t# rasterplot(time_points, p_layer_1)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.tight_layout()\n\n\t\t\t# plt.savefig(folder+'/raster'+str(args.filename)+'.png')\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\tif args.log_file_path is None:\n\t\t\tlog_dir = pwd+'/log_dir/'\n\t\telse : \n\t\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+log_file_name, index=False)\n\n\tdf.to_csv(log_file_name, index=False)\n\n\n\tlogger.info('All done.')",
"\nimport itertools\nimport random\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom mnist_vdsp_multiple_tio2 import *\nfrom utilis import *\nfrom args_mnist import args as my_args\n# from ax import optimize\nimport pandas as pd\nfrom itertools import product\nimport time\n\n\nif __name__ == '__main__':\n\n\targs = my_args()\n\tprint(args.__dict__)\n\n\tlogging.basicConfig(level=logging.DEBUG)\n\tlogger = logging.getLogger(__name__)\n\n\t# Fix the seed of all random number generator\n\tseed = 50\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\tpwd = os.getcwd()\n\n\n\tdf = pd.DataFrame({\t\"vprog\":[],\n\t\t\t\t\t\t\"amp_neuron\":[],\n\t\t\t\t\t\t\"vth\":[],\n\t\t\t\t\t\t\"input_nbr\":[],\n\t\t\t\t\t\t\"tau_in\" :[],\n\t\t\t\t\t\t\"tau_out\":[],\n \"lr\":[],\n \"iterations\":[],\n \"presentation_time\":[],\n \"pause_time\":[],\n \"dt\":[],\n \"n_neurons\":[],\n \"inhibition_time\":[],\n \"tau_ref_in\":[],\n \"tau_ref_out\":[],\n \"inc_n\":[],\n \"tau_n\":[],\n \"synapse_layer_1\":[],\n \"winit_max\":[],\n \"vprog_increment\":[],\n \"voltage_clip_max\":[],\n \"voltage_clip_min\":[],\n \"Vapp_multiplier\":[],\n \"gain_in\":[],\n \"bias_in\":[],\n \"noise_input\":[],\n \"accuracy\":[],\n \"accuracy_2\":[]\n })\n\n\tif args.log_file_path is None:\n\t\tlog_dir = pwd+'/log_dir/'\n\telse : \n\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+'test.csv', index=False)\n\n\tparameters = dict(\n\t\tvprog = [0]\n\t\t, amp_neuron=[0.5]\n\t\t,input_nbr=[60000]\n\t\t,tau_in = [0.03]\n\t\t,tau_out = [0.03]\n\t\t, lr = [1]\n\t\t, iterations=[1]\n\t\t, presentation_time = [0.35]\n\t\t, pause_time = [0]\n\t\t, dt = [0.005]\n\t\t, n_neurons = [450]\n\t\t, inhibition_time = [10]\n\t\t, tau_ref_in = [0.01]\n\t\t, tau_ref_out = [0.005]\n\t\t, inc_n = [0.01]\n\t\t, tau_n = [1]\n\t\t, synapse_layer_1=[0.005]\n\t\t, winit_max = [1]\n\t\t, vprog_increment = [0]\n\t\t, voltage_clip_max=[1.8]\n\t\t, voltage_clip_min = [-1.5]\n\t\t, Vapp_multiplier = [1]\n\t\t, gain_in = [3.5]\n\t\t, bias_in = [0.85]\n\t\t, noise_input = [0]\n\t\t, seed =[100]\n )\n\tparam_values = [v for v in parameters.values()]\n\n\tnow = time.strftime(\"%Y%m%d-%H%M%S\")\n\tfolder = os.getcwd()+\"/MNIST_VDSP_explorartion\"+now\n\tos.mkdir(folder)\n\n\tfor args.vprog,args.amp_neuron,args.input_nbr,args.tau_in,args.tau_out,args.lr,args.iterations,args.presentation_time,args.pause_time, args.dt,args.n_neurons,args.inhibition_time,args.tau_ref_in,args.tau_ref_out,args.inc_n,args.tau_n,args.synapse_layer_1,args.winit_max,args.vprog_increment,args.voltage_clip_max,args.voltage_clip_min,args.Vapp_multiplier,args.gain_in,args.bias_in,args.noise_input,args.seed in product(*param_values):\n\n\n\t\t# args.pause_time = 0\n\n\t\t# args.filename = 'vprog-'+str(args.vprog)+'-g_max-'+str(args.g_max)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)\n\t\targs.filename = 'vprog-'+str(args.vprog)+'amp_neuron'+str(args.amp_neuron)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)+'pause_time'+str(args.pause_time) + 
'dt-'+str(args.dt)+'ref-'+str(args.tau_ref_in)+str(args.tau_ref_out)+'gain-'+str(args.gain_in)+'bias_in'+str(args.bias_in)+'adaptation'+str(args.inc_n)+str(args.tau_n)+'noise'+str(args.noise_input)+'Vapp_multiplier-'+str(args.Vapp_multiplier)+'winit_max'+str(args.winit_max)+str(args.voltage_clip_max)+str(args.voltage_clip_min)+str(args.n_neurons)+str(args.seed)\n\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\taccuracy, accuracy_2,weights = evaluate_mnist_multiple_tio2(args)\n\n\t\tdf = df.append({ \"vprog\":args.vprog,\n\t\t\t\t\t\t\"amp_neuron\":args.amp_neuron,\n\t\t\t\t\t\t \"vth\":args.vthp,\n\t\t\t\t\t\t \"input_nbr\":args.input_nbr,\n\t\t\t\t\t\t \"tau_in\":args.tau_in,\n\t\t\t\t\t\t \"tau_out\": args.tau_out,\n\t\t\t\t\t\t \"lr\": args.lr,\n\t\t\t\t\t\t \"iterations\":args.iterations,\n\t\t \"presentation_time\":args.presentation_time,\n\t\t \"pause_time\":args.pause_time,\n\t\t \"dt\":args.dt,\n\t\t \"n_neurons\":args.n_neurons,\n\t\t \"seed\":args.seed,\n\t\t \"inhibition_time\":args.inhibition_time,\n\t\t \"tau_ref_in\":args.tau_ref_in,\n\t\t \"tau_ref_out\":args.tau_ref_out,\n\t\t \"inc_n\":args.inc_n,\n\t\t \"tau_n\":args.tau_n,\n\t\t \"synapse_layer_1\":args.synapse_layer_1,\n\t\t \"winit_max\":args.winit_max,\n\t\t \"vprog_increment\":args.vprog_increment,\n\t\t \"voltage_clip_max\":args.voltage_clip_max,\n\t\t \"voltage_clip_min\":args.voltage_clip_min,\n\t\t \"Vapp_multiplier\":args.Vapp_multiplier,\n\t\t \"gain_in\":args.gain_in,\n\t\t \"bias_in\":args.bias_in,\n\t\t \"noise_input\":args.noise_input,\n\t\t \"accuracy\":accuracy,\n\t\t \"accuracy_2\":accuracy_2\n\t\t },ignore_index=True)\n\t\t\n\n\t\tplot = True\n\t\tif plot : \t\n\t\t\tprint('accuracy', accuracy)\n\t\t\tprint(args.filename)\n\t\t\t# weights = weights[-1]#Taking only the last weight for plotting\n\n\t\t\tcolumns = int(args.n_neurons/5)\n\t\t\trows = int(args.n_neurons/columns)\n\n\t\t\tfig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(columns*5,rows*5))\n\n\t\t\tfor i in range(0,(args.n_neurons)):\n\t\t\t\taxes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[i],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\n\t\t\tplt.tight_layout() \n\n\t \n\t\t\t# fig, axes = plt.subplots(1,1, figsize=(3,3))\n\t\t\t# fig = plt.figure()\n\t\t\t# ax1 = fig.add_subplot()\n\t\t\t# cax = ax1.matshow(np.reshape(weights[0],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t# fig.colorbar(cax)\n\t\t\t# plt.tight_layout() \n\n\t\t\tif args.log_file_path is None:\n\t\t\t\tlog_dir = pwd+'/log_dir/'\n\t\t\telse : \n\t\t\t\tlog_dir = args.log_file_path\n\t\t\tdf.to_csv(log_dir+log_file_name, index=False) \n\n\t\t\tfig.savefig(log_dir+args.filename+'weights.png')\n\t\t\tplt.close()\n\n\t\t\tplt.clf()\n\t\t\tplt.hist(weights.flatten())\n\n\t\t\tplt.tight_layout() \n\t\t\tplt.savefig(log_dir+args.filename+'histogram.png')\n\n\n\t\t\t# plt.figure(figsize=(12,10))\n\n\t\t\t# plt.subplot(2, 1, 1)\n\t\t\t# plt.title('Input neurons')\n\t\t\t# rasterplot(time_points, p_input_layer)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.subplot(2, 1, 2)\n\t\t\t# plt.title('Output neurons')\n\t\t\t# rasterplot(time_points, p_layer_1)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.tight_layout()\n\n\t\t\t# plt.savefig(folder+'/raster'+str(args.filename)+'.png')\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 
'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\tif args.log_file_path is None:\n\t\t\tlog_dir = pwd+'/log_dir/'\n\t\telse : \n\t\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+log_file_name, index=False)\n\n\tdf.to_csv(log_file_name, index=False)\n\n\n\tlogger.info('All done.')",
"import itertools\nimport random\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom mnist_stdp_multiple_baseline import *\nfrom utilis import *\nfrom args_mnist import args as my_args\n# from ax import optimize\nimport pandas as pd\nfrom itertools import product\nimport time\n\n\nif __name__ == '__main__':\n\n\targs = my_args()\n\tprint(args.__dict__)\n\n\tlogging.basicConfig(level=logging.DEBUG)\n\tlogger = logging.getLogger(__name__)\n\n\t# Fix the seed of all random number generator\n\tseed = 50\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\tpwd = os.getcwd()\n\n\n\tdf = pd.DataFrame({\t\n\t\t\t\t\t\t\"amp_neuron\":[],\n\t\t\t\t\t\t\"input_nbr\":[],\n\t\t\t\t\t\t\"tau_in\" :[],\n\t\t\t\t\t\t\"tau_out\":[], \n \"alpha_p\":[],\n \"alpha_n\":[],\n \"beta_p\":[],\n \"beta_n\":[],\n \"tau_pre\":[],\n \"tau_post\":[],\n \"iterations\":[],\n \"presentation_time\":[],\n \"pause_time\":[],\n \"dt\":[],\n \"n_neurons\":[],\n \"inhibition_time\":[],\n \"tau_ref_in\":[],\n \"tau_ref_out\":[],\n \"inc_n\":[],\n \"tau_n\":[],\n \"synapse_layer_1\":[], \n \"gain_in\":[],\n \"gain_out\":[],\n \"accuracy\":[],\n \"accuracy_2\":[]\n })\n\n\tif args.log_file_path is None:\n\t\tlog_dir = pwd+'/log_dir/'\n\telse : \n\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+'test.csv', index=False)\n\n\tparameters = dict(\n\t\t amp_neuron=[0.5]\n\t\t,input_nbr=[60000]\n\t\t,tau_in = [0.03]\n\t\t,tau_out = [0.03]\n\t\t, alpha_p= [0.7,0.8,0.9]\n\t\t, alpha_n= [0.01,0.005,0.02]\n\t\t, beta_p= [1.4,0.8,1.2]\n\t\t, beta_n= [1,0.8,1.2]\n\t\t, tau_pre= [0.1]\n\t\t, tau_post= [0.08]\n\t\t, iterations=[1]\n\t\t, presentation_time = [0.20]\n\t\t, pause_time = [0.1]\n\t\t, dt = [0.005]\n\t\t, n_neurons = [10]\n\t\t, inhibition_time = [10]\n\t\t, tau_ref_in = [0.005]\n\t\t, tau_ref_out = [0.005]\n\t\t, inc_n = [0.01]\n\t\t, tau_n = [1]\n\t\t, synapse_layer_1=[0.005]\n\t\t, gain_in = [4]\n\t\t, gain_out = [2]\n\t\t, seed =[100]\n )\n\tparam_values = [v for v in parameters.values()]\n\n\tnow = time.strftime(\"%Y%m%d-%H%M%S\")\n\tfolder = os.getcwd()+\"/MNIST_VDSP_explorartion\"+now\n\tos.mkdir(folder)\n\n\tfor args.amp_neuron,args.input_nbr,args.tau_in,args.tau_out,args.alpha_p,args.alpha_n,args.beta_p,args.beta_n,args.tau_pre,args.tau_post,args.iterations,args.presentation_time,args.pause_time, args.dt,args.n_neurons,args.inhibition_time,args.tau_ref_in,args.tau_ref_out,args.inc_n,args.tau_n,args.synapse_layer_1,args.gain_in,args.gain_out,args.seed in product(*param_values):\n\n\n\t\t# args.pause_time = 0\n\n\t\t# args.filename = 'vprog-'+str(args.vprog)+'-g_max-'+str(args.g_max)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)\n\t\targs.filename = 'stdp-'+str(args.amp_neuron)+str(args.input_nbr)+str(args.tau_in)+str(args.tau_out)+str(args.alpha_p)+str(args.alpha_n)+str(args.beta_p)+str(args.beta_n)+str(args.tau_pre)+str(args.tau_post)\n\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\taccuracy, accuracy_2,weights = evaluate_mnist_multiple_baseline(args)\n\n\t\tdf = df.append({ \n\t\t\t\t\t\t\"amp_neuron\":args.amp_neuron,\n\t\t\t\t\t\t\n\t\t\t\t\t\t \"input_nbr\":args.input_nbr,\n\t\t\t\t\t\t \"tau_in\":args.tau_in,\n\t\t\t\t\t\t \"tau_out\": args.tau_out,\n\t\t\t\t\t \n\t\t\t\t\t\t \"alpha_p\": args.alpha_p,\n\t\t\t\t\t\t \"alpha_n\": args.alpha_n,\n\t\t\t\t\t\t \"beta_p\":args.beta_p,\n\t\t\t\t\t\t \"beta_n\": args.beta_n,\n\t\t\t\t\t\t 
\"tau_pre\": args.tau_pre,\n\t\t\t\t\t\t \"tau_post\": args.tau_post,\n\t\t\t\t\t\t \n\t\t\t\t\t\t \"iterations\":args.iterations,\n\t\t \"presentation_time\":args.presentation_time,\n\t\t \"pause_time\":args.pause_time,\n\t\t \"dt\":args.dt,\n\t\t \"n_neurons\":args.n_neurons,\n\t\t \"seed\":args.seed,\n\t\t \"inhibition_time\":args.inhibition_time,\n\t\t \"tau_ref_in\":args.tau_ref_in,\n\t\t \"tau_ref_out\":args.tau_ref_out,\n\t\t \"inc_n\":args.inc_n,\n\t\t \"tau_n\":args.tau_n,\n\t\t \"synapse_layer_1\":args.synapse_layer_1,\n\t\t \n\t\t \"gain_in\":args.gain_in,\n\t\t \"bias_out\":args.bias_out,\n\n\t\t \"accuracy\":accuracy,\n\t\t \"accuracy_2\":accuracy_2\n\t\t },ignore_index=True)\n\t\t\n\n\t\tplot = True\n\t\tif plot : \t\n\t\t\tprint('accuracy', accuracy)\n\t\t\tprint(args.filename)\n\t\t\t# weights = weights[-1]#Taking only the last weight for plotting\n\n\t\t\tcolumns = int(args.n_neurons/5)\n\t\t\trows = int(args.n_neurons/columns)\n\n\t\t\tfig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(columns*5,rows*5))\n\n\t\t\tfor i in range(0,(args.n_neurons)):\n\t\t\t\taxes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[i],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t\taxes[int(i/columns)][int(i%columns)].get_xaxis().set_visible(False)\n\t\t\t\taxes[int(i/columns)][int(i%columns)].get_yaxis().set_visible(False)\n\t\t\tplt.tight_layout() \n\t\t\tplt.axis('off')\n\n\t \n\t\t\t# fig, axes = plt.subplots(1,1, figsize=(3,3))\n\t\t\t# fig = plt.figure()\n\t\t\t# ax1 = fig.add_subplot()\n\t\t\t# cax = ax1.matshow(np.reshape(weights[0],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t# fig.colorbar(cax)\n\t\t\t# plt.tight_layout() \n\n\t\t\tif args.log_file_path is None:\n\t\t\t\tlog_dir = pwd+'/log_dir/'\n\t\t\telse : \n\t\t\t\tlog_dir = args.log_file_path\n\t\t\tdf.to_csv(log_dir+log_file_name, index=False) \n\n\t\t\tfig.savefig(log_dir+args.filename+'weights.png')\n\t\t\tplt.close()\n\n\t\t\tplt.clf()\n\t\t\tplt.hist(weights.flatten())\n\n\t\t\tplt.tight_layout() \n\t\t\tplt.savefig(log_dir+args.filename+'histogram.png')\n\n\n\t\t\t# plt.figure(figsize=(12,10))\n\n\t\t\t# plt.subplot(2, 1, 1)\n\t\t\t# plt.title('Input neurons')\n\t\t\t# rasterplot(time_points, p_input_layer)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.subplot(2, 1, 2)\n\t\t\t# plt.title('Output neurons')\n\t\t\t# rasterplot(time_points, p_layer_1)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.tight_layout()\n\n\t\t\t# plt.savefig(folder+'/raster'+str(args.filename)+'.png')\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\tif args.log_file_path is None:\n\t\t\tlog_dir = pwd+'/log_dir/'\n\t\telse : \n\t\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+log_file_name, index=False)\n\n\tdf.to_csv(log_file_name, index=False)\n\n\n\tlogger.info('All done.')",
"\nimport itertools\nimport random\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom mnist_vdsp_multiple_baseline import *\nfrom utilis import *\nfrom args_mnist import args as my_args\n# from ax import optimize\nimport pandas as pd\nfrom itertools import product\nimport time\n\n\nif __name__ == '__main__':\n\n\targs = my_args()\n\tprint(args.__dict__)\n\n\tlogging.basicConfig(level=logging.DEBUG)\n\tlogger = logging.getLogger(__name__)\n\n\t# Fix the seed of all random number generator\n\tseed = 50\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\tpwd = os.getcwd()\n\n\n\tdf = pd.DataFrame({\t\"vprog\":[],\n\t\t\t\t\t\t\"amp_neuron\":[],\n\t\t\t\t\t\t\"vth\":[],\n\t\t\t\t\t\t\"input_nbr\":[],\n\t\t\t\t\t\t\"tau_in\" :[],\n\t\t\t\t\t\t\"tau_out\":[],\n \"lr\":[],\n \"alpha\":[],\n \"multiplicative\":[],\n \"iterations\":[],\n \"presentation_time\":[],\n \"pause_time\":[],\n \"dt\":[],\n \"n_neurons\":[],\n \"inhibition_time\":[],\n \"tau_ref_in\":[],\n \"tau_ref_out\":[],\n \"inc_n\":[],\n \"tau_n\":[],\n \"synapse_layer_1\":[],\n \"winit_max\":[],\n \"vprog_increment\":[],\n \"voltage_clip_max\":[],\n \"voltage_clip_min\":[],\n \"Vapp_multiplier\":[],\n \"gain_in\":[],\n \"bias_in\":[],\n \"noise_input\":[],\n \"accuracy\":[],\n \"accuracy_2\":[]\n })\n\n\tif args.log_file_path is None:\n\t\tlog_dir = pwd+'/log_dir/'\n\telse : \n\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+'test.csv', index=False)\n\n\tparameters = dict(\n\t\tvprog = [0]\n\t\t, amp_neuron=[0.000375]\n\t\t,input_nbr=[60000]\n\t\t,tau_in = [0.03]\n\t\t,tau_out = [0.03]\n\t\t, lr = [0.0005]\n\t\t, alpha= [0]\n\t\t, multiplicative = [1]\n\t\t, iterations=[5]\n\t\t, presentation_time = [0.35]\n\t\t, pause_time = [0]\n\t\t, dt = [0.005]\n\t\t, n_neurons = [400,500]\n\t\t, inhibition_time = [10]\n\t\t, tau_ref_in = [0.005]\n\t\t, tau_ref_out = [0.005]\n\t\t, inc_n = [0.01]\n\t\t, tau_n = [1]\n\t\t, synapse_layer_1=[0.005]\n\t\t, winit_max = [1]\n\t\t, vprog_increment = [0]\n\t\t, voltage_clip_max=[1]\n\t\t, voltage_clip_min = [-1]\n\t\t, Vapp_multiplier = [1]\n\t\t, gain_in = [4]\n\t\t, bias_in = [0.5]\n\t\t, noise_input = [0]\n\t\t, seed =[2011]\n )\n\tparam_values = [v for v in parameters.values()]\n\n\tnow = time.strftime(\"%Y%m%d-%H%M%S\")\n\tfolder = os.getcwd()+\"/MNIST_VDSP_explorartion\"+now\n\tos.mkdir(folder)\n\n\tfor args.vprog,args.amp_neuron,args.input_nbr,args.tau_in,args.tau_out,args.lr,args.alpha,args.multiplicative,args.iterations,args.presentation_time,args.pause_time, args.dt,args.n_neurons,args.inhibition_time,args.tau_ref_in,args.tau_ref_out,args.inc_n,args.tau_n,args.synapse_layer_1,args.winit_max,args.vprog_increment,args.voltage_clip_max,args.voltage_clip_min,args.Vapp_multiplier,args.gain_in,args.bias_in,args.noise_input,args.seed in product(*param_values):\n\n\n\t\t# args.pause_time = 0\n\n\t\t# args.filename = 'vprog-'+str(args.vprog)+'-g_max-'+str(args.g_max)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)\n\t\targs.filename = 'vprog-'+str(args.vprog)+'amp_neuron'+str(args.amp_neuron)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)+'pause_time'+str(args.pause_time) + 
'dt-'+str(args.dt)+'ref-'+str(args.tau_ref_in)+str(args.tau_ref_out)+'gain-'+str(args.gain_in)+'bias_in'+str(args.bias_in)+'adaptation'+str(args.inc_n)+str(args.tau_n)+'noise'+str(args.noise_input)+'Vapp_multiplier-'+str(args.Vapp_multiplier)+'winit_max'+str(args.winit_max)+str(args.voltage_clip_max)+str(args.voltage_clip_min)+str(args.n_neurons)+str(args.seed)+str(args.alpha)+str(args.iterations)+str(args.multiplicative)\n\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\taccuracy, accuracy_2,weights = evaluate_mnist_multiple_baseline(args)\n\n\t\tdf = df.append({ \"vprog\":args.vprog,\n\t\t\t\t\t\t\"amp_neuron\":args.amp_neuron,\n\t\t\t\t\t\t \"vth\":args.vthp,\n\t\t\t\t\t\t \"input_nbr\":args.input_nbr,\n\t\t\t\t\t\t \"tau_in\":args.tau_in,\n\t\t\t\t\t\t \"tau_out\": args.tau_out,\n\t\t\t\t\t\t \"lr\": args.lr,\n\t\t\t\t\t\t \"alpha\": args.alpha,\n\t\t\t\t\t\t \"multiplicative\":args.multiplicative,\n\t\t\t\t\t\t \"iterations\":args.iterations,\n\t\t \"presentation_time\":args.presentation_time,\n\t\t \"pause_time\":args.pause_time,\n\t\t \"dt\":args.dt,\n\t\t \"n_neurons\":args.n_neurons,\n\t\t \"seed\":args.seed,\n\t\t \"inhibition_time\":args.inhibition_time,\n\t\t \"tau_ref_in\":args.tau_ref_in,\n\t\t \"tau_ref_out\":args.tau_ref_out,\n\t\t \"inc_n\":args.inc_n,\n\t\t \"tau_n\":args.tau_n,\n\t\t \"synapse_layer_1\":args.synapse_layer_1,\n\t\t \"winit_max\":args.winit_max,\n\t\t \"vprog_increment\":args.vprog_increment,\n\t\t \"voltage_clip_max\":args.voltage_clip_max,\n\t\t \"voltage_clip_min\":args.voltage_clip_min,\n\t\t \"Vapp_multiplier\":args.Vapp_multiplier,\n\t\t \"gain_in\":args.gain_in,\n\t\t \"bias_in\":args.bias_in,\n\t\t \"noise_input\":args.noise_input,\n\t\t \"accuracy\":accuracy,\n\t\t \"accuracy_2\":accuracy_2\n\t\t },ignore_index=True)\n\t\t\n\n\t\tplot = True\n\t\tif plot : \t\n\t\t\tprint('accuracy', accuracy)\n\t\t\tprint(args.filename)\n\t\t\t# weights = weights[-1]#Taking only the last weight for plotting\n\n\t\t\tcolumns = int(args.n_neurons/5)\n\t\t\trows = int(args.n_neurons/columns)\n\n\t\t\tfig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(columns*5,rows*5))\n\n\t\t\tfor i in range(0,(args.n_neurons)):\n\t\t\t\taxes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[i],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t\taxes[int(i/columns)][int(i%columns)].get_xaxis().set_visible(False)\n\t\t\t\taxes[int(i/columns)][int(i%columns)].get_yaxis().set_visible(False)\n\t\t\tplt.tight_layout() \n\t\t\tplt.axis('off')\n\n\t \n\t\t\t# fig, axes = plt.subplots(1,1, figsize=(3,3))\n\t\t\t# fig = plt.figure()\n\t\t\t# ax1 = fig.add_subplot()\n\t\t\t# cax = ax1.matshow(np.reshape(weights[0],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t# fig.colorbar(cax)\n\t\t\t# plt.tight_layout() \n\n\t\t\tif args.log_file_path is None:\n\t\t\t\tlog_dir = pwd+'/log_dir/'\n\t\t\telse : \n\t\t\t\tlog_dir = args.log_file_path\n\t\t\tdf.to_csv(log_dir+log_file_name, index=False) \n\n\t\t\tfig.savefig(log_dir+args.filename+'weights.png')\n\t\t\tplt.close()\n\n\t\t\tplt.clf()\n\t\t\tplt.hist(weights.flatten())\n\n\t\t\tplt.tight_layout() \n\t\t\tplt.savefig(log_dir+args.filename+'histogram.png')\n\n\n\t\t\t# plt.figure(figsize=(12,10))\n\n\t\t\t# plt.subplot(2, 1, 1)\n\t\t\t# plt.title('Input neurons')\n\t\t\t# rasterplot(time_points, p_input_layer)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.subplot(2, 1, 2)\n\t\t\t# 
plt.title('Output neurons')\n\t\t\t# rasterplot(time_points, p_layer_1)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.tight_layout()\n\n\t\t\t# plt.savefig(folder+'/raster'+str(args.filename)+'.png')\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\tif args.log_file_path is None:\n\t\t\tlog_dir = pwd+'/log_dir/'\n\t\telse : \n\t\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+log_file_name, index=False)\n\n\tdf.to_csv(log_file_name, index=False)\n\n\n\tlogger.info('All done.')",
"\nimport nengo\nimport numpy as np\nfrom numpy import random\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport os\nfrom nengo.dists import Choice\nfrom datetime import datetime\nimport pickle\nfrom nengo.utils.matplotlib import rasterplot\nimport time\nfrom InputData import PresentInputWithPause\nfrom nengo.neurons import LIFRate\nfrom nengo.params import Parameter, NumberParam, FrozenObject\nfrom nengo.dists import Choice, Distribution, get_samples, Uniform\nfrom nengo.utils.numpy import clip, is_array_like\nfrom utilis import *\nfrom args_mnist import args as my_args\nimport itertools\nimport random\nimport logging\nimport random \n\ndef evaluate_mnist_multiple_baseline_lite(args):\n\n #############################\n # load the data\n #############################\n input_nbr = args.input_nbr\n input_nbr = args.input_nbr\n\n probe_sample_rate = (input_nbr/10)/1000 #Probe sample rate. Proportional to input_nbr to scale down sampling rate of simulations \n\n\n x = args.digit\n np.random.seed(args.seed)\n random.seed(args.seed)\n\n data = np.load('mnist_norm.npz', allow_pickle=True)\n image_train_filtered = data['image_train_filtered']/255\n label_train_filtered = data['label_train_filtered']\n image_test_filtered = data['image_test_filtered']/255\n label_test_filtered = data['label_test_filtered']\n\n\n image_assign_filtered = image_train_filtered\n label_assign_filtered = label_train_filtered\n\n image_train_filtered = np.tile(image_train_filtered,(args.iterations,1,1))\n label_train_filtered = np.tile(label_train_filtered,(args.iterations))\n\n #Simulation Parameters \n #Presentation time\n presentation_time = args.presentation_time #0.20\n #Pause time\n # pause_time = args.pause_time + 0.0001\n pause_time = args.pause_time\n #Iterations\n iterations=args.iterations\n #Input layer parameters\n n_in = args.n_in\n # g_max = 1/784 #Maximum output contribution\n amp_neuron = args.amp_neuron\n n_neurons = args.n_neurons # Layer 1 neurons\n # inhib_factor = args.inhib_factor #Multiplication factor for lateral inhibition\n\n\n input_neurons_args = {\n \"n_neurons\":n_in,\n \"dimensions\":1,\n \"label\":\"Input layer\",\n \"encoders\":nengo.dists.Choice([[1]]),\n # \"max_rates\":nengo.dists.Uniform(22,22),\n # \"intercepts\":nengo.dists.Uniform(0,0),\n \"gain\":nengo.dists.Choice([args.gain_in]),\n \"bias\":nengo.dists.Choice([args.bias_in]),\n # \"noise\":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(args.noise_input, (args.noise_input/2)+0.00001), seed=1), \n\n \"neuron_type\":MyLIF_in_v2(tau_rc=args.tau_in,min_voltage=-1, amplitude=args.amp_neuron, tau_ref=args.tau_ref_in)\n # \"neuron_type\":nengo.neurons.SpikingRectifiedLinear()#SpikingRelu neuron. 
\n }\n\n #Layer 1 parameters\n layer_1_neurons_args = {\n \"n_neurons\":n_neurons,\n \"dimensions\":1,\n \"label\":\"Layer 1\",\n \"encoders\":nengo.dists.Choice([[1]]),\n \"gain\":nengo.dists.Choice([args.gain_out]),\n \"bias\":nengo.dists.Choice([args.bias_out]),\n # \"intercepts\":nengo.dists.Choice([0]),\n # \"max_rates\":nengo.dists.Choice([args.rate_out,args.rate_out]),\n # \"noise\":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 0.5), seed=1), \n # \"neuron_type\":nengo.neurons.LIF(tau_rc=args.tau_out, min_voltage=0)\n # \"neuron_type\":MyLIF_out(tau_rc=args.tau_out, min_voltage=-1)\n \"neuron_type\":STDPLIF(tau_rc=args.tau_out, min_voltage=-1, spiking_threshold=args.thr_out, inhibition_time=args.inhibition_time,tau_ref=args.tau_ref_out,inc_n=args.inc_n,tau_n=args.tau_n)\n }\n\n #Learning rule parameters\n learning_args = {\n \"lr\": args.lr,\n \"winit_min\":0,\n \"winit_max\":args.winit_max,\n \"sample_distance\": int((presentation_time+pause_time)*200*10), #Store weight after 10 images\n }\n\n # argument_string = \"presentation_time: \"+ str(presentation_time)+ \"\\n pause_time: \"+ str(pause_time)+ \"\\n input_neurons_args: \" + str(input_neurons_args)+ \" \\n layer_1_neuron_args: \" + str(layer_1_neurons_args)+\"\\n Lateral Inhibition parameters: \" + str(lateral_inhib_args) + \"\\n learning parameters: \" + str(learning_args)+ \"\\n g_max: \"+ str(g_max) \n\n images = image_train_filtered\n labels = label_train_filtered\n np.random.seed(args.seed)\n random.seed(args.seed) \n\n model = nengo.Network(\"My network\", seed = args.seed)\n #############################\n # Model construction\n #############################\n with model:\n # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))\n picture = nengo.Node(nengo.processes.PresentInput(images, presentation_time=presentation_time))\n true_label = nengo.Node(nengo.processes.PresentInput(labels, presentation_time=presentation_time))\n # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))\n # input layer \n input_layer = nengo.Ensemble(**input_neurons_args)\n input_conn = nengo.Connection(picture,input_layer.neurons,synapse=None)\n #first layer\n layer1 = nengo.Ensemble(**layer_1_neurons_args)\n #Weights between input layer and layer 1\n w = nengo.Node(CustomRule_post_baseline_lite(**learning_args), size_in=n_in, size_out=n_neurons)\n nengo.Connection(input_layer.neurons, w, synapse=None)\n nengo.Connection(w, layer1.neurons, synapse=args.synapse_layer_1)\n weights = w.output.history\n \n # with nengo_ocl.Simulator(model) as sim : \n with nengo.Simulator(model, dt=args.dt, optimize=True) as sim:\n\n \n w.output.set_signal_vmem(sim.signals[sim.model.sig[input_layer.neurons][\"voltage\"]])\n w.output.set_signal_out(sim.signals[sim.model.sig[layer1.neurons][\"out\"]])\n sim.run((presentation_time+pause_time) * labels.shape[0])\n\n last_weight = weights[-1]\n\n sim.close()\n\n pause_time = 0\n\n #Neuron class assingment\n\n images = image_assign_filtered\n labels = label_assign_filtered\n\n\n model = nengo.Network(\"My network\", seed = args.seed)\n\n with model:\n\n # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))\n picture = nengo.Node(nengo.processes.PresentInput(images, presentation_time=presentation_time))\n true_label = nengo.Node(nengo.processes.PresentInput(labels, presentation_time=presentation_time))\n # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))\n # input layer \n 
input_layer = nengo.Ensemble(**input_neurons_args)\n input_conn = nengo.Connection(picture,input_layer.neurons,synapse=None)\n #first layer\n layer1 = nengo.Ensemble(**layer_1_neurons_args)\n nengo.Connection(input_layer.neurons, layer1.neurons,transform=last_weight,synapse=args.synapse_layer_1)\n #Probes\n p_true_label = nengo.Probe(true_label)\n p_layer_1 = nengo.Probe(layer1.neurons)\n\n # with nengo_ocl.Simulator(model) as sim : \n with nengo.Simulator(model, dt=args.dt, optimize=True) as sim:\n \n sim.run((presentation_time+pause_time) * labels.shape[0])\n \n t_data = sim.trange()\n labels = sim.data[p_true_label][:,0]\n output_spikes = sim.data[p_layer_1]\n neuron_class = np.zeros((n_neurons, 1))\n n_classes = 10\n for j in range(n_neurons):\n spike_times_neuron_j = t_data[np.where(output_spikes[:,j] > 0)]\n max_spike_times = 0 \n for i in range(n_classes):\n class_presentation_times_i = t_data[np.where(labels == i)]\n #Normalized number of spikes wrt class presentation time\n num_spikes = len(np.intersect1d(spike_times_neuron_j,class_presentation_times_i))/(len(class_presentation_times_i)+1)\n if(num_spikes>max_spike_times):\n neuron_class[j] = i\n max_spike_times = num_spikes\n spikes_layer1_probe_train = sim.data[p_layer_1]\n\n\n\n #Testing\n\n images = image_test_filtered\n labels = label_test_filtered\n\n\n\n input_nbr = 10000\n \n model = nengo.Network(label=\"My network\",)\n\n with model:\n\n # picture = nengo.Node(PresentInputWithPause(images, presentation_time, pause_time,0))\n picture = nengo.Node(nengo.processes.PresentInput(images, presentation_time=presentation_time))\n true_label = nengo.Node(nengo.processes.PresentInput(labels, presentation_time=presentation_time))\n # true_label = nengo.Node(PresentInputWithPause(labels, presentation_time, pause_time,-1))\n input_layer = nengo.Ensemble(**input_neurons_args)\n input_conn = nengo.Connection(picture,input_layer.neurons,synapse=None)\n #first layer\n layer1 = nengo.Ensemble(**layer_1_neurons_args)\n nengo.Connection(input_layer.neurons, layer1.neurons,transform=last_weight,synapse=args.synapse_layer_1)\n p_true_label = nengo.Probe(true_label)\n p_layer_1 = nengo.Probe(layer1.neurons)\n\n\n step_time = (presentation_time + pause_time) \n\n with nengo.Simulator(model,dt=args.dt) as sim:\n \n sim.run(presentation_time * label_test_filtered.shape[0])\n\n accuracy_2 = evaluation_v2(10,n_neurons,int(((presentation_time * label_test_filtered.shape[0]) / sim.dt) / input_nbr),spikes_layer1_probe_train,label_train_filtered,sim.data[p_layer_1],label_test_filtered,sim.dt)\n\n\n labels = sim.data[p_true_label][:,0]\n t_data = sim.trange()\n output_spikes = sim.data[p_layer_1]\n n_classes = 10\n predicted_labels = [] \n true_labels = []\n correct_classified = 0\n wrong_classified = 0\n\n class_spikes = np.ones((10,1))\n\n for num in range(input_nbr):\n #np.sum(sim.data[my_spike_probe] > 0, axis=0)\n\n output_spikes_num = output_spikes[num*int((presentation_time + pause_time) /args.dt):(num+1)*int((presentation_time + pause_time) /args.dt),:] # 0.350/0.005\n num_spikes = np.sum(output_spikes_num > 0, axis=0)\n\n for i in range(n_classes):\n sum_temp = 0\n count_temp = 0\n for j in range(n_neurons):\n if((neuron_class[j]) == i) : \n sum_temp += num_spikes[j]\n count_temp +=1\n \n if(count_temp==0):\n class_spikes[i] = 0\n else:\n class_spikes[i] = sum_temp\n # class_spikes[i] = sum_temp/count_temp\n\n # print(class_spikes)\n k = np.argmax(num_spikes)\n # predicted_labels.append(neuron_class[k])\n class_pred = np.argmax(class_spikes)\n 
predicted_labels.append(class_pred)\n\n true_class = labels[(num*int((presentation_time + pause_time) /args.dt))]\n\n if(class_pred == true_class):\n correct_classified+=1\n else:\n wrong_classified+=1\n\n \n accuracy = correct_classified/ (correct_classified+wrong_classified)*100\n print(\"Accuracy: \", accuracy)\n sim.close()\n\n del sim.data, labels, class_pred, spikes_layer1_probe_train\n\n return accuracy, accuracy_2, weights[-1]\n\n\n # for tstep in np.arange(0, len(weights), 1):\n # tstep = int(tstep)\n # print(tstep)\n # fig, axes = plt.subplots(1,1, figsize=(3,3))\n\n # for i in range(0,(n_neurons)):\n \n # fig = plt.figure()\n # ax1 = fig.add_subplot()\n # cax = ax1.matshow(np.reshape(weights[tstep][i],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n # fig.colorbar(cax)\n\n # plt.tight_layout() \n # fig.savefig(folder+'/weights'+str(tstep)+'.png')\n # plt.close('all')\n\n # gen_video(folder, \"weights\")\n\n # for tstep in np.arange(0, len(weights), 1):\n # tstep = int(tstep)\n # print(tstep)\n # fig, axes = plt.subplots(1,1, figsize=(3,3))\n\n # for i in range(0,(n_neurons)):\n \n # fig = plt.figure()\n # ax1 = fig.add_subplot()\n # cax = ax1.hist(weights[tstep][i])\n # ax1.set_xlim(0,1)\n # ax1.set_ylim(0,350)\n\n # plt.tight_layout() \n # fig.savefig(folder+'/histogram'+str(tstep)+'.png')\n # plt.close('all')\n\n # gen_video(folder, \"histogram\")\n\n\n\nif __name__ == '__main__':\n logger = logging.getLogger(__name__)\n\n args = my_args()\n\n\n print(args.__dict__)\n logging.basicConfig(level=logging.DEBUG)\n # Fix the seed of all random number generator\n seed = 500\n random.seed(seed)\n np.random.seed(seed)\n\n\n\n # params = nni.get_next_parameter()\n\n # args.g_max = params['g_max']\n # args.tau_in = params['tau_in']\n # args.tau_out = params['tau_out']\n # args.lr = params['lr']\n # args.presentation_time = params['presentation_time']\n # args.rate_out = params['rate_out']\n\n\n\n accuracy, weights = evaluate_mnist_multiple(args)\n print('accuracy:', accuracy)\n\n # now = time.strftime(\"%Y%m%d-%H%M%S\")\n # folder = os.getcwd()+\"/MNIST_VDSP\"+now\n # os.mkdir(folder)\n\n\n # plt.figure(figsize=(12,10))\n\n # plt.subplot(2, 1, 1)\n # plt.title('Input neurons')\n # rasterplot(time_points, p_input_layer)\n # plt.xlabel(\"Time [s]\")\n # plt.ylabel(\"Neuron index\")\n\n # plt.subplot(2, 1, 2)\n # plt.title('Output neurons')\n # rasterplot(time_points, p_layer_1)\n # plt.xlabel(\"Time [s]\")\n # plt.ylabel(\"Neuron index\")\n\n # plt.tight_layout()\n\n # plt.savefig(folder+'/raster'+'.png')\n\n\n # for tstep in np.arange(0, len(weights), 1):\n # tstep = int(tstep)\n # # tstep = len(weightds) - tstep -1\n\n\n # print(tstep)\n\n # columns = int(args.n_neurons/5)\n # fig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(20,25))\n\n # for i in range(0,(args.n_neurons)):\n\n # axes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[tstep][i],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\n\n # plt.tight_layout() \n # fig.savefig(folder+'/weights'+str(tstep)+'.png')\n # plt.close('all')\n\n # gen_video(folder, \"weights\")\n\n\n\n logger.info('All done.')",
"\nimport itertools\nimport random\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom mnist_vdsp_multiple_tio2_gaussian import *\nfrom utilis import *\nfrom args_mnist import args as my_args\n# from ax import optimize\nimport pandas as pd\nfrom itertools import product\nimport time\n\n\nif __name__ == '__main__':\n\n\targs = my_args()\n\tprint(args.__dict__)\n\n\tlogging.basicConfig(level=logging.DEBUG)\n\tlogger = logging.getLogger(__name__)\n\n\t# Fix the seed of all random number generator\n\tseed = 50\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\tpwd = os.getcwd()\n\n\n\tdf = pd.DataFrame({\t\"vprog\":[],\n\t\t\t\t\t\t\"amp_neuron\":[],\n\t\t\t\t\t\t\"vth\":[],\n\t\t\t\t\t\t\"input_nbr\":[],\n\t\t\t\t\t\t\"tau_in\" :[],\n\t\t\t\t\t\t\"tau_out\":[],\n \"lr\":[],\n \"iterations\":[],\n \"presentation_time\":[],\n \"pause_time\":[],\n \"dt\":[],\n \"n_neurons\":[],\n \"inhibition_time\":[],\n \"tau_ref_in\":[],\n \"tau_ref_out\":[],\n \"inc_n\":[],\n \"tau_n\":[],\n \"synapse_layer_1\":[],\n \"winit_mean\":[],\n \"winit_dev\":[],\n \"vprog_increment\":[],\n \"voltage_clip_max\":[],\n \"voltage_clip_min\":[],\n \"Vapp_multiplier\":[],\n \"gain_in\":[],\n \"bias_in\":[],\n \"noise_input\":[],\n \"accuracy\":[],\n \"accuracy_2\":[]\n })\n\n\tif args.log_file_path is None:\n\t\tlog_dir = pwd+'/log_dir/'\n\telse : \n\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+'test.csv', index=False)\n\n\tparameters = dict(\n\t\tvprog = [0]\n\t\t, amp_neuron=[0.5]\n\t\t,input_nbr=[60000]\n\t\t,tau_in = [0.03]\n\t\t,tau_out = [0.03]\n\t\t, lr = [1]\n\t\t, iterations=[1]\n\t\t, presentation_time = [0.35]\n\t\t, pause_time = [0]\n\t\t, dt = [0.005]\n\t\t, n_neurons = [50]\n\t\t, inhibition_time = [10]\n\t\t, tau_ref_in = [0.01]\n\t\t, tau_ref_out = [0.005]\n\t\t, inc_n = [0.01]\n\t\t, tau_n = [1]\n\t\t, synapse_layer_1=[0.005]\n\t\t, winit_mean = [0.75]\n\t\t, winit_dev = [0.1,0.2,0.3,0.4,0.5]\n\t\t, vprog_increment = [0]\n\t\t, voltage_clip_max=[1.8]\n\t\t, voltage_clip_min = [-1.5]\n\t\t, Vapp_multiplier = [1]\n\t\t, gain_in = [3.5]\n\t\t, bias_in = [0.85]\n\t\t, noise_input = [0]\n\t\t, seed =[100]\n )\n\tparam_values = [v for v in parameters.values()]\n\n\tnow = time.strftime(\"%Y%m%d-%H%M%S\")\n\tfolder = os.getcwd()+\"/MNIST_VDSP_explorartion\"+now\n\tos.mkdir(folder)\n\n\tfor args.vprog,args.amp_neuron,args.input_nbr,args.tau_in,args.tau_out,args.lr,args.iterations,args.presentation_time,args.pause_time, args.dt,args.n_neurons,args.inhibition_time,args.tau_ref_in,args.tau_ref_out,args.inc_n,args.tau_n,args.synapse_layer_1,args.winit_mean,args.winit_dev,args.vprog_increment,args.voltage_clip_max,args.voltage_clip_min,args.Vapp_multiplier,args.gain_in,args.bias_in,args.noise_input,args.seed in product(*param_values):\n\n\n\t\t# args.pause_time = 0\n\n\t\t# args.filename = 'vprog-'+str(args.vprog)+'-g_max-'+str(args.g_max)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)\n\t\targs.filename = 'vprog-'+str(args.vprog)+'amp_neuron'+str(args.amp_neuron)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)+'pause_time'+str(args.pause_time) + 
'dt-'+str(args.dt)+'ref-'+str(args.tau_ref_in)+str(args.tau_ref_out)+'gain-'+str(args.gain_in)+'bias_in'+str(args.bias_in)+'adaptation'+str(args.inc_n)+str(args.tau_n)+'noise'+str(args.noise_input)+'Vapp_multiplier-'+str(args.Vapp_multiplier)+'winit_max'+str(args.winit_max)+str(args.voltage_clip_max)+str(args.voltage_clip_min)+str(args.n_neurons)+str(args.seed)+str(args.winit_mean)+str(args.winit_dev)\n\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\taccuracy, accuracy_2,weights = evaluate_mnist_multiple_tio2_gaussian(args)\n\n\t\tdf = df.append({ \"vprog\":args.vprog,\n\t\t\t\t\t\t\"amp_neuron\":args.amp_neuron,\n\t\t\t\t\t\t \"vth\":args.vthp,\n\t\t\t\t\t\t \"input_nbr\":args.input_nbr,\n\t\t\t\t\t\t \"tau_in\":args.tau_in,\n\t\t\t\t\t\t \"tau_out\": args.tau_out,\n\t\t\t\t\t\t \"lr\": args.lr,\n\t\t\t\t\t\t \"iterations\":args.iterations,\n\t\t \"presentation_time\":args.presentation_time,\n\t\t \"pause_time\":args.pause_time,\n\t\t \"dt\":args.dt,\n\t\t \"n_neurons\":args.n_neurons,\n\t\t \"seed\":args.seed,\n\t\t \"inhibition_time\":args.inhibition_time,\n\t\t \"tau_ref_in\":args.tau_ref_in,\n\t\t \"tau_ref_out\":args.tau_ref_out,\n\t\t \"inc_n\":args.inc_n,\n\t\t \"tau_n\":args.tau_n,\n\t\t \"synapse_layer_1\":args.synapse_layer_1,\n\t\t \"winit_mean\":args.winit_mean,\n\t\t \"winit_dev\":args.winit_dev,\n\t\t \"vprog_increment\":args.vprog_increment,\n\t\t \"voltage_clip_max\":args.voltage_clip_max,\n\t\t \"voltage_clip_min\":args.voltage_clip_min,\n\t\t \"Vapp_multiplier\":args.Vapp_multiplier,\n\t\t \"gain_in\":args.gain_in,\n\t\t \"bias_in\":args.bias_in,\n\t\t \"noise_input\":args.noise_input,\n\t\t \"accuracy\":accuracy,\n\t\t \"accuracy_2\":accuracy_2\n\t\t },ignore_index=True)\n\t\t\n\n\t\tplot = True\n\t\tif plot : \t\n\t\t\tprint('accuracy', accuracy)\n\t\t\tprint(args.filename)\n\t\t\t# weights = weights[-1]#Taking only the last weight for plotting\n\n\t\t\tcolumns = int(args.n_neurons/5)\n\t\t\trows = int(args.n_neurons/columns)\n\n\t\t\tfig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(columns*5,rows*5))\n\n\t\t\tfor i in range(0,(args.n_neurons)):\n\t\t\t\taxes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[i],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\n\t\t\tplt.tight_layout() \n\n\t \n\t\t\t# fig, axes = plt.subplots(1,1, figsize=(3,3))\n\t\t\t# fig = plt.figure()\n\t\t\t# ax1 = fig.add_subplot()\n\t\t\t# cax = ax1.matshow(np.reshape(weights[0],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t# fig.colorbar(cax)\n\t\t\t# plt.tight_layout() \n\n\t\t\tif args.log_file_path is None:\n\t\t\t\tlog_dir = pwd+'/log_dir/'\n\t\t\telse : \n\t\t\t\tlog_dir = args.log_file_path\n\t\t\tdf.to_csv(log_dir+log_file_name, index=False) \n\n\t\t\tfig.savefig(log_dir+args.filename+'weights.png')\n\t\t\tplt.close()\n\n\t\t\tplt.clf()\n\t\t\tplt.hist(weights.flatten())\n\n\t\t\tplt.tight_layout() \n\t\t\tplt.savefig(log_dir+args.filename+'histogram.png')\n\n\n\t\t\t# plt.figure(figsize=(12,10))\n\n\t\t\t# plt.subplot(2, 1, 1)\n\t\t\t# plt.title('Input neurons')\n\t\t\t# rasterplot(time_points, p_input_layer)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.subplot(2, 1, 2)\n\t\t\t# plt.title('Output neurons')\n\t\t\t# rasterplot(time_points, p_layer_1)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.tight_layout()\n\n\t\t\t# 
plt.savefig(folder+'/raster'+str(args.filename)+'.png')\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\tif args.log_file_path is None:\n\t\t\tlog_dir = pwd+'/log_dir/'\n\t\telse : \n\t\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+log_file_name, index=False)\n\n\tdf.to_csv(log_file_name, index=False)\n\n\n\tlogger.info('All done.')",
"\nimport itertools\nimport random\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom mnist_vdsp_multiple_baseline import *\nfrom utilis import *\nfrom args_mnist import args as my_args\n# from ax import optimize\nimport pandas as pd\nfrom itertools import product\nimport time\n\n\nif __name__ == '__main__':\n\n\targs = my_args()\n\tprint(args.__dict__)\n\n\tlogging.basicConfig(level=logging.DEBUG)\n\tlogger = logging.getLogger(__name__)\n\n\t# Fix the seed of all random number generator\n\tseed = 50\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\tpwd = os.getcwd()\n\n\n\tdf = pd.DataFrame({\t\"input_scale\":[],\n\t\t\t\t\t\t\"amp_neuron\":[],\n\t\t\t\t\t\t\"vth\":[],\n\t\t\t\t\t\t\"input_nbr\":[],\n\t\t\t\t\t\t\"tau_in\" :[],\n\t\t\t\t\t\t\"tau_out\":[],\n \"lr\":[],\n \"alpha\":[],\n \"multiplicative\":[],\n \"iterations\":[],\n \"presentation_time\":[],\n \"pause_time\":[],\n \"dt\":[],\n \"n_neurons\":[],\n \"inhibition_time\":[],\n \"tau_ref_in\":[],\n \"tau_ref_out\":[],\n \"inc_n\":[],\n \"tau_n\":[],\n \"synapse_layer_1\":[],\n \"winit_max\":[],\n \"vprog_increment\":[],\n \"voltage_clip_max\":[],\n \"voltage_clip_min\":[],\n \"Vapp_multiplier\":[],\n \"gain_in\":[],\n \"bias_in\":[],\n \"noise_input\":[],\n \"accuracy\":[],\n \"accuracy_2\":[]\n })\n\n\tif args.log_file_path is None:\n\t\tlog_dir = pwd+'/log_dir/'\n\telse : \n\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+'test.csv', index=False)\n\n\tparameters = dict(\n\t\tinput_scale = [1]\n\t\t, amp_neuron=[0.05]\n\t\t,input_nbr=[60000]\n\t\t,tau_in = [0.03]\n\t\t,tau_out = [0.03]\n\t\t, lr = [0.004,0.005,0.006,0.007,0.008,0.009,0.01,0.011]\n\t\t, alpha= [0]\n\t\t, multiplicative = [1]\n\t\t, iterations=[1]\n\t\t, presentation_time = [0.35]\n\t\t, pause_time = [0]\n\t\t, dt = [0.005]\n\t\t, n_neurons = [500]\n\t\t, inhibition_time = [10]\n\t\t, tau_ref_in = [0.005]\n\t\t, tau_ref_out = [0.005]\n\t\t, inc_n = [0.01]\n\t\t, tau_n = [1]\n\t\t, synapse_layer_1=[0.005]\n\t\t, winit_max = [1]\n\t\t, vprog_increment = [0]\n\t\t, voltage_clip_max=[1]\n\t\t, voltage_clip_min = [-1]\n\t\t, Vapp_multiplier = [1]\n\t\t, gain_in = [4]\n\t\t, bias_in = [0.5]\n\t\t, noise_input = [0]\n\t\t, seed =[1998]\n )\n\tparam_values = [v for v in parameters.values()]\n\n\tnow = time.strftime(\"%Y%m%d-%H%M%S\")\n\tfolder = os.getcwd()+\"/MNIST_VDSP_explorartion\"+now\n\tos.mkdir(folder)\n\n\tfor args.input_scale,args.amp_neuron,args.input_nbr,args.tau_in,args.tau_out,args.lr,args.alpha,args.multiplicative,args.iterations,args.presentation_time,args.pause_time, args.dt,args.n_neurons,args.inhibition_time,args.tau_ref_in,args.tau_ref_out,args.inc_n,args.tau_n,args.synapse_layer_1,args.winit_max,args.vprog_increment,args.voltage_clip_max,args.voltage_clip_min,args.Vapp_multiplier,args.gain_in,args.bias_in,args.noise_input,args.seed in product(*param_values):\n\n\n\t\t# args.pause_time = 0\n\n\t\t# args.filename = 'vprog-'+str(args.vprog)+'-g_max-'+str(args.g_max)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)\n\t\targs.filename = 'input_scale-'+str(args.input_scale)+'amp_neuron'+str(args.amp_neuron)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)+'pause_time'+str(args.pause_time) + 
'dt-'+str(args.dt)+'ref-'+str(args.tau_ref_in)+str(args.tau_ref_out)+'gain-'+str(args.gain_in)+'bias_in'+str(args.bias_in)+'adaptation'+str(args.inc_n)+str(args.tau_n)+'noise'+str(args.noise_input)+'Vapp_multiplier-'+str(args.Vapp_multiplier)+'winit_max'+str(args.winit_max)+str(args.voltage_clip_max)+str(args.voltage_clip_min)+str(args.n_neurons)+str(args.seed)+str(args.alpha)+str(args.iterations)+str(args.multiplicative)\n\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\taccuracy, accuracy_2,weights = evaluate_mnist_multiple_baseline(args)\n\n\t\tdf = df.append({ \"input_scale\":args.input_scale,\n\t\t\t\t\t\t\"amp_neuron\":args.amp_neuron,\n\t\t\t\t\t\t \"vth\":args.vthp,\n\t\t\t\t\t\t \"input_nbr\":args.input_nbr,\n\t\t\t\t\t\t \"tau_in\":args.tau_in,\n\t\t\t\t\t\t \"tau_out\": args.tau_out,\n\t\t\t\t\t\t \"lr\": args.lr,\n\t\t\t\t\t\t \"alpha\": args.alpha,\n\t\t\t\t\t\t \"multiplicative\":args.multiplicative,\n\t\t\t\t\t\t \"iterations\":args.iterations,\n\t\t \"presentation_time\":args.presentation_time,\n\t\t \"pause_time\":args.pause_time,\n\t\t \"dt\":args.dt,\n\t\t \"n_neurons\":args.n_neurons,\n\t\t \"seed\":args.seed,\n\t\t \"inhibition_time\":args.inhibition_time,\n\t\t \"tau_ref_in\":args.tau_ref_in,\n\t\t \"tau_ref_out\":args.tau_ref_out,\n\t\t \"inc_n\":args.inc_n,\n\t\t \"tau_n\":args.tau_n,\n\t\t \"synapse_layer_1\":args.synapse_layer_1,\n\t\t \"winit_max\":args.winit_max,\n\t\t \"vprog_increment\":args.vprog_increment,\n\t\t \"voltage_clip_max\":args.voltage_clip_max,\n\t\t \"voltage_clip_min\":args.voltage_clip_min,\n\t\t \"Vapp_multiplier\":args.Vapp_multiplier,\n\t\t \"gain_in\":args.gain_in,\n\t\t \"bias_in\":args.bias_in,\n\t\t \"noise_input\":args.noise_input,\n\t\t \"accuracy\":accuracy,\n\t\t \"accuracy_2\":accuracy_2\n\t\t },ignore_index=True)\n\t\t\n\n\t\tplot = True\n\t\tif plot : \t\n\t\t\tprint('accuracy', accuracy)\n\t\t\tprint(args.filename)\n\t\t\t# weights = weights[-1]#Taking only the last weight for plotting\n\n\t\t\tcolumns = int(args.n_neurons/5)\n\t\t\trows = int(args.n_neurons/columns)\n\n\t\t\tfig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(columns*5,rows*5))\n\n\t\t\tfor i in range(0,(args.n_neurons)):\n\t\t\t\taxes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[i],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t\taxes[int(i/columns)][int(i%columns)].get_xaxis().set_visible(False)\n\t\t\t\taxes[int(i/columns)][int(i%columns)].get_yaxis().set_visible(False)\n\t\t\tplt.tight_layout() \n\t\t\tplt.axis('off')\n\n\t \n\t\t\t# fig, axes = plt.subplots(1,1, figsize=(3,3))\n\t\t\t# fig = plt.figure()\n\t\t\t# ax1 = fig.add_subplot()\n\t\t\t# cax = ax1.matshow(np.reshape(weights[0],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t# fig.colorbar(cax)\n\t\t\t# plt.tight_layout() \n\n\t\t\tif args.log_file_path is None:\n\t\t\t\tlog_dir = pwd+'/log_dir/'\n\t\t\telse : \n\t\t\t\tlog_dir = args.log_file_path\n\t\t\tdf.to_csv(log_dir+log_file_name, index=False) \n\n\t\t\tfig.savefig(log_dir+args.filename+'weights.png')\n\t\t\tplt.close()\n\n\t\t\tplt.clf()\n\t\t\tplt.hist(weights.flatten())\n\n\t\t\tplt.tight_layout() \n\t\t\tplt.savefig(log_dir+args.filename+'histogram.png')\n\n\n\t\t\t# plt.figure(figsize=(12,10))\n\n\t\t\t# plt.subplot(2, 1, 1)\n\t\t\t# plt.title('Input neurons')\n\t\t\t# rasterplot(time_points, p_input_layer)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.subplot(2, 1, 
2)\n\t\t\t# plt.title('Output neurons')\n\t\t\t# rasterplot(time_points, p_layer_1)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.tight_layout()\n\n\t\t\t# plt.savefig(folder+'/raster'+str(args.filename)+'.png')\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\tif args.log_file_path is None:\n\t\t\tlog_dir = pwd+'/log_dir/'\n\t\telse : \n\t\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+log_file_name, index=False)\n\n\tdf.to_csv(log_file_name, index=False)\n\n\n\tlogger.info('All done.')",
"\nimport itertools\nimport random\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom mnist_vdsp_multiple_var_g import *\nfrom utilis import *\nfrom args_mnist import args as my_args\n# from ax import optimize\nimport pandas as pd\nfrom itertools import product\nimport time\n\n\nif __name__ == '__main__':\n\n\targs = my_args()\n\tprint(args.__dict__)\n\n\tlogging.basicConfig(level=logging.DEBUG)\n\tlogger = logging.getLogger(__name__)\n\n\t# Fix the seed of all random number generator\n\tseed = 50\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\n\tdf = pd.DataFrame({\t\"vprog\":[],\n\t\t\t\t\t\t\"input_nbr\":[],\n\t\t\t\t\t\t\"tau_in\" :[],\n\t\t\t\t\t\t\"tau_out\":[],\n\t\t\t\t\t\t\"gain_in\":[],\n\t\t\t\t\t\t\"gain_out\":[],\n\t\t\t\t\t\t\"bias_in\":[],\n\t\t\t\t\t\t\"bias_out\":[],\n\t\t\t\t\t\t\"thr_out\":[],\n\t\t\t\t\t\t\"inhibition_time\":[],\n \"lr\":[],\n \"presentation_time\":[],\n \"g_var\":[],\n \"seed\":[],\n \"accuracy\":[],\n })\n\n\tif args.log_file_path is None:\n\t\tpwd = os.getcwd()\n\t\tlog_dir = pwd+'/log_dir/'\n\telse : \n\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+'test.csv', index=False)\n\n\n\tparameters = dict(\n\t\tvprog = [-0.75]\n\t\t,input_nbr=[60000]\n\t\t,tau_in = [0.06]\n\t\t,tau_out = [0.06]\n\t\t,gain_in = [2]\n\t\t,gain_out = [2]\n\t\t,bias_in = [0]\n\t\t,bias_out = [0]\n\t\t,thr_out = [1]\n\t\t,inhibition_time = [10]\n\t\t, lr = [0.1]\n\t\t, presentation_time = [0.35]\n\t\t, g_var = [0.5,0.6,0.7,0.8,0.9]\n\t\t, seed = [700]\n )\n\tparam_values = [v for v in parameters.values()]\n\n\tnow = time.strftime(\"%Y%m%d-%H%M%S\")\n\tfolder = os.getcwd()+\"/MNIST_VDSP_explorartion\"+now\n\tos.mkdir(folder)\n\n\tfor args.vprog,args.input_nbr,args.tau_in,args.tau_out,args.gain_in,args.gain_out,args.bias_in,args.bias_out,args.thr_out,args.inhibition_time,args.lr,args.presentation_time,args.g_var,args.seed in product(*param_values):\n\n\t\t# args.filename = 'vprog-'+str(args.vprog)+'-g_max-'+str(args.g_max)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)\n\t\t\n\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+str(timestr)+'.csv'\n\t\tpwd = os.getcwd()\n\t\t# args.vthn = args.vthp\n\t\taccuracy, weights = evaluate_mnist_multiple_var_g(args)\n\n\n\t\t\n\t\tdf = df.append({ \"vprog\":args.vprog,\n\t\t\t\t\t\t \"input_nbr\":args.input_nbr,\n\t\t\t\t\t\t \"tau_in\":args.tau_in,\n\t\t\t\t\t\t \"tau_out\": args.tau_out,\n\t\t\t\t\t\t \"gain_in\":args.gain_in,\n\t\t\t\t\t\t \"gain_out\":args.gain_out,\n\t\t\t\t\t\t \"bias_in\":args.bias_in,\n\t\t\t\t\t\t \"bias_out\":args.bias_out,\n\t\t\t\t\t\t \"thr_out\":args.thr_out,\n\t\t\t\t\t\t \"inhibition_time\":args.inhibition_time,\n\t\t\t\t\t\t \"lr\": args.lr,\n\t\t \"presentation_time\":args.presentation_time,\n\t\t \"g_var\":args.g_var,\n\t\t \"seed\":args.seed,\n\t\t \"accuracy\":accuracy\n\t\t },ignore_index=True)\n\t\t\n\n\n\t\tplot = False\n\t\tif plot : \t\n\t\t\tprint('accuracy', accuracy)\n\t\t\tprint(args.filename)\n\t\t\t# weights = weights[-1]#Taking only the last weight for plotting\n\n\t\t\tcolumns = int(args.n_neurons/5)\n\n\t\t\tfig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(20,25))\n\n\t\t\tfor i in range(0,(args.n_neurons)):\n\t\t\t\taxes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[i],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\n\t\t\tplt.tight_layout() \n\n\t \n\t\t\t# fig, axes = plt.subplots(1,1, 
figsize=(3,3))\n\t\t\t# fig = plt.figure()\n\t\t\t# ax1 = fig.add_subplot()\n\t\t\t# cax = ax1.matshow(np.reshape(weights[0],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t# fig.colorbar(cax)\n\t\t\t# plt.tight_layout() \n\n\t\t\tfig.savefig(folder+'/weights'+str(args.filename)+'.png')\n\t\t\tplt.close()\n\n\n\t\t\t# plt.figure(figsize=(12,10))\n\n\t\t\t# plt.subplot(2, 1, 1)\n\t\t\t# plt.title('Input neurons')\n\t\t\t# rasterplot(time_points, p_input_layer)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.subplot(2, 1, 2)\n\t\t\t# plt.title('Output neurons')\n\t\t\t# rasterplot(time_points, p_layer_1)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.tight_layout()\n\n\t\t\t# plt.savefig(folder+'/raster'+str(args.filename)+'.png')\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\tif args.log_file_path is None:\n\t\t\tlog_dir = pwd+'/log_dir/'\n\t\telse : \n\t\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+log_file_name, index=False)\n\n\tdf.to_csv(log_file_name, index=False)\n\n\n\tlogger.info('All done.')",
"\nimport itertools\nimport random\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom mnist_vdsp_multiple_baseline import *\nfrom utilis import *\nfrom args_mnist import args as my_args\n# from ax import optimize\nimport pandas as pd\nfrom itertools import product\nimport time\n\n\nif __name__ == '__main__':\n\n\targs = my_args()\n\tprint(args.__dict__)\n\n\tlogging.basicConfig(level=logging.DEBUG)\n\tlogger = logging.getLogger(__name__)\n\n\t# Fix the seed of all random number generator\n\tseed = 50\n\trandom.seed(seed)\n\tnp.random.seed(seed)\n\tpwd = os.getcwd()\n\n\n\tdf = pd.DataFrame({\t\"input_scale\":[],\n\t\t\t\t\t\t\"amp_neuron\":[],\n\t\t\t\t\t\t\"vth\":[],\n\t\t\t\t\t\t\"input_nbr\":[],\n\t\t\t\t\t\t\"tau_in\" :[],\n\t\t\t\t\t\t\"tau_out\":[],\n \"lr\":[],\n \"alpha\":[],\n \"multiplicative\":[],\n \"iterations\":[],\n \"presentation_time\":[],\n \"pause_time\":[],\n \"dt\":[],\n \"n_neurons\":[],\n \"inhibition_time\":[],\n \"tau_ref_in\":[],\n \"tau_ref_out\":[],\n \"inc_n\":[],\n \"tau_n\":[],\n \"synapse_layer_1\":[],\n \"winit_max\":[],\n \"vprog_increment\":[],\n \"voltage_clip_max\":[],\n \"voltage_clip_min\":[],\n \"Vapp_multiplier\":[],\n \"gain_in\":[],\n \"bias_in\":[],\n \"noise_input\":[],\n \"accuracy\":[],\n \"accuracy_2\":[]\n })\n\n\tif args.log_file_path is None:\n\t\tlog_dir = pwd+'/log_dir/'\n\telse : \n\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+'test.csv', index=False)\n\n\tparameters = dict(\n\t\tinput_scale = [1]\n\t\t, amp_neuron=[0.05]\n\t\t,input_nbr=[60000]\n\t\t,tau_in = [0.03]\n\t\t,tau_out = [0.03]\n\t\t, lr = [0.004,0.005,0.006,0.007,0.008,0.009,0.01,0.011]\n\t\t, alpha= [0]\n\t\t, multiplicative = [1]\n\t\t, iterations=[1]\n\t\t, presentation_time = [0.35]\n\t\t, pause_time = [0]\n\t\t, dt = [0.005]\n\t\t, n_neurons = [400]\n\t\t, inhibition_time = [10]\n\t\t, tau_ref_in = [0.005]\n\t\t, tau_ref_out = [0.005]\n\t\t, inc_n = [0.01]\n\t\t, tau_n = [1]\n\t\t, synapse_layer_1=[0.005]\n\t\t, winit_max = [1]\n\t\t, vprog_increment = [0]\n\t\t, voltage_clip_max=[1]\n\t\t, voltage_clip_min = [-1]\n\t\t, Vapp_multiplier = [1]\n\t\t, gain_in = [4]\n\t\t, bias_in = [0.5]\n\t\t, noise_input = [0]\n\t\t, seed =[420]\n )\n\tparam_values = [v for v in parameters.values()]\n\n\tnow = time.strftime(\"%Y%m%d-%H%M%S\")\n\tfolder = os.getcwd()+\"/MNIST_VDSP_explorartion\"+now\n\tos.mkdir(folder)\n\n\tfor args.input_scale,args.amp_neuron,args.input_nbr,args.tau_in,args.tau_out,args.lr,args.alpha,args.multiplicative,args.iterations,args.presentation_time,args.pause_time, args.dt,args.n_neurons,args.inhibition_time,args.tau_ref_in,args.tau_ref_out,args.inc_n,args.tau_n,args.synapse_layer_1,args.winit_max,args.vprog_increment,args.voltage_clip_max,args.voltage_clip_min,args.Vapp_multiplier,args.gain_in,args.bias_in,args.noise_input,args.seed in product(*param_values):\n\n\n\t\t# args.pause_time = 0\n\n\t\t# args.filename = 'vprog-'+str(args.vprog)+'-g_max-'+str(args.g_max)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)\n\t\targs.filename = 'input_scale-'+str(args.input_scale)+'amp_neuron'+str(args.amp_neuron)+'-tau_in-'+str(args.tau_in)+'-tau_out-'+str(args.tau_out)+'-lr-'+str(args.lr)+'-presentation_time-'+str(args.presentation_time)+'pause_time'+str(args.pause_time) + 
'dt-'+str(args.dt)+'ref-'+str(args.tau_ref_in)+str(args.tau_ref_out)+'gain-'+str(args.gain_in)+'bias_in'+str(args.bias_in)+'adaptation'+str(args.inc_n)+str(args.tau_n)+'noise'+str(args.noise_input)+'Vapp_multiplier-'+str(args.Vapp_multiplier)+'winit_max'+str(args.winit_max)+str(args.voltage_clip_max)+str(args.voltage_clip_min)+str(args.n_neurons)+str(args.seed)+str(args.alpha)+str(args.iterations)+str(args.multiplicative)\n\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\taccuracy, accuracy_2,weights = evaluate_mnist_multiple_baseline(args)\n\n\t\tdf = df.append({ \"input_scale\":args.input_scale,\n\t\t\t\t\t\t\"amp_neuron\":args.amp_neuron,\n\t\t\t\t\t\t \"vth\":args.vthp,\n\t\t\t\t\t\t \"input_nbr\":args.input_nbr,\n\t\t\t\t\t\t \"tau_in\":args.tau_in,\n\t\t\t\t\t\t \"tau_out\": args.tau_out,\n\t\t\t\t\t\t \"lr\": args.lr,\n\t\t\t\t\t\t \"alpha\": args.alpha,\n\t\t\t\t\t\t \"multiplicative\":args.multiplicative,\n\t\t\t\t\t\t \"iterations\":args.iterations,\n\t\t \"presentation_time\":args.presentation_time,\n\t\t \"pause_time\":args.pause_time,\n\t\t \"dt\":args.dt,\n\t\t \"n_neurons\":args.n_neurons,\n\t\t \"seed\":args.seed,\n\t\t \"inhibition_time\":args.inhibition_time,\n\t\t \"tau_ref_in\":args.tau_ref_in,\n\t\t \"tau_ref_out\":args.tau_ref_out,\n\t\t \"inc_n\":args.inc_n,\n\t\t \"tau_n\":args.tau_n,\n\t\t \"synapse_layer_1\":args.synapse_layer_1,\n\t\t \"winit_max\":args.winit_max,\n\t\t \"vprog_increment\":args.vprog_increment,\n\t\t \"voltage_clip_max\":args.voltage_clip_max,\n\t\t \"voltage_clip_min\":args.voltage_clip_min,\n\t\t \"Vapp_multiplier\":args.Vapp_multiplier,\n\t\t \"gain_in\":args.gain_in,\n\t\t \"bias_in\":args.bias_in,\n\t\t \"noise_input\":args.noise_input,\n\t\t \"accuracy\":accuracy,\n\t\t \"accuracy_2\":accuracy_2\n\t\t },ignore_index=True)\n\t\t\n\n\t\tplot = True\n\t\tif plot : \t\n\t\t\tprint('accuracy', accuracy)\n\t\t\tprint(args.filename)\n\t\t\t# weights = weights[-1]#Taking only the last weight for plotting\n\n\t\t\tcolumns = int(args.n_neurons/5)\n\t\t\trows = int(args.n_neurons/columns)\n\n\t\t\tfig, axes = plt.subplots(int(args.n_neurons/columns), int(columns), figsize=(columns*5,rows*5))\n\n\t\t\tfor i in range(0,(args.n_neurons)):\n\t\t\t\taxes[int(i/columns)][int(i%columns)].matshow(np.reshape(weights[i],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t\taxes[int(i/columns)][int(i%columns)].get_xaxis().set_visible(False)\n\t\t\t\taxes[int(i/columns)][int(i%columns)].get_yaxis().set_visible(False)\n\t\t\tplt.tight_layout() \n\t\t\tplt.axis('off')\n\n\t \n\t\t\t# fig, axes = plt.subplots(1,1, figsize=(3,3))\n\t\t\t# fig = plt.figure()\n\t\t\t# ax1 = fig.add_subplot()\n\t\t\t# cax = ax1.matshow(np.reshape(weights[0],(28,28)),interpolation='nearest', vmax=1, vmin=0)\n\t\t\t# fig.colorbar(cax)\n\t\t\t# plt.tight_layout() \n\n\t\t\tif args.log_file_path is None:\n\t\t\t\tlog_dir = pwd+'/log_dir/'\n\t\t\telse : \n\t\t\t\tlog_dir = args.log_file_path\n\t\t\tdf.to_csv(log_dir+log_file_name, index=False) \n\n\t\t\tfig.savefig(log_dir+args.filename+'weights.png')\n\t\t\tplt.close()\n\n\t\t\tplt.clf()\n\t\t\tplt.hist(weights.flatten())\n\n\t\t\tplt.tight_layout() \n\t\t\tplt.savefig(log_dir+args.filename+'histogram.png')\n\n\n\t\t\t# plt.figure(figsize=(12,10))\n\n\t\t\t# plt.subplot(2, 1, 1)\n\t\t\t# plt.title('Input neurons')\n\t\t\t# rasterplot(time_points, p_input_layer)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.subplot(2, 1, 
2)\n\t\t\t# plt.title('Output neurons')\n\t\t\t# rasterplot(time_points, p_layer_1)\n\t\t\t# plt.xlabel(\"Time [s]\")\n\t\t\t# plt.ylabel(\"Neuron index\")\n\n\t\t\t# plt.tight_layout()\n\n\t\t\t# plt.savefig(folder+'/raster'+str(args.filename)+'.png')\n\t\ttimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\t\tlog_file_name = 'accuracy_log'+'.csv'\n\t\tpwd = os.getcwd()\n\n\t\tif args.log_file_path is None:\n\t\t\tlog_dir = pwd+'/log_dir/'\n\t\telse : \n\t\t\tlog_dir = args.log_file_path\n\t\tdf.to_csv(log_dir+log_file_name, index=False)\n\n\tdf.to_csv(log_file_name, index=False)\n\n\n\tlogger.info('All done.')"
] |
[
[
"matplotlib.pyplot.tight_layout",
"numpy.random.seed",
"numpy.reshape",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.random.seed",
"numpy.reshape",
"pandas.DataFrame",
"matplotlib.pyplot.close"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.random.seed",
"numpy.reshape",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.random.seed",
"numpy.reshape",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.random.seed",
"numpy.reshape",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis"
],
[
"numpy.random.seed",
"numpy.tile",
"numpy.ones",
"numpy.intersect1d",
"numpy.argmax",
"numpy.where",
"numpy.load",
"numpy.zeros",
"numpy.sum"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.random.seed",
"numpy.reshape",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.random.seed",
"numpy.reshape",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.random.seed",
"numpy.reshape",
"pandas.DataFrame",
"matplotlib.pyplot.close"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.random.seed",
"numpy.reshape",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
SDJustus/AutoClassifier
|
[
"e9a9fc563a5df32542a6c9410da4e91e62d8ee4e"
] |
[
"split_train.py"
] |
[
"import torch\nimport os\nfrom torchvision import datasets\nfrom torchvision import transforms\n\nclass ImageFolderWithPaths(datasets.ImageFolder):\n \"\"\"Custom dataset that includes image file paths. Extends\n torchvision.datasets.ImageFolder\n \"\"\"\n\n # override the __getitem__ method. this is the method that dataloader calls\n def __getitem__(self, index):\n # this is what ImageFolder normally returns \n original_tuple = super(ImageFolderWithPaths, self).__getitem__(index)\n # the image file path\n path = self.imgs[index][0]\n path = os.path.split(path)[1]\n # make a new tuple that includes original and the path\n tuple_with_path = (original_tuple + (path,))\n return tuple_with_path\n\ndef get_train_valid_loader(batch_size,\n path,\n shuffle=True,\n pin_memory=False):\n \"\"\"\n Utility function for loading and returning train and valid\n multi-process iterators over the CIFAR-10 dataset. A sample\n 9x9 grid of the images can be optionally displayed.\n If using CUDA, num_workers should be set to 1 and pin_memory to True.\n Params\n ------\n - path: path directory to the dataset.\n - batch_size: how many samples per batch to load.\n - augment: whether to apply the data augmentation scheme\n mentioned in the paper. Only applied on the train split.\n - random_seed: fix seed for reproducibility.\n - valid_size: percentage split of the training set used for\n the validation set. Should be a float in the range [0, 1].\n - shuffle: whether to shuffle the train/validation indices.\n - num_workers: number of subprocesses to use when loading the dataset.\n - pin_memory: whether to copy tensors into CUDA pinned memory. Set it to\n True if using GPU.\n Returns\n -------\n - train_loader: training set iterator.\n - valid_loader: validation set iterator.\n \"\"\"\n\n train_transform = transforms.Compose([\n transforms.ToTensor(),\n #transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))\n ])\n\n # load the dataset\n train_dataset = ImageFolderWithPaths(root=os.path.join(path, \"train/\"), transform=train_transform)\n test_dataset = ImageFolderWithPaths(root=os.path.join(path, \"test/\"), transform=train_transform)\n inference_dataset = ImageFolderWithPaths(root=os.path.join(path, \"inference/\"), transform=train_transform)\n\n\n train_loader = torch.utils.data.DataLoader(\n train_dataset, shuffle=shuffle, batch_size=batch_size, pin_memory=pin_memory,\n )\n test_loader = torch.utils.data.DataLoader(\n test_dataset, shuffle=False, batch_size=batch_size, pin_memory=pin_memory,\n )\n inference_loader = torch.utils.data.DataLoader(\n inference_dataset, shuffle=False, batch_size=batch_size, pin_memory=pin_memory,\n )\n\n #food101_mean, food101_std = online_mean_and_sd(train_loader)\n #print(f'Mean:{food101_mean}, STD:{food101_std}')\n\n return (train_loader, test_loader, inference_loader)\n\n\ndef online_mean_and_sd(loader):\n \"\"\"Compute the mean and sd in an online fashion\n\n Var[x] = E[X^2] - E^2[X]\n \"\"\"\n cnt = 0\n fst_moment = torch.empty(3)\n snd_moment = torch.empty(3)\n\n for images, _ in loader:\n\n b, c, h, w = images.shape\n nb_pixels = b * h * w\n sum_ = torch.sum(images, dim=[0, 2, 3])\n sum_of_square = torch.sum(images ** 2, dim=[0, 2, 3])\n fst_moment = (cnt * fst_moment + sum_) / (cnt + nb_pixels)\n snd_moment = (cnt * snd_moment + sum_of_square) / (cnt + nb_pixels)\n\n cnt += nb_pixels\n\n return fst_moment, torch.sqrt(snd_moment - fst_moment ** 2)"
] |
[
[
"torch.sqrt",
"torch.sum",
"torch.utils.data.DataLoader",
"torch.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mloo3/stable-baselines3
|
[
"e908583e2a716f51e3f84004c7316205d4f0b230",
"e908583e2a716f51e3f84004c7316205d4f0b230"
] |
[
"stable_baselines3/common/noise.py",
"stable_baselines3/common/evaluation.py"
] |
[
"import copy\nfrom abc import ABC, abstractmethod\nfrom typing import Iterable, List, Optional\n\nimport numpy as np\n\n\nclass ActionNoise(ABC):\n \"\"\"\n The action noise base class\n \"\"\"\n\n def __init__(self):\n super(ActionNoise, self).__init__()\n\n def reset(self) -> None:\n \"\"\"\n call end of episode reset for the noise\n \"\"\"\n pass\n\n @abstractmethod\n def __call__(self) -> np.ndarray:\n raise NotImplementedError()\n\n\nclass NormalActionNoise(ActionNoise):\n \"\"\"\n A Gaussian action noise\n\n :param mean: (np.ndarray) the mean value of the noise\n :param sigma: (np.ndarray) the scale of the noise (std here)\n \"\"\"\n\n def __init__(self, mean: np.ndarray, sigma: np.ndarray):\n self._mu = mean\n self._sigma = sigma\n super(NormalActionNoise, self).__init__()\n\n def __call__(self) -> np.ndarray:\n return np.random.normal(self._mu, self._sigma)\n\n def __repr__(self) -> str:\n return f\"NormalActionNoise(mu={self._mu}, sigma={self._sigma})\"\n\n\nclass OrnsteinUhlenbeckActionNoise(ActionNoise):\n \"\"\"\n An Ornstein Uhlenbeck action noise, this is designed to approximate Brownian motion with friction.\n\n Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab\n\n :param mean: (np.ndarray) the mean of the noise\n :param sigma: (np.ndarray) the scale of the noise\n :param theta: (float) the rate of mean reversion\n :param dt: (float) the timestep for the noise\n :param initial_noise: (Optional[np.ndarray]) the initial value for the noise output, (if None: 0)\n \"\"\"\n\n def __init__(\n self,\n mean: np.ndarray,\n sigma: np.ndarray,\n theta: float = 0.15,\n dt: float = 1e-2,\n initial_noise: Optional[np.ndarray] = None,\n ):\n self._theta = theta\n self._mu = mean\n self._sigma = sigma\n self._dt = dt\n self.initial_noise = initial_noise\n self.noise_prev = np.zeros_like(self._mu)\n self.reset()\n super(OrnsteinUhlenbeckActionNoise, self).__init__()\n\n def __call__(self) -> np.ndarray:\n noise = (\n self.noise_prev\n + self._theta * (self._mu - self.noise_prev) * self._dt\n + self._sigma * np.sqrt(self._dt) * np.random.normal(size=self._mu.shape)\n )\n self.noise_prev = noise\n return noise\n\n def reset(self) -> None:\n \"\"\"\n reset the Ornstein Uhlenbeck noise, to the initial position\n \"\"\"\n self.noise_prev = self.initial_noise if self.initial_noise is not None else np.zeros_like(self._mu)\n\n def __repr__(self) -> str:\n return f\"OrnsteinUhlenbeckActionNoise(mu={self._mu}, sigma={self._sigma})\"\n\n\nclass VectorizedActionNoise(ActionNoise):\n \"\"\"\n A Vectorized action noise for parallel environments.\n\n :param base_noise: ActionNoise The noise generator to use\n :param n_envs: (int) The number of parallel environments\n \"\"\"\n\n def __init__(self, base_noise: ActionNoise, n_envs: int):\n try:\n self.n_envs = int(n_envs)\n assert self.n_envs > 0\n except (TypeError, AssertionError):\n raise ValueError(f\"Expected n_envs={n_envs} to be positive integer greater than 0\")\n\n self.base_noise = base_noise\n self.noises = [copy.deepcopy(self.base_noise) for _ in range(n_envs)]\n\n def reset(self, indices: Optional[Iterable[int]] = None) -> None:\n \"\"\"\n Reset all the noise processes, or those listed in indices\n\n :param indices: Optional[Iterable[int]] The indices to reset. 
Default: None.\n If the parameter is None, then all processes are reset to their initial position.\n \"\"\"\n if indices is None:\n indices = range(len(self.noises))\n\n for index in indices:\n self.noises[index].reset()\n\n def __repr__(self) -> str:\n return f\"VecNoise(BaseNoise={repr(self.base_noise)}), n_envs={len(self.noises)})\"\n\n def __call__(self) -> np.ndarray:\n \"\"\"\n Generate and stack the action noise from each noise object\n \"\"\"\n noise = np.stack([noise() for noise in self.noises])\n return noise\n\n @property\n def base_noise(self) -> ActionNoise:\n return self._base_noise\n\n @base_noise.setter\n def base_noise(self, base_noise: ActionNoise):\n if base_noise is None:\n raise ValueError(\"Expected base_noise to be an instance of ActionNoise, not None\", ActionNoise)\n if not isinstance(base_noise, ActionNoise):\n raise TypeError(\"Expected base_noise to be an instance of type ActionNoise\", ActionNoise)\n self._base_noise = base_noise\n\n @property\n def noises(self) -> List[ActionNoise]:\n return self._noises\n\n @noises.setter\n def noises(self, noises: List[ActionNoise]) -> None:\n noises = list(noises) # raises TypeError if not iterable\n assert len(noises) == self.n_envs, f\"Expected a list of {self.n_envs} ActionNoises, found {len(noises)}.\"\n\n different_types = [i for i, noise in enumerate(noises) if not isinstance(noise, type(self.base_noise))]\n\n if len(different_types):\n raise ValueError(\n f\"Noise instances at indices {different_types} don't match the type of base_noise\", type(self.base_noise)\n )\n\n self._noises = noises\n for noise in noises:\n noise.reset()\n",
"import typing\nfrom typing import Callable, List, Optional, Tuple, Union\n\nimport gym\nimport numpy as np\n\nfrom stable_baselines3.common.vec_env import VecEnv\n\nif typing.TYPE_CHECKING:\n from stable_baselines3.common.base_class import BaseAlgorithm\n\n\ndef evaluate_policy(\n model: \"BaseAlgorithm\",\n env: Union[gym.Env, VecEnv],\n n_eval_episodes: int = 10,\n deterministic: bool = True,\n render: bool = False,\n callback: Optional[Callable] = None,\n reward_threshold: Optional[float] = None,\n return_episode_rewards: bool = False,\n) -> Union[Tuple[float, float], Tuple[List[float], List[int]]]:\n \"\"\"\n Runs policy for ``n_eval_episodes`` episodes and returns average reward.\n This is made to work only with one env.\n\n :param model: (BaseAlgorithm) The RL agent you want to evaluate.\n :param env: (gym.Env or VecEnv) The gym environment. In the case of a ``VecEnv``\n this must contain only one environment.\n :param n_eval_episodes: (int) Number of episode to evaluate the agent\n :param deterministic: (bool) Whether to use deterministic or stochastic actions\n :param render: (bool) Whether to render the environment or not\n :param callback: (callable) callback function to do additional checks,\n called after each step.\n :param reward_threshold: (float) Minimum expected reward per episode,\n this will raise an error if the performance is not met\n :param return_episode_rewards: (Optional[float]) If True, a list of reward per episode\n will be returned instead of the mean.\n :return: (float, float) Mean reward per episode, std of reward per episode\n returns ([float], [int]) when ``return_episode_rewards`` is True\n \"\"\"\n if isinstance(env, VecEnv):\n assert env.num_envs == 1, \"You must pass only one environment when using this function\"\n\n episode_rewards, episode_lengths = [], []\n for i in range(n_eval_episodes):\n # Avoid double reset, as VecEnv are reset automatically\n if not isinstance(env, VecEnv) or i == 0:\n obs = env.reset()\n done, state = False, None\n episode_reward = 0.0\n episode_length = 0\n while not done:\n action, state = model.predict(obs, state=state, deterministic=deterministic)\n obs, reward, done, _info = env.step(action)\n episode_reward += reward\n if callback is not None:\n callback(locals(), globals())\n episode_length += 1\n if render:\n env.render()\n episode_rewards.append(episode_reward)\n episode_lengths.append(episode_length)\n mean_reward = np.mean(episode_rewards)\n std_reward = np.std(episode_rewards)\n if reward_threshold is not None:\n assert mean_reward > reward_threshold, \"Mean reward below threshold: \" f\"{mean_reward:.2f} < {reward_threshold:.2f}\"\n if return_episode_rewards:\n return episode_rewards, episode_lengths\n return mean_reward, std_reward\n"
] |
[
[
"numpy.random.normal",
"numpy.zeros_like",
"numpy.sqrt"
],
[
"numpy.std",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ikassi/menpo
|
[
"ca702fc814a1ad50b27c44c6544ba364d3aa7e31",
"ca702fc814a1ad50b27c44c6544ba364d3aa7e31",
"ca702fc814a1ad50b27c44c6544ba364d3aa7e31"
] |
[
"menpo/shape/pointcloud.py",
"menpo/interpolation/base.py",
"menpo/image/test/image_warp_test.py"
] |
[
"import numpy as np\nfrom scipy.spatial.distance import cdist\nfrom menpo.visualize import PointCloudViewer\nfrom menpo.shape.base import Shape\n\n\nclass PointCloud(Shape):\n r\"\"\"\n An N-dimensional point cloud. This is internally represented as an ndarray\n of shape (``n_points``, ``n_dims``). This class is important for dealing\n with complex functionality such as viewing and representing metadata such\n as landmarks.\n\n Currently only 2D and 3D pointclouds are viewable.\n\n Parameters\n ----------\n points : (N, D) ndarray\n A (``n_points``, ``n_dims``) ndarray representing the points.\n \"\"\"\n\n def __init__(self, points):\n super(PointCloud, self).__init__()\n self.points = np.array(points, copy=True, order='C')\n\n @property\n def n_points(self):\n r\"\"\"\n The number of points in the pointcloud.\n\n :type: int\n \"\"\"\n return self.points.shape[0]\n\n @property\n def n_dims(self):\n r\"\"\"\n The number of dimensions in the pointcloud.\n\n :type: int\n \"\"\"\n return self.points.shape[1]\n\n @property\n def centre(self):\n r\"\"\"\n The mean of all the points in this PointCloud (in the centre of mass\n sense)\n\n :type: (D,) ndarray\n The mean of this PointCloud's points.\n \"\"\"\n return np.mean(self.points, axis=0)\n\n @property\n def centre_of_bounds(self):\n r\"\"\"\n The centre of the absolute bounds of this PointCloud. Contrast with\n centre, which is the mean point position.\n\n :type: (D,) ndarray\n The centre of the bounds of this PointCloud.\n \"\"\"\n min_b, max_b = self.bounds()\n return (min_b + max_b) / 2\n\n def as_vector(self):\n r\"\"\"\n Returns a flattened representation of the pointcloud.\n Note that the flattened representation is of the form\n ``[x0, y0, x1, y1, ....., xn, yn]`` for 2D.\n\n Returns\n -------\n flattened : (N,) ndarray\n The flattened points.\n \"\"\"\n return self.points.flatten()\n\n def tojson(self):\n r\"\"\"\n Convert this `PointCloud` to a dictionary JSON representation.\n\n Returns\n -------\n dict with a 'points' key, the value of which is a list suitable\n for use in the by the `json` standard library package.\n \"\"\"\n return {'points': self.points.tolist()}\n\n def from_vector_inplace(self, vector):\n r\"\"\"\n Updates this PointCloud in-place with a new vector of parameters\n \"\"\"\n self.points = vector.reshape([-1, self.n_dims])\n\n def __str__(self):\n return '{}: n_points: {}, n_dims: {}'.format(type(self).__name__,\n self.n_points,\n self.n_dims)\n\n def bounds(self, boundary=0):\n r\"\"\"\n The minimum to maximum extent of the :class:`PointCloud`.\n An optional boundary argument can be provided to expand the bounds\n by a constant margin.\n\n Parameters\n ----------\n boundary: b float\n A optional padding distance that is added to the bounds. Default\n is zero, meaning the max/min of tightest possible containing\n square/cube/hypercube is returned.\n\n Returns\n --------\n min_b : (D,) ndarray\n The minimum extent of the :class:`PointCloud` and boundary along\n each dimension\n\n max_b : (D,) ndarray\n The maximum extent of the :class:`PointCloud` and boundary along\n each dimension\n \"\"\"\n min_b = np.min(self.points, axis=0) - boundary\n max_b = np.max(self.points, axis=0) + boundary\n return min_b, max_b\n\n def range(self, boundary=0):\n r\"\"\"\n The range of the extent of the :class:`PointCloud`.\n\n Parameters\n ----------\n boundary: b float\n A optional padding distance that is used to extend the bounds\n from which the range is computed. 
Default is zero, no extension\n is performed.\n\n Returns\n --------\n range : (D,) ndarray\n The range of the :class:`PointCloud`s extent in each dimension.\n \"\"\"\n min_b, max_b = self.bounds(boundary)\n return max_b - min_b\n\n def _view(self, figure_id=None, new_figure=False, **kwargs):\n return PointCloudViewer(figure_id, new_figure,\n self.points).render(**kwargs)\n\n def _transform_self_inplace(self, transform):\n self.points = transform(self.points)\n return self\n\n def distance_to(self, pointcloud, **kwargs):\n r\"\"\"\n Returns a distance matrix between this point cloud and another.\n By default the Euclidian distance is calculated - see\n ``scipy.spatial.distance.cdist`` for valid kwargs to change the metric\n and other properties.\n\n Parameters\n ----------\n pointcloud : :class:`PointCloud`\n The second pointcloud to compute distances between. This must be\n of the same dimension as this PointCloud.\n\n Returns\n -------\n distance_matrix: (N, M) ndarray\n The symmetric pairwise distance matrix between the two PointClouds\n s.t. distance_matrix[i, j] is the distance between the i'th\n point of this PointCloud and the j'th point of the input\n PointCloud.\n \"\"\"\n if self.n_dims != pointcloud.n_dims:\n raise ValueError(\"The two PointClouds must be of the same \"\n \"dimensionality.\")\n return cdist(self.points, pointcloud.points, **kwargs)\n\n def norm(self, **kwargs):\n r\"\"\"\n Returns the norm of this point cloud. This is a translation and\n rotation invariant measure of the point cloud's intrinsic size - in\n other words, it is always taken around the point cloud's centre.\n\n By default, the Frobenius norm is taken, but this can be changed by\n setting kwargs - see numpy.linalg.norm for valid options.\n\n Returns\n -------\n norm: float\n The norm of this :class:`PointCloud`\n \"\"\"\n return np.linalg.norm(self.points - self.centre, **kwargs)\n\n def from_mask(self, mask):\n \"\"\"\n A 1D boolean array with the same number of elements as the number of\n points in the pointcloud. This is then broadcast across the dimensions\n of the pointcloud and returns a new pointcloud containing only those\n points that were `True` in the mask.\n\n Parameters\n ----------\n mask : (N,) ndarray\n 1D array of booleans\n\n Returns\n -------\n pointcloud : :class:`PointCloud`\n A new pointcloud that has been masked.\n \"\"\"\n return PointCloud(self.points[mask, :])\n\n def update_from_mask(self, mask):\n \"\"\"\n A 1D boolean array with the same number of elements as the number of\n points in the pointcloud. This is then broadcast across the dimensions\n of the pointcloud. The same pointcloud is updated in place.\n\n Parameters\n ----------\n mask : (N,) ndarray\n 1D array of booleans\n\n Returns\n -------\n pointcloud : :class:`PointCloud`\n A pointer to self.\n \"\"\"\n self.points = self.points[mask, :]\n return self\n",
"import numpy as np\nfrom scipy.ndimage import map_coordinates\nfrom menpo.interpolation.cinterp import interp2\n\n\ndef c_interpolation(ndimage, points_to_sample, mode='bilinear'):\n r\"\"\"\n C-based interpolator that was designed to be identical when\n used in both Python and Matlab.\n\n Parameters\n ----------\n ndimage : (M, N, ..., C) ndarray\n The image that is to be sampled from. The final axis channels.\n points_to_sample: (K, n_points) ndarray\n The points which should be sampled from pixels\n mode : {'bilinear', 'bicubic', 'nearest'}, optional\n The type of interpolation to be carried out.\n\n Default: bilinear\n\n Returns\n -------\n sampled_image : ndarray\n The pixel information sampled at each of the points.\n \"\"\"\n return interp2(ndimage, points_to_sample[0, :], points_to_sample[1, :],\n mode=mode)\n\n\ndef scipy_interpolation(pixels, points_to_sample, mode='constant', order=1):\n r\"\"\"\n C-based interpolator that was designed to be identical when\n used in both Python and Matlab.\n\n Parameters\n ----------\n ndimage : (M, N, ..., C) ndarray\n The image that is to be sampled from. The final axis channels.\n points_to_sample: (K, n_points) ndarray\n The points which should be sampled from pixels\n mode : {'constant', 'nearest', 'reflect', 'wrap'}, optional\n Points outside the boundaries of the input are filled according to the\n given mode.\n\n Default: 'constant' (0)\n order : int, optional\n The order of the spline interpolation. The order has to be in the\n range 0-5.\n\n Default: 1\n\n Returns\n -------\n sampled_image : ndarray\n The pixel information sampled at each of the points.\n \"\"\"\n sampled_pixel_values = []\n # Loop over every channel in image - we know last axis is always channels\n for i in xrange(pixels.shape[-1]):\n sampled_pixel_values.append(map_coordinates(pixels[..., i],\n points_to_sample,\n mode=mode,\n order=order))\n sampled_pixel_values = [v.reshape([-1, 1]) for v in sampled_pixel_values]\n return np.concatenate(sampled_pixel_values, axis=1)\n",
"import numpy as np\nfrom numpy.testing import assert_allclose\nfrom menpo.transform import AffineTransform\nimport menpo.io as pio\n\n\n# Setup the static assets (the takeo image)\nrgb_image = pio.import_builtin_asset('takeo.ppm')\ngray_image = rgb_image.as_greyscale()\n\ngray_template = gray_image.cropped_copy(np.array([70, 30]),\n np.array([169, 129]))\nrgb_template = rgb_image.cropped_copy(np.array([70, 30]),\n np.array([169, 129]))\ntemplate_mask = gray_template.mask\n\ninitial_params = np.array([0, 0, 0, 0, 70, 30])\nrow_indices, col_indices = np.meshgrid(np.arange(50, 100), np.arange(50, 100),\n indexing='ij')\nrow_indices, col_indices = row_indices.flatten(), col_indices.flatten()\nmulti_expected = rgb_image.cropped_copy([50, 50],\n [100, 100]).pixels.flatten()\n\n\ndef test_scipy_warp_gray():\n target_transform = AffineTransform.identity(2).from_vector(initial_params)\n warped_im = gray_image.warp_to(template_mask, target_transform)\n\n assert(warped_im.shape == gray_template.shape)\n assert_allclose(warped_im.pixels, gray_template.pixels)\n\n\ndef test_scipy_warp_multi():\n target_transform = AffineTransform.identity(2).from_vector(initial_params)\n warped_im = rgb_image.warp_to(template_mask, target_transform)\n\n assert(warped_im.shape == rgb_template.shape)\n assert_allclose(warped_im.pixels, rgb_template.pixels)\n\n\ndef test_c_warp_gray():\n target_transform = AffineTransform.identity(2).from_vector(initial_params)\n warped_im = gray_image.warp_to(template_mask, target_transform,\n interpolator='c')\n\n assert(warped_im.shape == gray_template.shape)\n assert_allclose(warped_im.pixels, gray_template.pixels)\n\n\ndef test_cinterp2_warp_multi():\n target_transform = AffineTransform.identity(2).from_vector(initial_params)\n warped_im = rgb_image.warp_to(template_mask, target_transform,\n interpolator='scipy')\n assert(warped_im.shape == rgb_template.shape)\n assert_allclose(warped_im.pixels, rgb_template.pixels)\n\n\n## TODO: Not 100% on the best way to test this?\n#def test_cinterp2_warp_gray_warp_mask():\n# target_transform = AffineTransform.identity(2).from_vector(initial_params)\n# warped_im = cinterp2_warp(gray_image, gray_template, target_transform,\n# warp_mask=True)\n#\n# assert(warped_im.shape == gray_template.shape)\n# assert_allclose(warped_im.pixels, gray_template.pixels)\n\n## TODO: Not 100% on the best way to test this?\n#def test_scipy_warp_gray_warp_mask():\n# target_transform = AffineTransform.identity(2).from_vector(initial_params)\n# warped_im = gray_image.warp(template_mask, target_transform,\n# warp_mask=True)\n#\n# assert(warped_im.shape == gray_template.shape)\n# assert_allclose(warped_im.pixels, gray_template.pixels)\n"
] |
[
[
"numpy.min",
"scipy.spatial.distance.cdist",
"numpy.linalg.norm",
"numpy.max",
"numpy.mean",
"numpy.array"
],
[
"numpy.concatenate",
"scipy.ndimage.map_coordinates"
],
[
"numpy.arange",
"numpy.array",
"numpy.testing.assert_allclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chalant/trading_calendars
|
[
"ab4e3ba34f12a7f21eed83976dcb6a358990bef7"
] |
[
"tests/test_xtse_calendar.py"
] |
[
"from unittest import TestCase\nimport pandas as pd\n\nfrom .test_trading_calendar import ExchangeCalendarTestBase\nfrom trading_calendars.exchange_calendar_xtse import XTSEExchangeCalendar\n\n\nclass XTSECalendarTestCase(ExchangeCalendarTestBase, TestCase):\n\n answer_key_filename = 'xtse'\n calendar_class = XTSEExchangeCalendar\n\n MAX_SESSION_HOURS = 6.5\n\n def test_2012(self):\n expected_holidays_2012 = [\n pd.Timestamp(\"2012-01-02\", tz='UTC'), # New Year's observed\n pd.Timestamp(\"2012-02-20\", tz='UTC'), # Family Day\n pd.Timestamp(\"2012-04-06\", tz='UTC'), # Good Friday\n pd.Timestamp(\"2012-05-21\", tz='UTC'), # Victoria Day\n pd.Timestamp(\"2012-07-02\", tz='UTC'), # Canada Day\n pd.Timestamp(\"2012-08-06\", tz='UTC'), # Civic Holiday\n pd.Timestamp(\"2012-09-03\", tz='UTC'), # Labour Day\n pd.Timestamp(\"2012-10-08\", tz='UTC'), # Thanksgiving\n pd.Timestamp(\"2012-12-25\", tz='UTC'), # Christmas\n pd.Timestamp(\"2012-12-26\", tz='UTC'), # Boxing Day\n ]\n\n for session_label in expected_holidays_2012:\n self.assertNotIn(session_label, self.calendar.all_sessions)\n\n # early closes we expect:\n early_closes_2012 = [\n pd.Timestamp(\"2012-12-24\", tz='UTC')\n ]\n\n for early_close_session_label in early_closes_2012:\n self.assertIn(early_close_session_label,\n self.calendar.early_closes)\n\n def test_special_holidays(self):\n # 9/11\n # Sept 11, 12, 2001\n self.assertNotIn(pd.Period(\"9/11/2001\"), self.calendar.all_sessions)\n self.assertNotIn(pd.Period(\"9/12/2001\"), self.calendar.all_sessions)\n\n def test_new_years(self):\n \"\"\"\n Check whether the TradingCalendar contains certain dates.\n \"\"\"\n # January 2012\n # Su Mo Tu We Th Fr Sa\n # 1 2 3 4 5 6 7\n # 8 9 10 11 12 13 14\n # 15 16 17 18 19 20 21\n # 22 23 24 25 26 27 28\n # 29 30 31\n\n start_session = pd.Timestamp(\"2012-01-02\", tz='UTC')\n end_session = pd.Timestamp(\"2013-12-31\", tz='UTC')\n sessions = self.calendar.sessions_in_range(start_session, end_session)\n\n day_after_new_years_sunday = pd.Timestamp(\"2012-01-02\", tz='UTC')\n self.assertNotIn(\n day_after_new_years_sunday,\n sessions,\n \"If NYE falls on a weekend, {0} the Monday after is a holiday.\"\n .format(day_after_new_years_sunday)\n )\n\n first_trading_day_after_new_years_sunday = pd.Timestamp(\"2012-01-03\",\n tz='UTC')\n self.assertIn(\n first_trading_day_after_new_years_sunday,\n sessions,\n \"If NYE falls on a weekend, {0} the Tuesday after is the \"\n \"first trading day.\".format(\n first_trading_day_after_new_years_sunday\n )\n )\n\n # January 2013\n # Su Mo Tu We Th Fr Sa\n # 1 2 3 4 5\n # 6 7 8 9 10 11 12\n # 13 14 15 16 17 18 19\n # 20 21 22 23 24 25 26\n # 27 28 29 30 31\n\n new_years_day = pd.Timestamp(\"2013-01-01\", tz='UTC')\n self.assertNotIn(\n new_years_day,\n sessions,\n \"If NYE falls during the week, e.g. 
{0}, it is a holiday.\"\n .format(new_years_day)\n )\n\n first_trading_day_after_new_years = pd.Timestamp(\"2013-01-02\",\n tz='UTC')\n self.assertIn(\n first_trading_day_after_new_years,\n sessions,\n \"If the day after NYE falls during the week, {0} is the first \"\n \"trading day.\".format(first_trading_day_after_new_years)\n )\n\n def test_christmas_eve_half_day(self):\n # December 2009\n # Su Mo Tu We Th Fr Sa\n # 1 2 3 4 5\n # 6 7 8 9 10 11 12\n # 13 14 15 16 17 18 19\n # 20 21 22 23 24 25 26\n # 27 28 29 30 31\n\n # Christmas eve fell on a weekday in 2009 and 2010, but\n # it is only a half day from 2010 onwards\n christmas_eve09 = pd.Timestamp('2009-12-24')\n christmas_eve09_close = self.calendar.next_close(christmas_eve09)\n self.assertEqual(\n christmas_eve09_close.tz_convert('Canada/Atlantic'),\n pd.Timestamp('2009-12-24 4:00 PM', tz='Canada/Atlantic')\n )\n\n christmas_eve10 = pd.Timestamp('2010-12-24')\n christmas_eve10_close = self.calendar.next_close(christmas_eve10)\n self.assertEqual(\n christmas_eve10_close.tz_convert('Canada/Atlantic'),\n pd.Timestamp('2010-12-24 1:00 PM', tz='Canada/Atlantic')\n )\n\n # December 2012\n # Su Mo Tu We Th Fr Sa\n # 1\n # 2 3 4 5 6 7 8\n # 9 10 11 12 13 14 15\n # 16 17 18 19 20 21 22\n # 23 24 25 26 27 28 29\n # 30 31\n\n # In 2012, 2013, 2014, and 2015, Christmas eve fell on a Monday,\n # Tuesday, Wednesday, and Thursday respectively, so it should\n # be a half day on all of those days\n for year in ['2012', '2013', '2014', '2015']:\n christmas_eve = pd.Timestamp('{}-12-24'.format(year))\n christmas_eve_close = self.calendar.next_close(christmas_eve)\n\n self.assertEqual(\n christmas_eve_close.tz_convert(\"Canada/Atlantic\"),\n pd.Timestamp('{}-12-24 1:00 PM'.format(year),\n tz=\"Canada/Atlantic\"),\n )\n\n def test_christmas(self):\n # December 2015\n # Su Mo Tu We Th Fr Sa\n # 1 2 3 4 5\n # 6 7 8 9 10 11 12\n # 13 14 15 16 17 18 19\n # 20 21 22 23 24 25 26\n # 27 28 29 30 31\n\n # In 2015 Christmas fell on a Friday so Boxing Day should\n # be celebrated the following Monday\n christmas = pd.Timestamp('2015-12-25', tz='UTC')\n boxing_day_observed = pd.Timestamp('2015-12-28', tz='UTC')\n\n self.assertNotIn(christmas, self.calendar.all_sessions)\n self.assertNotIn(boxing_day_observed, self.calendar.all_sessions)\n\n # December 2010\n # Su Mo Tu We Th Fr Sa\n # 1 2 3 4\n # 5 6 7 8 9 10 11\n # 12 13 14 15 16 17 18\n # 19 20 21 22 23 24 25\n # 26 27 28 29 30 31\n\n # Christmas fell on a Saturday in 2010, so the following two trading\n # days should be holidays\n christmas_observed = pd.Timestamp('2016-12-26', tz='UTC')\n boxing_day_observed = pd.Timestamp('2016-12-27', tz='UTC')\n\n self.assertNotIn(christmas, self.calendar.all_sessions)\n self.assertNotIn(boxing_day_observed, self.calendar.all_sessions)\n\n # December 2016\n # Su Mo Tu We Th Fr Sa\n # 1 2 3\n # 4 5 6 7 8 9 10\n # 11 12 13 14 15 16 17\n # 18 19 20 21 22 23 24\n # 25 26 27 28 29 30 31\n\n # Christmas fell on a Sunday in 2016, so the 26th and 27th should\n # be holidays\n christmas_observed = pd.Timestamp('2016-12-26', tz='UTC')\n boxing_day_observed = pd.Timestamp('2016-12-27', tz='UTC')\n\n self.assertNotIn(christmas_observed, self.calendar.all_sessions)\n self.assertNotIn(boxing_day_observed, self.calendar.all_sessions)\n\n def test_victoria_day(self):\n # May 2015\n # Su Mo Tu We Th Fr Sa\n # 1 2\n # 3 4 5 6 7 8 9\n # 10 11 12 13 14 15 16\n # 17 18 19 20 21 22 23\n # 24 25 26 27 28 29 30\n # 31\n\n # Victoria Day is never held on Monday 5/25...\n self.assertIn(\n 
pd.Timestamp('2015-05-25'),\n self.calendar.all_sessions,\n )\n\n # ...but on the Monday preceding 5/25.\n self.assertNotIn(\n pd.Timestamp('2015-05-18'),\n self.calendar.all_sessions,\n )\n"
] |
[
[
"pandas.Timestamp",
"pandas.Period"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
josephw-ml/model-analysis
|
[
"3105200fd39aa46e4c4d83aa460d92aa08a4b784",
"3105200fd39aa46e4c4d83aa460d92aa08a4b784",
"3105200fd39aa46e4c4d83aa460d92aa08a4b784"
] |
[
"tensorflow_model_analysis/eval_saved_model/encoding.py",
"tensorflow_model_analysis/metrics/multi_class_confusion_matrix_metrics.py",
"tensorflow_model_analysis/api/tfma_unit_test.py"
] |
[
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Library for encoding and decoding keys, Tensors, etc in EvalSavedModel.\"\"\"\n\nimport tensorflow as tf\nfrom tensorflow_model_analysis import types\n\nfrom google.protobuf import any_pb2\nfrom tensorflow.core.protobuf import meta_graph_pb2\n\n# Names for the various collections\nTFMA_VERSION_COLLECTION = 'evaluation_only/metadata/tfma_version'\nMETRICS_COLLECTION = 'evaluation_only/metrics'\nPREDICTIONS_COLLECTION = 'evaluation_only/predictions'\nINPUT_EXAMPLE_COLLECTION = 'evaluation_only/label_graph/input_example'\nLABELS_COLLECTION = 'evaluation_only/label_graph/labels'\nFEATURES_COLLECTION = 'evaluation_only/label_graph/features'\nEXAMPLE_REF_COLLECTION = 'evaluation_only/label_graph/example_ref'\n\n# Suffixes for the collection names\nKEY_SUFFIX = 'key'\nNODE_SUFFIX = 'node'\nVALUE_OP_SUFFIX = 'value_op'\nUPDATE_OP_SUFFIX = 'update_op'\n\n# Encoding prefixes for keys\n_TUPLE_KEY_PREFIX = b'$Tuple$'\n_BYTES_KEY_PREFIX = b'$Bytes$'\n\n\ndef with_suffix(name: str, suffix: str) -> str:\n return '%s/%s' % (name, suffix) # pytype: disable=bad-return-type\n\n\ndef encode_key(key: types.FPLKeyType) -> bytes:\n \"\"\"Encode a dictionary key as a string.\n\n For encoding dictionary keys in the prediction, label and feature\n dictionaries. We assume that they are either Tuples of bytes, or bytes.\n\n Implementation details:\n Strings are encoded as $Bytes$<String>\n Tuples of strings are encoded as:\n $Tuple$<len(tuple)>$len(tuple[0])$...$len(tuple[n])$tuple[0]$...$tuple[n]\n e.g. ('apple', 'banana', 'cherry') gets encoded as\n $Tuple$3$5$6$6$apple$banana$cherry\n\n Args:\n key: Dictionary key to encode.\n\n Returns:\n Encoded dictionary key.\n\n Raises:\n TypeError: Dictionary key is not either a Tuple of bytes/unicode,\n or bytes/unicode.\n \"\"\"\n\n if isinstance(key, tuple):\n if not all(isinstance(elem, (bytes, str)) for elem in key):\n raise TypeError('if key is tuple, all elements should be strings. 
'\n 'key was: %s' % key)\n utf8_keys = [tf.compat.as_bytes(elem) for elem in key]\n length_strs = [tf.compat.as_bytes('%d' % len(key)) for key in utf8_keys]\n return (_TUPLE_KEY_PREFIX + tf.compat.as_bytes('%d' % len(length_strs)) +\n b'$' + b'$'.join(length_strs) + b'$' + b'$'.join(utf8_keys))\n elif isinstance(key, (bytes, str)):\n return b'$Bytes$' + tf.compat.as_bytes(key)\n else:\n raise TypeError('key has unrecognised type: type: %s, value %s' %\n (type(key), key))\n\n\ndef decode_key(encoded_key: bytes) -> types.FPLKeyType:\n \"\"\"Decode an encoded dictionary key encoded with encode_key.\n\n Args:\n encoded_key: Dictionary key, encoded with encode_key.\n\n Returns:\n Decoded dictionary key.\n\n Raises:\n ValueError: We couldn't decode the encoded key.\n \"\"\"\n if encoded_key.startswith(_TUPLE_KEY_PREFIX):\n parts = encoded_key[len(_TUPLE_KEY_PREFIX):].split(b'$', 1)\n if len(parts) != 2:\n raise ValueError('invalid encoding: %s' % encoded_key)\n elem_count = int(parts[0])\n parts = parts[1].split(b'$', elem_count)\n if len(parts) != elem_count + 1:\n raise ValueError('invalid encoding: %s' % encoded_key)\n lengths = map(int, parts[:elem_count])\n parts = parts[elem_count]\n elems = []\n for length in lengths:\n elems.append(parts[:length].decode('utf8'))\n parts = parts[length + 1:] # Add one for the $ delimiter\n return tuple(elems)\n elif encoded_key.startswith(_BYTES_KEY_PREFIX):\n return encoded_key[len(_BYTES_KEY_PREFIX):].decode('utf8')\n else:\n raise ValueError('invalid encoding: %s' % encoded_key)\n\n\ndef encode_tensor_node(node: types.TensorType) -> any_pb2.Any:\n \"\"\"Encode a \"reference\" to a Tensor/SparseTensor as a TensorInfo in an Any.\n\n We put the Tensor / SparseTensor in a TensorInfo, which we then wrap in an\n Any so that it can be added to the CollectionDef.\n\n Args:\n node: Tensor node.\n\n Returns:\n Any proto wrapping a TensorInfo.\n \"\"\"\n any_buf = any_pb2.Any()\n tensor_info = tf.compat.v1.saved_model.utils.build_tensor_info(node)\n any_buf.Pack(tensor_info)\n return any_buf\n\n\ndef decode_tensor_node(graph: tf.Graph,\n encoded_tensor_node: any_pb2.Any) -> types.TensorType:\n \"\"\"Decode an encoded Tensor node encoded with encode_tensor_node.\n\n Decodes the encoded Tensor \"reference\", and returns the node in the given\n graph corresponding to that Tensor.\n\n Args:\n graph: Graph the Tensor\n encoded_tensor_node: Encoded Tensor.\n\n Returns:\n Decoded Tensor.\n \"\"\"\n tensor_info = meta_graph_pb2.TensorInfo()\n encoded_tensor_node.Unpack(tensor_info)\n return tf.compat.v1.saved_model.utils.get_tensor_from_tensor_info(\n tensor_info, graph)\n",
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Multi-class confusion matrix metrics at thresholds.\"\"\"\n\nfrom typing import Callable, Dict, Iterable, List, Optional, NamedTuple\n\nimport apache_beam as beam\nimport numpy as np\nfrom tensorflow_model_analysis import types\nfrom tensorflow_model_analysis.metrics import metric_types\nfrom tensorflow_model_analysis.metrics import metric_util\nfrom tensorflow_model_analysis.proto import config_pb2\nfrom tensorflow_model_analysis.proto import metrics_for_slice_pb2\n\nMULTI_CLASS_CONFUSION_MATRIX_AT_THRESHOLDS_NAME = (\n 'multi_class_confusion_matrix_at_thresholds')\n\n\nclass MultiClassConfusionMatrixAtThresholds(metric_types.Metric):\n \"\"\"Multi-class confusion matrix metrics at thresholds.\n\n Computes weighted example counts for all combinations of actual / (top)\n predicted classes.\n\n The inputs are assumed to contain a single positive label per example (i.e.\n only one class can be true at a time) while the predictions are assumed to sum\n to 1.0.\n \"\"\"\n\n def __init__(self,\n thresholds: Optional[List[float]] = None,\n name: str = MULTI_CLASS_CONFUSION_MATRIX_AT_THRESHOLDS_NAME):\n \"\"\"Initializes multi-class confusion matrix.\n\n Args:\n thresholds: Optional thresholds, defaults to 0.5 if not specified. 
If the\n top prediction is less than a threshold then the associated example will\n be assumed to have no prediction associated with it (the\n predicted_class_id will be set to NO_PREDICTED_CLASS_ID).\n name: Metric name.\n \"\"\"\n super().__init__(\n metric_util.merge_per_key_computations(\n _multi_class_confusion_matrix_at_thresholds),\n thresholds=thresholds,\n name=name) # pytype: disable=wrong-arg-types\n\n\nmetric_types.register_metric(MultiClassConfusionMatrixAtThresholds)\n\n\ndef _multi_class_confusion_matrix_at_thresholds(\n thresholds: Optional[List[float]] = None,\n name: str = MULTI_CLASS_CONFUSION_MATRIX_AT_THRESHOLDS_NAME,\n eval_config: Optional[config_pb2.EvalConfig] = None,\n model_name: str = '',\n output_name: str = '',\n example_weighted: bool = False) -> metric_types.MetricComputations:\n \"\"\"Returns computations for multi-class confusion matrix at thresholds.\"\"\"\n if not thresholds:\n thresholds = [0.5]\n\n key = metric_types.MetricKey(\n name=name,\n model_name=model_name,\n output_name=output_name,\n example_weighted=example_weighted)\n\n # Make sure matrices are calculated.\n matrices_computations = multi_class_confusion_matrices(\n thresholds=thresholds,\n eval_config=eval_config,\n model_name=model_name,\n output_name=output_name,\n example_weighted=example_weighted)\n matrices_key = matrices_computations[-1].keys[-1]\n\n def result(\n metrics: Dict[metric_types.MetricKey,\n metrics_for_slice_pb2.MultiClassConfusionMatrixAtThresholds]\n ) -> Dict[metric_types.MetricKey,\n metrics_for_slice_pb2.MultiClassConfusionMatrixAtThresholds]:\n return {key: metrics[matrices_key]}\n\n derived_computation = metric_types.DerivedMetricComputation(\n keys=[key], result=result)\n computations = matrices_computations\n computations.append(derived_computation)\n return computations\n\n\nMULTI_CLASS_CONFUSION_MATRICES = '_multi_class_confusion_matrices'\n\n_EPSILON = 1e-7\n\n# Class ID used when no prediction was made because a threshold was given and\n# the top prediction was less than the threshold.\nNO_PREDICTED_CLASS_ID = -1\n\n\ndef multi_class_confusion_matrices(\n thresholds: Optional[List[float]] = None,\n num_thresholds: Optional[int] = None,\n name: str = MULTI_CLASS_CONFUSION_MATRICES,\n eval_config: Optional[config_pb2.EvalConfig] = None,\n model_name: str = '',\n output_name: str = '',\n example_weighted: bool = False) -> metric_types.MetricComputations:\n \"\"\"Returns computations for multi-class confusion matrices.\n\n Args:\n thresholds: A specific set of thresholds to use. The caller is responsible\n for marking the boundaries with +/-epsilon if desired. Only one of\n num_thresholds or thresholds should be used.\n num_thresholds: Number of thresholds to use. Thresholds will be calculated\n using linear interpolation between 0.0 and 1.0 with equidistant values and\n boundaries at -epsilon and 1.0+epsilon. Values must be > 0.
Only one of\n num_thresholds or thresholds should be used.\n name: Metric name.\n eval_config: Eval config.\n model_name: Optional model name (if multi-model evaluation).\n output_name: Optional output name (if multi-output model type).\n example_weighted: True if example weights should be applied.\n\n Raises:\n ValueError: If both num_thresholds and thresholds are set at the same time.\n \"\"\"\n if num_thresholds is not None and thresholds is not None:\n raise ValueError(\n 'only one of thresholds or num_thresholds can be set at a time')\n if num_thresholds is None and thresholds is None:\n thresholds = [0.0]\n if num_thresholds is not None:\n thresholds = [\n (i + 1) * 1.0 / (num_thresholds - 1) for i in range(num_thresholds - 2)\n ]\n thresholds = [-_EPSILON] + thresholds + [1.0 + _EPSILON]\n\n key = metric_types.MetricKey(\n name=name,\n model_name=model_name,\n output_name=output_name,\n example_weighted=example_weighted)\n return [\n metric_types.MetricComputation(\n keys=[key],\n preprocessor=None,\n combiner=_MultiClassConfusionMatrixCombiner(\n key=key,\n eval_config=eval_config,\n example_weighted=example_weighted,\n thresholds=thresholds))\n ]\n\n\nMatrixEntryKey = NamedTuple('MatrixEntryKey', [('actual_class_id', int),\n ('predicted_class_id', int)])\n\n\nclass Matrices(types.StructuredMetricValue, dict):\n \"\"\"A Matrices object wraps a Dict[float, Dict[MatrixEntryKey, float]].\n\n A specific confusion matrix entry can be accessed for a threshold,\n actual_class and predicted_class with\n\n instance[threshold][MatrixEntryKey(actual_class_id, predicted_class_id)]\n \"\"\"\n\n def _apply_binary_op_elementwise(\n self, other: 'Matrices', op: Callable[[float, float],\n float]) -> 'Matrices':\n result = Matrices()\n all_thresholds = set(self.keys()).union(other.keys())\n for threshold in all_thresholds:\n self_entries = self.get(threshold, {})\n other_entries = other.get(threshold, {})\n result[threshold] = {}\n all_entry_keys = set(self_entries.keys()).union(set(other_entries.keys()))\n for entry_key in all_entry_keys:\n self_count = self_entries.get(entry_key, 0)\n other_count = other_entries.get(entry_key, 0)\n result[threshold][entry_key] = op(self_count, other_count)\n return result\n\n def _apply_binary_op_broadcast(\n self, other: float, op: Callable[[float, float], float]) -> 'Matrices':\n result = Matrices()\n for threshold, self_entries in self.items():\n result[threshold] = {}\n for entry_key, self_count in self_entries.items():\n result[threshold][entry_key] = op(self_count, other)\n return result\n\n def to_proto(self) -> metrics_for_slice_pb2.MetricValue:\n result = metrics_for_slice_pb2.MetricValue()\n multi_class_confusion_matrices_at_thresholds_proto = (\n result.multi_class_confusion_matrix_at_thresholds)\n for threshold in sorted(self.keys()):\n # Convert -epsilon and 1.0+epsilon back to 0.0 and 1.0.\n if threshold == -_EPSILON:\n t = 0.0\n elif threshold == 1.0 + _EPSILON:\n t = 1.0\n else:\n t = threshold\n matrix = multi_class_confusion_matrices_at_thresholds_proto.matrices.add(\n threshold=t)\n for k in sorted(self[threshold].keys()):\n matrix.entries.add(\n actual_class_id=k.actual_class_id,\n predicted_class_id=k.predicted_class_id,\n num_weighted_examples=self[threshold][k])\n return result\n\n\nclass _MultiClassConfusionMatrixCombiner(beam.CombineFn):\n \"\"\"Creates multi-class confusion matrix at thresholds from standard inputs.\"\"\"\n\n def __init__(self, key: metric_types.MetricKey,\n eval_config: Optional[config_pb2.EvalConfig],\n 
example_weighted: bool, thresholds: List[float]):\n self._key = key\n self._eval_config = eval_config\n self._example_weighted = example_weighted\n self._thresholds = thresholds or [0.0]\n\n def create_accumulator(self) -> Matrices:\n return Matrices()\n\n def add_input(self, accumulator: Matrices,\n element: metric_types.StandardMetricInputs) -> Matrices:\n label, predictions, example_weight = next(\n metric_util.to_label_prediction_example_weight(\n element,\n eval_config=self._eval_config,\n model_name=self._key.model_name,\n output_name=self._key.output_name,\n example_weighted=self._example_weighted,\n flatten=False,\n require_single_example_weight=True)) # pytype: disable=wrong-arg-types\n if not label.shape:\n raise ValueError(\n 'Label missing from example: StandardMetricInputs={}'.format(element))\n if predictions.shape in ((), (1,)):\n raise ValueError(\n 'Predictions shape must be > 1 for multi-class confusion matrix: '\n 'shape={}, StandardMetricInputs={}'.format(predictions.shape,\n element))\n actual_class_id = np.argmax(label) if label.size > 1 else int(label)\n predicted_class_id = np.argmax(predictions)\n example_weight = float(example_weight)\n for threshold in self._thresholds:\n if threshold not in accumulator:\n accumulator[threshold] = {}\n if predictions[predicted_class_id] <= threshold:\n predicted_class_id = NO_PREDICTED_CLASS_ID\n matrix_key = MatrixEntryKey(actual_class_id, predicted_class_id)\n if matrix_key in accumulator[threshold]:\n accumulator[threshold][matrix_key] += example_weight\n else:\n accumulator[threshold][matrix_key] = example_weight\n return accumulator\n\n def merge_accumulators(self, accumulators: Iterable[Matrices]) -> Matrices:\n accumulators = iter(accumulators)\n result = next(accumulators)\n for accumulator in accumulators:\n for threshold, matrix in accumulator.items():\n if threshold not in result:\n result[threshold] = {}\n for k, v in matrix.items():\n if k in result[threshold]:\n result[threshold][k] += v\n else:\n result[threshold][k] = v\n return result\n\n def extract_output(\n self, accumulator: Matrices) -> Dict[metric_types.MetricKey, Matrices]:\n return {self._key: accumulator}\n",
"# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test for using the tfma_unit library.\"\"\"\n\nimport os\n\nimport apache_beam as beam\nimport tensorflow as tf\nfrom tensorflow_model_analysis.api import tfma_unit\nfrom tensorflow_model_analysis.eval_saved_model.example_trainers import fixed_prediction_estimator\nfrom tensorflow_model_analysis.eval_saved_model.example_trainers import fixed_prediction_estimator_extra_fields\nfrom tensorflow_model_analysis.post_export_metrics import metric_keys\nfrom tensorflow_model_analysis.post_export_metrics import post_export_metrics\nfrom tensorflow_model_analysis.slicer import slicer_lib as slicer\n\n\nclass TFMAUnitTest(tfma_unit.TestCase):\n\n def _getEvalExportDir(self):\n return os.path.join(self._getTempDir(), 'eval_export_dir')\n\n def testAssertMetricsComputedWithoutBeamAre(self):\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = (\n fixed_prediction_estimator.simple_fixed_prediction_estimator(\n None, temp_eval_export_dir))\n examples = [\n self.makeExample(prediction=0.0, label=1.0),\n self.makeExample(prediction=0.7, label=0.0),\n self.makeExample(prediction=0.8, label=1.0),\n self.makeExample(prediction=1.0, label=1.0)\n ]\n self.assertMetricsComputedWithoutBeamAre(\n eval_saved_model_path=eval_export_dir,\n serialized_examples=examples,\n expected_metrics={'average_loss': (1.0 + 0.49 + 0.04 + 0.00) / 4.0})\n\n def testBoundedValueChecks(self):\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = (\n fixed_prediction_estimator.simple_fixed_prediction_estimator(\n None, temp_eval_export_dir))\n examples = [\n self.makeExample(prediction=0.8, label=1.0),\n ]\n\n self.assertMetricsComputedWithBeamAre(\n eval_saved_model_path=eval_export_dir,\n serialized_examples=examples,\n expected_metrics={'average_loss': 0.04})\n\n self.assertMetricsComputedWithoutBeamAre(\n eval_saved_model_path=eval_export_dir,\n serialized_examples=examples,\n expected_metrics={\n 'average_loss':\n tfma_unit.BoundedValue(lower_bound=0.03, upper_bound=0.05)\n })\n\n with self.assertRaisesRegex(\n AssertionError, 'expecting key average_loss to have value between'):\n self.assertMetricsComputedWithoutBeamAre(\n eval_saved_model_path=eval_export_dir,\n serialized_examples=examples,\n expected_metrics={\n 'average_loss': tfma_unit.BoundedValue(upper_bound=0.01)\n })\n\n with self.assertRaisesRegex(\n AssertionError, 'expecting key average_loss to have value between'):\n self.assertMetricsComputedWithoutBeamAre(\n eval_saved_model_path=eval_export_dir,\n serialized_examples=examples,\n expected_metrics={\n 'average_loss': tfma_unit.BoundedValue(lower_bound=0.10)\n })\n\n def testAssertMetricsComputedWithBeamAre(self):\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = (\n fixed_prediction_estimator.simple_fixed_prediction_estimator(\n None, temp_eval_export_dir))\n examples = [\n self.makeExample(prediction=0.0, label=1.0),\n self.makeExample(prediction=0.7, label=0.0),\n 
self.makeExample(prediction=0.8, label=1.0),\n self.makeExample(prediction=1.0, label=1.0)\n ]\n self.assertMetricsComputedWithBeamAre(\n eval_saved_model_path=eval_export_dir,\n serialized_examples=examples,\n expected_metrics={'average_loss': (1.0 + 0.49 + 0.04 + 0.00) / 4.0})\n\n def testAssertGeneralMetricsComputedWithBeamAre(self):\n temp_eval_export_dir = self._getEvalExportDir()\n _, eval_export_dir = (\n fixed_prediction_estimator_extra_fields\n .simple_fixed_prediction_estimator_extra_fields(None,\n temp_eval_export_dir))\n examples = [\n self.makeExample(\n prediction=0.0,\n label=0.0,\n fixed_string='negative_slice',\n fixed_float=0.0,\n fixed_int=0),\n self.makeExample(\n prediction=0.2,\n label=0.0,\n fixed_string='negative_slice',\n fixed_float=0.0,\n fixed_int=0),\n self.makeExample(\n prediction=0.4,\n label=0.0,\n fixed_string='negative_slice',\n fixed_float=0.0,\n fixed_int=0),\n self.makeExample(\n prediction=0.8,\n label=1.0,\n fixed_string='positive_slice',\n fixed_float=0.0,\n fixed_int=0),\n self.makeExample(\n prediction=0.9,\n label=1.0,\n fixed_string='positive_slice',\n fixed_float=0.0,\n fixed_int=0),\n self.makeExample(\n prediction=1.0,\n label=1.0,\n fixed_string='positive_slice',\n fixed_float=0.0,\n fixed_int=0),\n ]\n expected_slice_metrics = {}\n expected_slice_metrics[()] = {\n 'average_loss': (0.00 + 0.04 + 0.16 + 0.04 + 0.01 + 0.00) / 6.0,\n 'mae':\n 0.15,\n # Note that we don't check the exact value because of numerical errors.\n metric_keys.AUC:\n tfma_unit.BoundedValue(0.98, 1.00),\n }\n # We don't check AUC for the positive / negative only slices because\n # it's not clear what the value should be.\n expected_slice_metrics[(('fixed_string', 'negative_slice'),)] = {\n 'average_loss': (0.00 + 0.04 + 0.16) / 3.0,\n 'mae': 0.2,\n }\n expected_slice_metrics[(('fixed_string', 'positive_slice'),)] = {\n 'average_loss': (0.04 + 0.01 + 0.00) / 3.0,\n 'mae': 0.1,\n }\n\n def add_metrics(features, predictions, labels):\n del features\n return {\n 'mae': tf.compat.v1.metrics.mean_absolute_error(labels, predictions),\n }\n\n with beam.Pipeline() as pipeline:\n examples_pcollection = pipeline | 'Create' >> beam.Create(examples)\n self.assertGeneralMetricsComputedWithBeamAre(\n eval_saved_model_path=eval_export_dir,\n examples_pcollection=examples_pcollection,\n slice_spec=[\n slicer.SingleSliceSpec(),\n slicer.SingleSliceSpec(columns=['fixed_string'])\n ],\n add_metrics_callbacks=[add_metrics,\n post_export_metrics.auc()],\n expected_slice_metrics=expected_slice_metrics)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.compat.as_bytes",
"tensorflow.compat.v1.saved_model.utils.get_tensor_from_tensor_info",
"tensorflow.core.protobuf.meta_graph_pb2.TensorInfo",
"tensorflow.compat.v1.saved_model.utils.build_tensor_info"
],
[
"numpy.argmax"
],
[
"tensorflow.compat.v1.metrics.mean_absolute_error",
"tensorflow.test.main"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mrsaicharan1/SecTra
|
[
"2798d9364f306d7780fe6c7e59958b2c4da93dbc"
] |
[
"object_detect.py"
] |
[
"\n# coding: utf-8\n\n# # Object Detection Demo\n# Welcome to the object detection inference walkthrough! This notebook will walk you step by step through the process of using a pre-trained model to detect objects in an image. Make sure to follow the [installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md) before you start.\n\n# # Imports\n\n# In[1]:\n\n\nimport numpy as np\nimport os\nimport six.moves.urllib as urllib\nimport sys\nimport tarfile\nimport tensorflow as tf\nimport zipfile\nimport cv2\n\nfrom distutils.version import StrictVersion\nfrom collections import defaultdict\nfrom io import StringIO\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\n\n# This is needed since the notebook is stored in the object_detection folder.\nsys.path.append(\"..\")\nfrom object_detection.utils import ops as utils_ops\n\nif StrictVersion(tf.__version__) < StrictVersion('1.9.0'):\n raise ImportError('Please upgrade your TensorFlow installation to v1.9.* or later!')\n\n\n# ## Env setup\n\n# In[2]:\n\n\n# This is needed to display the images.\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# ## Object detection imports\n# Here are the imports from the object detection module.\n\n# In[3]:\n\n\nfrom utils import label_map_util\n\nfrom utils import visualization_utils as vis_util\n\n\n# # Model preparation \n\n# ## Variables\n# \n# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_FROZEN_GRAPH` to point to a new .pb file. \n# \n# By default we use an \"SSD with Mobilenet\" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.\n\n# In[ ]:\n\n\n# What model to download.\nMODEL_NAME = 'ssd_mobilenet_v1_coco_2017_11_17'\nMODEL_FILE = MODEL_NAME + '.tar.gz'\nDOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'\n\n# Path to frozen detection graph. This is the actual model that is used for the object detection.\nPATH_TO_FROZEN_GRAPH = MODEL_NAME + '/frozen_inference_graph.pb'\n\n# List of the strings that is used to add correct label for each box.\nPATH_TO_LABELS = os.path.join('data', 'mscoco_label_map.pbtxt')\n\n\n# ## Download Model\n\n# In[ ]:\nclass objectclass:\n def __init__(self, _id, name):\n self.id = _id\n self.name = name\n\nopener = urllib.request.URLopener()\nopener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)\ntar_file = tarfile.open(MODEL_FILE)\nfor file in tar_file.getmembers():\n file_name = os.path.basename(file.name)\n if 'frozen_inference_graph.pb' in file_name:\n tar_file.extract(file, os.getcwd())\n\n\n# ## Load a (frozen) Tensorflow model into memory.\n\n# In[ ]:\n\n\ndetection_graph = tf.Graph()\nwith detection_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n\n# ## Loading label map\n# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. 
Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine\n\n# In[ ]:\n\n\ncategory_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)\n\n\n# ## Helper code\n\n# In[ ]:\n\n\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\n# # Detection\n\n# In[ ]:\n\n\n# For the sake of simplicity we will use only 2 images:\n# image1.jpg\n# image2.jpg\n# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.\nPATH_TO_TEST_IMAGES_DIR = 'test_images'\nN=4 \nTEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, N) ]\n\n# Size, in inches, of the output images.\nIMAGE_SIZE = (12, 8)\n\n\n# In[ ]:\n\n\ndef run_inference_for_single_image(image, graph):\n with graph.as_default():\n with tf.Session() as sess:\n # Get handles to input and output tensors\n ops = tf.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n tensor_dict = {}\n for key in [\n 'num_detections', 'detection_boxes', 'detection_scores',\n 'detection_classes', 'detection_masks'\n ]:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(\n tensor_name)\n if 'detection_masks' in tensor_dict:\n # The following processing is only for single image\n detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])\n detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])\n # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.\n real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)\n detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])\n detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n detection_masks, detection_boxes, image.shape[0], image.shape[1])\n detection_masks_reframed = tf.cast(\n tf.greater(detection_masks_reframed, 0.5), tf.uint8)\n # Follow the convention by adding back the batch dimension\n tensor_dict['detection_masks'] = tf.expand_dims(\n detection_masks_reframed, 0)\n image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n # Run inference\n output_dict = sess.run(tensor_dict,\n feed_dict={image_tensor: np.expand_dims(image, 0)})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict[\n 'detection_classes'][0].astype(np.uint8)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n if 'detection_masks' in output_dict:\n output_dict['detection_masks'] = output_dict['detection_masks'][0]\n return output_dict\n\n\n# In[ ]:\n\ncap = cv2.VideoCapture(0)\ncategories = []\nsess = tf.Session(graph=detection_graph)\nwhile(True):\n # the array based representation of the image will be used later in order to prepare the\n # result image with boxes and labels on it.\n ret,frame = cap.read()\n image_np = frame\n # Expand dimensions since the model expects images to have shape: [1, None, None, 3]\n image_np_expanded = 
np.expand_dims(image_np, axis=0)\n # Actual detection.\n image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')\n # Each box represents a part of the image where a particular object was detected.\n boxes = detection_graph.get_tensor_by_name('detection_boxes:0')\n scores = detection_graph.get_tensor_by_name('detection_scores:0')\n classes = detection_graph.get_tensor_by_name('detection_classes:0')\n num_detections = detection_graph.get_tensor_by_name('num_detections:0')\n (boxes, scores, classes, num_detections) = sess.run(\n [boxes, scores, classes, num_detections],\n feed_dict={image_tensor: image_np_expanded})\n \n \n output_dict = run_inference_for_single_image(image_np, detection_graph)\n # this will load the labels and categories along with category index\n \n label_map = label_map_util.load_labelmap(PATH_TO_LABELS)\n\n categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=50, use_display_name=True)\n\n category_index = label_map_util.create_category_index(categories)\n \n # Visualization of the results of a detection.\n vis_util.visualize_boxes_and_labels_on_image_array(\n image_np,\n output_dict['detection_boxes'],\n output_dict['detection_classes'],\n output_dict['detection_scores'],\n category_index,\n instance_masks=output_dict.get('detection_masks'),\n use_normalized_coordinates=True,\n line_thickness=8)\n cv2.imshow('object detection', cv2.resize(image_np, (800,600)))\n min_score_thresh = 0.8\n categories = [category_index.get(i) for i in classes[0]]\n for category in categories:\n\n if category !=None:\n print(category['name'])\n if category['name']=='knife' or category['name']=='scissors' or category['name']=='bottle':\n print(category['name']+' is rejected by Jet Airways and Indigo Airlines')\n if cv2.waitKey(1) & 0xFF == ord('q'):\n cv2.destroyAllWindows()\n break\n \n"
] |
[
[
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.import_graph_def",
"tensorflow.greater",
"tensorflow.slice",
"tensorflow.gfile.GFile",
"tensorflow.cast",
"tensorflow.squeeze",
"tensorflow.expand_dims",
"tensorflow.Session",
"tensorflow.get_default_graph",
"tensorflow.GraphDef"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ioanacroi/collaborative-experts
|
[
"ffd6c0bc8dc3c0375e40982bf5dce2c35359f1b6"
] |
[
"test.py"
] |
[
"import copy\nimport pickle\nimport random\nimport logging\nimport argparse\nfrom typing import Tuple, Dict\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom mergedeep import Strategy, merge\nfrom typeguard import typechecked\n\nimport model as module_arch\nimport model.metric as module_metric\nimport utils.visualizer as module_vis\nimport data_loader.data_loaders as module_data\nfrom trainer import verbose, ctxt_mgr\nfrom utils.util import compute_dims, compute_trn_config, update_src_web_video_dir\nfrom parse_config import ConfigParser\n\n\n@typechecked\ndef compress_predictions(query_masks: np.ndarray, sims: np.ndarray, topk: int = 10):\n \"\"\"We store the indices of the top-k predictions, rather than the full similarity\n matrix, to reduce storage requirements.\n\n NOTE: The similarity matrix contains `num_queries x num_videos` elements, where\n `num_queries = num_videos x max_num_queries_per_video`. We first mask out\n locations in the similarity matrix that correspond to invalid queries (these are\n produced by videos with fewer than `max_num_queries_per_video` descriptions).\n \"\"\"\n\n # validate the input shapes\n assert query_masks.ndim == 2, \"Expected query_masks to be a matrix\"\n query_num_videos, query_max_per_video = query_masks.shape\n sims_queries, sims_num_videos = sims.shape\n msg = (f\"Expected sims and query masks to represent the same number of videos \"\n f\"(found {sims_num_videos} v {query_num_videos}\")\n assert query_num_videos == sims_num_videos, msg\n msg = (f\"Expected sims and query masks to represent the same number of queries \"\n f\"(found {sims_queries} v {query_num_videos * query_max_per_video}\")\n assert query_max_per_video * query_num_videos == sims_queries, msg\n\n valid_sims = sims[query_masks.flatten().astype(np.bool)]\n ranks = np.argsort(-valid_sims, axis=1)\n return ranks[:, :topk]\n\n\n@typechecked\ndef get_model_and_data_loaders(\n config: ConfigParser,\n logger: logging.Logger,\n ckpt_path: Path,\n) -> Tuple[torch.nn.Module, module_data.ExpertDataLoader]:\n expert_dims, raw_input_dims, text_dim = compute_dims(config)\n\n data_loaders = config.init(\n name='data_loader',\n module=module_data,\n logger=logger,\n raw_input_dims=raw_input_dims,\n challenge_mode=config.get(\"challenge_mode\", False),\n text_dim=text_dim,\n text_feat=config[\"experts\"][\"text_feat\"],\n text_agg=config[\"experts\"][\"text_agg\"],\n use_zeros_for_missing=config[\"experts\"].get(\"use_zeros_for_missing\", False),\n task=config.get(\"task\", \"retrieval\"),\n eval_only=True,\n distil_params=config.get(\"distil_params\", None),\n training_file=config.get(\"training_file\", None),\n caption_masks=config.get(\"caption_masks\", None),\n ce_shared_dim=config[\"experts\"].get(\"ce_shared_dim\", None),\n )\n\n trn_config = compute_trn_config(config)\n model = config.init(\n name='arch',\n module=module_arch,\n trn_config=trn_config,\n expert_dims=expert_dims,\n text_dim=text_dim,\n disable_nan_checks=config[\"disable_nan_checks\"],\n task=config.get(\"task\", \"retrieval\"),\n ce_shared_dim=config[\"experts\"].get(\"ce_shared_dim\", None),\n feat_aggregation=config[\"data_loader\"][\"args\"][\"feat_aggregation\"],\n trn_cat=config[\"data_loader\"][\"args\"].get(\"trn_cat\", 0),\n )\n ckpt_path = config._args.resume\n logger.info(f\"Loading checkpoint: {ckpt_path} ...\")\n checkpoint = torch.load(ckpt_path)\n state_dict = checkpoint['state_dict']\n if config['n_gpu'] > 1:\n model = torch.nn.DataParallel(model)\n # support backwards compatibility\n 
deprecated = [\"ce.moe_fc_bottleneck1\", \"ce.moe_cg\", \"ce.moe_fc_proj\"]\n for mod in deprecated:\n for suffix in (\"weight\", \"bias\"):\n key = f\"{mod}.{suffix}\"\n if key in state_dict:\n print(f\"WARNING: Removing deprecated key {key} from model\")\n state_dict.pop(key)\n model.load_state_dict(state_dict)\n\n return model, data_loaders\n\ndef evaluation(config, logger=None, trainer=None):\n\n if logger is None:\n logger = config.get_logger('test')\n\n if getattr(config._args, \"eval_from_training_config\", False):\n eval_conf = copy.deepcopy(config)\n merge(eval_conf._config, config[\"eval_settings\"], strategy=Strategy.REPLACE)\n config = eval_conf\n\n logger.info(\"Running evaluation with configuration:\")\n logger.info(config)\n\n # Set the random initial seeds\n seed = config[\"seed\"]\n logger.info(f\"Setting experiment random seed to {seed}\")\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n model, data_loaders = get_model_and_data_loaders(\n config=config,\n logger=logger,\n ckpt_path=Path(config._args.resume),\n )\n logger.info(model)\n\n update_src_web_video_dir(config)\n visualizer = config.init(\n name='visualizer',\n module=module_vis,\n exp_name=config._exper_name,\n web_dir=config._web_log_dir,\n )\n\n metrics = [getattr(module_metric, met) for met in config['metrics']]\n challenge_mode = config.get(\"challenge_mode\", False)\n challenge_msg = (\n \"\\n\"\n \"Evaluation ran on challenge features. To obtain a score, upload the similarity\"\n \"matrix for each dataset to the test server after running the \"\n \"`misc/cvpr2020-challenge/prepare_submission.py` script and following the \"\n \"instructions at: \"\n \"https://www.robots.ox.ac.uk/~vgg/challenges/video-pentathlon/\"\n \"\\n\"\n )\n\n # prepare model for testing. 
Note that some datasets fail to fit the retrieval\n # set on the GPU, so we run them on the CPU\n if torch.cuda.is_available() and not config.get(\"disable_gpu\", True):\n device = \"cuda\"\n else:\n device = \"cpu\"\n logger.info(f\"Running evaluation on {device}\")\n\n model = model.to(device)\n model.eval()\n\n with torch.no_grad():\n samples, meta = data_loaders[\"retrieval\"]\n\n # To use the nan-checks safely, we need make temporary copies of the data\n disable_nan_checks = config._config[\"disable_nan_checks\"]\n with ctxt_mgr(samples, device, disable_nan_checks) as valid:\n output = model(**valid)\n\n sims = output[\"cross_view_conf_matrix\"].data.cpu().float().numpy()\n dataset = data_loaders.dataset_name\n if challenge_mode:\n split = data_loaders.dataloaders[\"dataset\"].split_name\n prediction_path = config._log_dir / f\"{dataset}-{split}-predictions.csv\"\n compressed_preds = compress_predictions(\n query_masks=meta[\"query_masks\"],\n sims=sims,\n )\n np.savetxt(prediction_path, compressed_preds, delimiter=',', fmt=\"%d\")\n print(f\"Saved similarity matrix predictions to {prediction_path}\")\n print(challenge_msg)\n return\n\n nested_metrics = {}\n for metric in metrics:\n metric_name = metric.__name__\n res = metric(sims, query_masks=meta[\"query_masks\"])\n verbose(epoch=0, metrics=res, name=dataset, mode=metric_name)\n if trainer is not None:\n if not trainer.mini_train:\n trainer.writer.set_step(step=0, mode=\"val\")\n # avoid tensboard folding by prefixing\n metric_name_ = f\"test_{metric_name}\"\n trainer.log_metrics(res, metric_name=metric_name_, mode=\"val\")\n nested_metrics[metric_name] = res\n\n if data_loaders.num_test_captions == 1:\n visualizer.visualize_ranking(\n sims=sims,\n meta=meta,\n epoch=0,\n nested_metrics=nested_metrics,\n )\n log = {}\n for subkey, subval in nested_metrics.items():\n for subsubkey, subsubval in subval.items():\n log[f\"test_{subkey}_{subsubkey}\"] = subsubval\n for key, value in log.items():\n logger.info(\" {:15s}: {}\".format(str(key), value))\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser(description='PyTorch Template')\n args.add_argument('--config', default=None, type=str, help=\"config file path\")\n args.add_argument('--resume', type=Path, help='path to checkpoint for evaluation')\n args.add_argument('--device', help='indices of GPUs to enable')\n args.add_argument('--eval_from_training_config', action=\"store_true\",\n help=\"if true, evaluate directly from a training config file.\")\n args.add_argument(\"--custom_args\", help=\"qualified key,val pairs\")\n eval_config = ConfigParser(args)\n\n cfg_msg = \"For evaluation, a model checkpoint must be specified via the --resume flag\"\n assert eval_config._args.resume, cfg_msg\n evaluation(eval_config)\n"
] |
[
[
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.savetxt",
"numpy.argsort",
"torch.nn.DataParallel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
weiyx16/mmsegmentation
|
[
"6d35d76195f173fbc6b119a7d7815e67d78024c6",
"6d35d76195f173fbc6b119a7d7815e67d78024c6",
"6d35d76195f173fbc6b119a7d7815e67d78024c6",
"6d35d76195f173fbc6b119a7d7815e67d78024c6",
"6d35d76195f173fbc6b119a7d7815e67d78024c6",
"6d35d76195f173fbc6b119a7d7815e67d78024c6"
] |
[
"tests/test_models/test_necks/test_fpn.py",
"tools/model_converters/swin2mmseg.py",
"tools/convert_datasets/pascal_context.py",
"tests/test_models/test_losses/test_ce_loss.py",
"tests/test_models/test_heads/test_cc_head.py",
"mmseg/models/decode_heads/dm_head.py"
] |
[
"# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmseg.models import FPN\n\n\ndef test_fpn():\n in_channels = [256, 512, 1024, 2048]\n inputs = [\n torch.randn(1, c, 56 // 2**i, 56 // 2**i)\n for i, c in enumerate(in_channels)\n ]\n\n fpn = FPN(in_channels, 256, len(in_channels))\n outputs = fpn(inputs)\n assert outputs[0].shape == torch.Size([1, 256, 56, 56])\n assert outputs[1].shape == torch.Size([1, 256, 28, 28])\n assert outputs[2].shape == torch.Size([1, 256, 14, 14])\n assert outputs[3].shape == torch.Size([1, 256, 7, 7])\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os.path as osp\nfrom collections import OrderedDict\n\nimport mmcv\nimport torch\nfrom mmcv.runner import CheckpointLoader\n\n\ndef convert_swin(ckpt):\n new_ckpt = OrderedDict()\n\n def correct_unfold_reduction_order(x):\n out_channel, in_channel = x.shape\n x = x.reshape(out_channel, 4, in_channel // 4)\n x = x[:, [0, 2, 1, 3], :].transpose(1,\n 2).reshape(out_channel, in_channel)\n return x\n\n def correct_unfold_norm_order(x):\n in_channel = x.shape[0]\n x = x.reshape(4, in_channel // 4)\n x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)\n return x\n\n for k, v in ckpt.items():\n if k.startswith('head'):\n continue\n elif k.startswith('layers'):\n new_v = v\n if 'attn.' in k:\n new_k = k.replace('attn.', 'attn.w_msa.')\n elif 'mlp.' in k:\n if 'mlp.fc1.' in k:\n new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')\n elif 'mlp.fc2.' in k:\n new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')\n else:\n new_k = k.replace('mlp.', 'ffn.')\n elif 'downsample' in k:\n new_k = k\n if 'reduction.' in k:\n new_v = correct_unfold_reduction_order(v)\n elif 'norm.' in k:\n new_v = correct_unfold_norm_order(v)\n else:\n new_k = k\n new_k = new_k.replace('layers', 'stages', 1)\n elif k.startswith('patch_embed'):\n new_v = v\n if 'proj' in k:\n new_k = k.replace('proj', 'projection')\n else:\n new_k = k\n else:\n new_v = v\n new_k = k\n\n new_ckpt[new_k] = new_v\n\n return new_ckpt\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Convert keys in official pretrained swin models to'\n 'MMSegmentation style.')\n parser.add_argument('src', help='src model path or url')\n # The dst path must be a full path of the new checkpoint.\n parser.add_argument('dst', help='save path')\n args = parser.parse_args()\n\n checkpoint = CheckpointLoader.load_checkpoint(args.src, map_location='cpu')\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n elif 'model' in checkpoint:\n state_dict = checkpoint['model']\n else:\n state_dict = checkpoint\n weight = convert_swin(state_dict)\n mmcv.mkdir_or_exist(osp.dirname(args.dst))\n torch.save(weight, args.dst)\n\n\nif __name__ == '__main__':\n main()\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os.path as osp\nfrom functools import partial\n\nimport mmcv\nimport numpy as np\nfrom detail import Detail\nfrom PIL import Image\n\n_mapping = np.sort(\n np.array([\n 0, 2, 259, 260, 415, 324, 9, 258, 144, 18, 19, 22, 23, 397, 25, 284,\n 158, 159, 416, 33, 162, 420, 454, 295, 296, 427, 44, 45, 46, 308, 59,\n 440, 445, 31, 232, 65, 354, 424, 68, 326, 72, 458, 34, 207, 80, 355,\n 85, 347, 220, 349, 360, 98, 187, 104, 105, 366, 189, 368, 113, 115\n ]))\n_key = np.array(range(len(_mapping))).astype('uint8')\n\n\ndef generate_labels(img_id, detail, out_dir):\n\n def _class_to_index(mask, _mapping, _key):\n # assert the values\n values = np.unique(mask)\n for i in range(len(values)):\n assert (values[i] in _mapping)\n index = np.digitize(mask.ravel(), _mapping, right=True)\n return _key[index].reshape(mask.shape)\n\n mask = Image.fromarray(\n _class_to_index(detail.getMask(img_id), _mapping=_mapping, _key=_key))\n filename = img_id['file_name']\n mask.save(osp.join(out_dir, filename.replace('jpg', 'png')))\n return osp.splitext(osp.basename(filename))[0]\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Convert PASCAL VOC annotations to mmsegmentation format')\n parser.add_argument('devkit_path', help='pascal voc devkit path')\n parser.add_argument('json_path', help='annoation json filepath')\n parser.add_argument('-o', '--out_dir', help='output path')\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n devkit_path = args.devkit_path\n if args.out_dir is None:\n out_dir = osp.join(devkit_path, 'VOC2010', 'SegmentationClassContext')\n else:\n out_dir = args.out_dir\n json_path = args.json_path\n mmcv.mkdir_or_exist(out_dir)\n img_dir = osp.join(devkit_path, 'VOC2010', 'JPEGImages')\n\n train_detail = Detail(json_path, img_dir, 'train')\n train_ids = train_detail.getImgs()\n\n val_detail = Detail(json_path, img_dir, 'val')\n val_ids = val_detail.getImgs()\n\n mmcv.mkdir_or_exist(\n osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext'))\n\n train_list = mmcv.track_progress(\n partial(generate_labels, detail=train_detail, out_dir=out_dir),\n train_ids)\n with open(\n osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext',\n 'train.txt'), 'w') as f:\n f.writelines(line + '\\n' for line in sorted(train_list))\n\n val_list = mmcv.track_progress(\n partial(generate_labels, detail=val_detail, out_dir=out_dir), val_ids)\n with open(\n osp.join(devkit_path, 'VOC2010/ImageSets/SegmentationContext',\n 'val.txt'), 'w') as f:\n f.writelines(line + '\\n' for line in sorted(val_list))\n\n print('Done!')\n\n\nif __name__ == '__main__':\n main()\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\n\ndef test_ce_loss():\n from mmseg.models import build_loss\n\n # use_mask and use_sigmoid cannot be true at the same time\n with pytest.raises(AssertionError):\n loss_cfg = dict(\n type='CrossEntropyLoss',\n use_mask=True,\n use_sigmoid=True,\n loss_weight=1.0)\n build_loss(loss_cfg)\n\n # test loss with class weights\n loss_cls_cfg = dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n class_weight=[0.8, 0.2],\n loss_weight=1.0,\n loss_name='loss_ce')\n loss_cls = build_loss(loss_cls_cfg)\n fake_pred = torch.Tensor([[100, -100]])\n fake_label = torch.Tensor([1]).long()\n assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.))\n\n # test loss with class weights from file\n import os\n import tempfile\n import mmcv\n import numpy as np\n tmp_file = tempfile.NamedTemporaryFile()\n\n mmcv.dump([0.8, 0.2], f'{tmp_file.name}.pkl', 'pkl') # from pkl file\n loss_cls_cfg = dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n class_weight=f'{tmp_file.name}.pkl',\n loss_weight=1.0,\n loss_name='loss_ce')\n loss_cls = build_loss(loss_cls_cfg)\n assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.))\n\n np.save(f'{tmp_file.name}.npy', np.array([0.8, 0.2])) # from npy file\n loss_cls_cfg = dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n class_weight=f'{tmp_file.name}.npy',\n loss_weight=1.0,\n loss_name='loss_ce')\n loss_cls = build_loss(loss_cls_cfg)\n assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.))\n tmp_file.close()\n os.remove(f'{tmp_file.name}.pkl')\n os.remove(f'{tmp_file.name}.npy')\n\n loss_cls_cfg = dict(\n type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)\n loss_cls = build_loss(loss_cls_cfg)\n assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.))\n\n loss_cls_cfg = dict(\n type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)\n loss_cls = build_loss(loss_cls_cfg)\n assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(100.))\n\n fake_pred = torch.full(size=(2, 21, 8, 8), fill_value=0.5)\n fake_label = torch.ones(2, 8, 8).long()\n assert torch.allclose(\n loss_cls(fake_pred, fake_label), torch.tensor(0.9503), atol=1e-4)\n fake_label[:, 0, 0] = 255\n assert torch.allclose(\n loss_cls(fake_pred, fake_label, ignore_index=255),\n torch.tensor(0.9354),\n atol=1e-4)\n\n # test cross entropy loss has name `loss_ce`\n loss_cls_cfg = dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0,\n loss_name='loss_ce')\n loss_cls = build_loss(loss_cls_cfg)\n assert loss_cls.loss_name == 'loss_ce'\n # TODO test use_mask\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport pytest\nimport torch\n\nfrom mmseg.models.decode_heads import CCHead\nfrom .utils import to_cuda\n\n\ndef test_cc_head():\n head = CCHead(in_channels=32, channels=16, num_classes=19)\n assert len(head.convs) == 2\n assert hasattr(head, 'cca')\n if not torch.cuda.is_available():\n pytest.skip('CCHead requires CUDA')\n inputs = [torch.randn(1, 32, 45, 45)]\n head, inputs = to_cuda(head, inputs)\n outputs = head(inputs)\n assert outputs.shape == (1, head.num_classes, 45, 45)\n",
"# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, build_activation_layer, build_norm_layer\n\nfrom ..builder import HEADS\nfrom .decode_head import BaseDecodeHead\n\n\nclass DCM(nn.Module):\n \"\"\"Dynamic Convolutional Module used in DMNet.\n\n Args:\n filter_size (int): The filter size of generated convolution kernel\n used in Dynamic Convolutional Module.\n fusion (bool): Add one conv to fuse DCM output feature.\n in_channels (int): Input channels.\n channels (int): Channels after modules, before conv_seg.\n conv_cfg (dict | None): Config of conv layers.\n norm_cfg (dict | None): Config of norm layers.\n act_cfg (dict): Config of activation layers.\n \"\"\"\n\n def __init__(self, filter_size, fusion, in_channels, channels, conv_cfg,\n norm_cfg, act_cfg):\n super(DCM, self).__init__()\n self.filter_size = filter_size\n self.fusion = fusion\n self.in_channels = in_channels\n self.channels = channels\n self.conv_cfg = conv_cfg\n self.norm_cfg = norm_cfg\n self.act_cfg = act_cfg\n self.filter_gen_conv = nn.Conv2d(self.in_channels, self.channels, 1, 1,\n 0)\n\n self.input_redu_conv = ConvModule(\n self.in_channels,\n self.channels,\n 1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg)\n\n if self.norm_cfg is not None:\n self.norm = build_norm_layer(self.norm_cfg, self.channels)[1]\n else:\n self.norm = None\n self.activate = build_activation_layer(self.act_cfg)\n\n if self.fusion:\n self.fusion_conv = ConvModule(\n self.channels,\n self.channels,\n 1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg)\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n generated_filter = self.filter_gen_conv(\n F.adaptive_avg_pool2d(x, self.filter_size))\n x = self.input_redu_conv(x)\n b, c, h, w = x.shape\n # [1, b * c, h, w], c = self.channels\n x = x.view(1, b * c, h, w)\n # [b * c, 1, filter_size, filter_size]\n generated_filter = generated_filter.view(b * c, 1, self.filter_size,\n self.filter_size)\n pad = (self.filter_size - 1) // 2\n if (self.filter_size - 1) % 2 == 0:\n p2d = (pad, pad, pad, pad)\n else:\n p2d = (pad + 1, pad, pad + 1, pad)\n x = F.pad(input=x, pad=p2d, mode='constant', value=0)\n # [1, b * c, h, w]\n output = F.conv2d(input=x, weight=generated_filter, groups=b * c)\n # [b, c, h, w]\n output = output.view(b, c, h, w)\n if self.norm is not None:\n output = self.norm(output)\n output = self.activate(output)\n\n if self.fusion:\n output = self.fusion_conv(output)\n\n return output\n\n\[email protected]_module()\nclass DMHead(BaseDecodeHead):\n \"\"\"Dynamic Multi-scale Filters for Semantic Segmentation.\n\n This head is the implementation of\n `DMNet <https://openaccess.thecvf.com/content_ICCV_2019/papers/\\\n He_Dynamic_Multi-Scale_Filters_for_Semantic_Segmentation_\\\n ICCV_2019_paper.pdf>`_.\n\n Args:\n filter_sizes (tuple[int]): The size of generated convolutional filters\n used in Dynamic Convolutional Module. 
Default: (1, 3, 5, 7).\n fusion (bool): Add one conv to fuse DCM output feature.\n \"\"\"\n\n def __init__(self, filter_sizes=(1, 3, 5, 7), fusion=False, **kwargs):\n super(DMHead, self).__init__(**kwargs)\n assert isinstance(filter_sizes, (list, tuple))\n self.filter_sizes = filter_sizes\n self.fusion = fusion\n dcm_modules = []\n for filter_size in self.filter_sizes:\n dcm_modules.append(\n DCM(filter_size,\n self.fusion,\n self.in_channels,\n self.channels,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg))\n self.dcm_modules = nn.ModuleList(dcm_modules)\n self.bottleneck = ConvModule(\n self.in_channels + len(filter_sizes) * self.channels,\n self.channels,\n 3,\n padding=1,\n conv_cfg=self.conv_cfg,\n norm_cfg=self.norm_cfg,\n act_cfg=self.act_cfg)\n\n def forward(self, inputs):\n \"\"\"Forward function.\"\"\"\n x = self._transform_inputs(inputs)\n dcm_outs = [x]\n for dcm_module in self.dcm_modules:\n dcm_outs.append(dcm_module(x))\n dcm_outs = torch.cat(dcm_outs, dim=1)\n output = self.bottleneck(dcm_outs)\n output = self.cls_seg(output)\n return output\n"
] |
[
[
"torch.randn",
"torch.Size"
],
[
"torch.save"
],
[
"numpy.array",
"numpy.unique"
],
[
"torch.ones",
"torch.full",
"torch.Tensor",
"torch.tensor",
"numpy.array"
],
[
"torch.randn",
"torch.cuda.is_available"
],
[
"torch.cat",
"torch.nn.functional.conv2d",
"torch.nn.Conv2d",
"torch.nn.ModuleList",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.functional.pad"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NixonZ/QNetwork-RL
|
[
"acf34dd8d598104267da88f3eacc3e44f06265a7"
] |
[
"environment/metalog.py"
] |
[
"from typing import Callable, List, Tuple\nimport numpy as np\nfrom random import random\n\nU = lambda : random()\nExp = lambda lmbda: -1.0*np.log(U())/lmbda\n\nclass metalog():\n\n def __init__(self,b: int,quantiles: List[Tuple], n_terms: int = 15,bounds:Tuple[float,float] = (-np.inf,np.inf)):\n # Properties of a meta-logistic distribution\n\n self.b = b # no. of quantiles\n self.quantiles = quantiles # List of quantiles\n self.n_terms = n_terms\n assert( self.n_terms >= 2 )\n assert( len(quantiles) == b )\n\n kind = None\n\n if bounds[0] >= bounds[1]:\n raise Exception(\"Lower bound cannot be greater or equal to Upper bound!\")\n\n if np.isneginf(bounds[0]):\n if np.isposinf(bounds[1]):\n kind = 'unbounded'\n elif np.isneginf(bounds[1]):\n raise Exception(\"Upper bound cannot be negative infinity!\")\n else:\n kind = 'upperbounded'\n elif np.isposinf(bounds[0]):\n raise Exception(\"Lower bound cannot be infinity!\")\n else:\n if np.isposinf(bounds[1]):\n kind = 'lowerbounded'\n elif np.isneginf(bounds[1]):\n raise Exception(\"Upper bound cannot be negative infinity!\")\n else:\n kind = 'bounded'\n\n self.kind = kind\n self.bl = bounds[0]\n self.bu = bounds[1]\n\n # Estimating parameters using OLS.\n Y = []\n X = []\n\n for quantile in quantiles:\n if self.kind == 'unbounded':\n X.append(quantile[0])\n elif self.kind == 'lowerbounded':\n X.append( np.log(quantile[0]-self.bl) )\n elif self.kind == 'upperbounded':\n X.append( -1*np.log(self.bu-quantile[0]) )\n elif self.kind == 'bounded':\n X.append( np.log( (quantile[0]-self.bl)/(self.bu-quantile[0]) ) )\n\n y = quantile[1]\n lny = np.log(y/(1-y))\n y_ = y - 0.5\n\n row = [1]\n row.append( lny )\n if self.n_terms == 2:\n Y.append(row)\n continue\n\n row.append( y_*lny )\n if self.n_terms == 3:\n Y.append(row)\n continue\n\n row.append( y_ )\n if self.n_terms == 4:\n Y.append(row)\n continue\n\n for i in range(5,self.n_terms+1):\n if i%2:\n row.append( np.power( y_, (i-1)//2 ) )\n else:\n row.append( np.power( y_, i//2-1 )*lny )\n\n Y.append(row)\n\n X = np.array(X)\n Y = np.array(Y)\n temp = np.dot( np.linalg.inv(np.dot(Y.T,Y)) , Y.T)\n self.a = np.dot(temp,X)\n self.err = np.linalg.norm( X - np.dot(Y,self.a),ord=2)\n\n @property\n def quantile_val(self):\n return [quantile[0] for quantile in self.quantiles]\n\n @classmethod\n def from_sampler(self,b: int,sampler: Callable[[],float],n_terms:int = 15,bounds:Tuple[float,float] = (-np.inf,np.inf),num_data: int = 10000):\n # Generating data from a distribution\n data = [ sampler() for _ in range(num_data) ]\n return self.from_data(b,data,n_terms,bounds)\n\n @classmethod\n def from_data(self,b: int,data,n_terms:int = 15,bounds:Tuple[float,float] = (-np.inf,np.inf)):\n # Generating Quantiles from \n quantiles = [ ( np.quantile(data,i/(b+1)) , i/(b+1) ) for i in range(1,b+1) ]\n return metalog(b,quantiles,n_terms,bounds)\n\n def sample_transform(self,sample:float):\n if self.kind == 'unbounded':\n return sample\n elif self.kind == 'lowerbounded':\n return self.bl + np.exp(sample)\n elif self.kind == 'upperbounded':\n return self.bu - np.exp(-1*sample)\n elif self.kind == 'bounded':\n return (self.bl + self.bu*np.exp(sample))/(1+np.exp(sample))\n \n\n def sampler(self,kind = 'metalog'):\n # Sampling from a linear piecewise CDF.\n if kind == \"piecewise linear\":\n rn = U()\n idx = int(self.b*rn)\n if idx == self.b-1:\n return self.quantiles[self.b-1][0]\n else:\n return (self.quantiles[idx+1][0] - self.quantiles[idx][0])*(self.b*rn-idx) + self.quantiles[idx][0]\n \n elif kind == \"metalog\":\n rn 
= U()\n\n if rn == 0 and (self.kind == 'lowerbounded' or self.kind == 'bounded'):\n return self.bl\n if rn == 1 and (self.kind == 'upperbounded' or self.kind == 'bounded'):\n return self.bu\n\n lny = np.log(rn/(1-rn))\n y_ = rn - 0.5\n sample = 0.0\n a = self.a\n \n sample += a[0] + a[1]*lny\n if self.n_terms == 2:\n return self.sample_transform(sample)\n\n sample += a[2]*y_*lny\n if self.n_terms == 3:\n return self.sample_transform(sample)\n\n sample += a[3]*y_\n if self.n_terms == 4:\n return self.sample_transform(sample)\n\n for i in range(5,self.n_terms+1):\n if i%2:\n sample += a[i-1]*np.power( y_, (i-1)//2)\n else:\n sample += a[i-1]*np.power( y_, i//2-1 )*lny\n\n return self.sample_transform(sample)\n \n def distance(self,dist):\n assert(self.b == dist.b)\n distance = 0.0\n for i in range(self.b):\n temp = self.quantile_val[i] - dist.quantile_val[i]\n distance += np.abs(temp)\n return distance/self.b"
] |
[
[
"numpy.dot",
"numpy.log",
"numpy.abs",
"numpy.power",
"numpy.isposinf",
"numpy.quantile",
"numpy.isneginf",
"numpy.exp",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nimRobotics/fnirslib
|
[
"0273c0da5f4a41d7cf4dac0fc9686c38f2c7b0cd"
] |
[
"examples/EC2.py"
] |
[
"\"\"\"\nauthor: @nimrobotics\ndescription: calculates the effective connectivity between regions and plots them\n\"\"\"\n\nimport numpy as np\nimport scipy.io\nimport glob\nimport sys\nsys.path.append('../utils')\nfrom plots import plotData\n\ndir = \"./process3/\" #directory of the data\noutdir = 'process3/' #directory to save the plots\nregions = 3 #number of regions\nfiles = glob.glob(dir+'/*_.mat') # get all the files in the directory\nfor file in files:\n print('Processing condition: ', file)\n data = scipy.io.loadmat(file) #load data from the directory\n fval = data['fval'] #fval\n pval = data['pval'] #pval\n sig = data['sig'] #sig\n cd = data['cd'] #cd\n print('fval shape: ',fval.shape)\n print('\\nfval \\n',fval)\n print('pval shape: ',pval.shape)\n print('sig shape: ',sig.shape)\n print('\\nsig \\n',sig)\n print(cd.shape)\n\n # elementwise multiplication of fval and sig(0/1)\n fval_sig = np.multiply(fval, sig)\n print(fval_sig.shape)\n print('\\nfval_sig \\n',fval_sig)\n\n # fval_sig = np.mean(fval_sig, axis=2) # average over files\n # print(fval_sig.shape)\n # fval = np.mean(fval, axis=2)\n\n labels = ['PFC', 'PM-MC', 'VC'] #labels for the regions\n condition = file.split('/')[-1].split('.')[0] #get the condition name\n plot = plotData(fval_sig, labels, outdir, colormap='viridis', dpi=300, title='EC: '+condition, filename='EC_'+condition +'.png') \n plot.matrixPlot()\n plot.circularPlot()\n"
] |
[
[
"numpy.multiply"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jkkummerfeld/dstc7-noesis
|
[
"05a1952e2e92f690e4b81528bbc4ed45a9767a6e",
"05a1952e2e92f690e4b81528bbc4ed45a9767a6e"
] |
[
"noesis-tf/scripts/prepare_data.py",
"noesis-tf/model.py"
] |
[
"import os\n\nimport ijson\nimport functools\n\nimport tensorflow as tf\n\ntf.flags.DEFINE_integer(\n \"min_word_frequency\", 1, \"Minimum frequency of words in the vocabulary\")\n\ntf.flags.DEFINE_integer(\"max_sentence_len\", 160, \"Maximum Sentence Length\")\n\ntf.flags.DEFINE_string(\"train_in\", None, \"Path to input data file\")\ntf.flags.DEFINE_string(\"validation_in\", None, \"Path to validation data file\")\n\ntf.flags.DEFINE_string(\"train_out\", None, \"Path to output train tfrecords file\")\ntf.flags.DEFINE_string(\"validation_out\", None, \"Path to output validation tfrecords file\")\n\ntf.flags.DEFINE_string(\"vocab_path\", None, \"Path to save vocabulary txt file\")\ntf.flags.DEFINE_string(\"vocab_processor\", None, \"Path to save vocabulary processor\")\n\nFLAGS = tf.flags.FLAGS\n\nTRAIN_PATH = os.path.join(FLAGS.train_in)\nVALIDATION_PATH = os.path.join(FLAGS.validation_in)\n\ndef tokenizer_fn(iterator):\n return (x.split(\" \") for x in iterator)\n\n\ndef process_dialog(dialog):\n \"\"\"\n Add EOU and EOT tags between utterances and create a single context string.\n :param dialog:\n :return:\n \"\"\"\n\n row = []\n utterances = dialog['messages-so-far']\n\n # Create the context\n context = \"\"\n speaker = None\n for msg in utterances:\n if speaker is None:\n context += msg['utterance'] + \" __eou__ \"\n speaker = msg['speaker']\n elif speaker != msg['speaker']:\n context += \"__eot__ \" + msg['utterance'] + \" __eou__ \"\n speaker = msg['speaker']\n else:\n context += msg['utterance'] + \" __eou__ \"\n\n context += \"__eot__\"\n row.append(context)\n\n # Create the next utterance options and the target label\n correct_answer = dialog['options-for-correct-answers'][0]\n target_id = correct_answer['candidate-id']\n target_index = None\n for i, utterance in enumerate(dialog['options-for-next']):\n if utterance['candidate-id'] == target_id:\n target_index = i\n row.append(utterance['utterance'] + \" __eou__ \")\n\n if target_index is None:\n print('Correct answer not found in options-for-next - example {}. Setting 0 as the correct index'.format(dialog['example-id']))\n else:\n row.append(target_index)\n\n return row\n\n\ndef create_dialog_iter(filename):\n \"\"\"\n Returns an iterator over a JSON file.\n :param filename:\n :return:\n \"\"\"\n with open(filename, 'rb') as f:\n json_data = ijson.items(f, 'item')\n for entry in json_data:\n row = process_dialog(entry)\n yield row\n\ndef create_utterance_iter(input_iter):\n \"\"\"\n Returns an iterator over every utterance (context and candidates) for the VocabularyProcessor.\n :param input_iter:\n :return:\n \"\"\"\n for row in input_iter:\n all_utterances = []\n context = row[0]\n next_utterances = row[1:101]\n all_utterances.append(context)\n all_utterances.extend(next_utterances)\n for utterance in all_utterances:\n yield utterance\n\ndef create_vocab(input_iter, min_frequency):\n \"\"\"\n Creates and returns a VocabularyProcessor object with the vocabulary\n for the input iterator.\n \"\"\"\n vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(\n FLAGS.max_sentence_len,\n min_frequency=min_frequency,\n tokenizer_fn=tokenizer_fn)\n vocab_processor.fit(input_iter)\n return vocab_processor\n\n\ndef transform_sentence(sequence, vocab_processor):\n \"\"\"\n Maps a single sentence into the integer vocabulary. 
Returns a python array.\n \"\"\"\n return next(vocab_processor.transform([sequence])).tolist()\n\n\ndef create_example_new_format(row, vocab):\n \"\"\"\n Creates an example as a tensorflow.Example Protocol Buffer object.\n :param row:\n :param vocab:\n :return:\n \"\"\"\n context = row[0]\n next_utterances = row[1:101]\n target = row[-1]\n\n context_transformed = transform_sentence(context, vocab)\n context_len = len(next(vocab._tokenizer([context])))\n\n # New Example\n example = tf.train.Example()\n example.features.feature[\"context\"].int64_list.value.extend(context_transformed)\n example.features.feature[\"context_len\"].int64_list.value.extend([context_len])\n example.features.feature[\"target\"].int64_list.value.extend([target])\n\n # Distractor sequences\n for i, utterance in enumerate(next_utterances):\n opt_key = \"option_{}\".format(i)\n opt_len_key = \"option_{}_len\".format(i)\n # Utterance Length Feature\n opt_len = len(next(vocab._tokenizer([utterance])))\n example.features.feature[opt_len_key].int64_list.value.extend([opt_len])\n # Distractor Text Feature\n opt_transformed = transform_sentence(utterance, vocab)\n example.features.feature[opt_key].int64_list.value.extend(opt_transformed)\n return example\n\n\ndef create_tfrecords_file(input_filename, output_filename, example_fn):\n \"\"\"\n Creates a TFRecords file for the given input data and\n example transofmration function\n \"\"\"\n writer = tf.python_io.TFRecordWriter(output_filename)\n print(\"Creating TFRecords file at {}...\".format(output_filename))\n for i, row in enumerate(create_dialog_iter(input_filename)):\n x = example_fn(row)\n writer.write(x.SerializeToString())\n writer.close()\n print(\"Wrote to {}\".format(output_filename))\n\n\ndef write_vocabulary(vocab_processor, outfile):\n \"\"\"\n Writes the vocabulary to a file, one word per line.\n \"\"\"\n vocab_size = len(vocab_processor.vocabulary_)\n with open(outfile, \"w\") as vocabfile:\n for id in range(vocab_size):\n word = vocab_processor.vocabulary_._reverse_mapping[id]\n vocabfile.write(word + \"\\n\")\n print(\"Saved vocabulary to {}\".format(outfile))\n\n\nif __name__ == \"__main__\":\n print(\"Creating vocabulary...\")\n input_iter = create_dialog_iter(TRAIN_PATH)\n input_iter = create_utterance_iter(input_iter)\n vocab = create_vocab(input_iter, min_frequency=FLAGS.min_word_frequency)\n print(\"Total vocabulary size: {}\".format(len(vocab.vocabulary_)))\n\n # Create vocabulary.txt file\n write_vocabulary(\n vocab, os.path.join(FLAGS.vocab_path))\n\n # Save vocab processor\n vocab.save(os.path.join(FLAGS.vocab_processor))\n\n # Create train.tfrecords\n create_tfrecords_file(\n input_filename=TRAIN_PATH,\n output_filename=os.path.join(FLAGS.train_out),\n example_fn=functools.partial(create_example_new_format, vocab=vocab))\n\n # Create validation.tfrecords\n create_tfrecords_file(\n input_filename=VALIDATION_PATH,\n output_filename=os.path.join(FLAGS.validation_out),\n example_fn=functools.partial(create_example_new_format, vocab=vocab))",
"import tensorflow as tf\n\n\ndef get_id_feature(features, key, len_key, max_len):\n ids = features[key]\n ids_len = tf.squeeze(features[len_key], [1])\n ids_len = tf.minimum(ids_len, tf.constant(max_len, dtype=tf.int64))\n return ids, ids_len\n\n\ndef create_train_op(loss, hparams):\n def exp_decay(learning_rate, global_step):\n return tf.train.exponential_decay(learning_rate, global_step, decay_steps=hparams.decay_steps, decay_rate=hparams.decay_rate,\n staircase=hparams.staircase, name=\"lr_decay\")\n train_op = tf.contrib.layers.optimize_loss(\n loss=loss,\n global_step=tf.contrib.framework.get_global_step(),\n learning_rate=hparams.learning_rate,\n clip_gradients=10.0,\n optimizer=hparams.optimizer,\n learning_rate_decay_fn=exp_decay\n )\n return train_op\n\n\ndef create_model_fn(hparams, model_impl):\n def model_fn(features, targets, mode):\n context, context_len = get_id_feature(\n features, \"context\", \"context_len\", hparams.max_context_len)\n\n all_utterances = []\n all_utterances_lens = []\n\n for i in range(100):\n option, option_len = get_id_feature(features,\n \"option_{}\".format(i),\n \"option_{}_len\".format(i),\n hparams.max_utterance_len)\n all_utterances.append(option)\n all_utterances_lens.append(option_len)\n\n if mode == tf.contrib.learn.ModeKeys.TRAIN:\n probs, loss = model_impl(\n hparams,\n mode,\n context,\n context_len,\n all_utterances,\n tf.transpose(tf.stack(all_utterances_lens, axis=0)),\n targets,\n hparams.batch_size)\n train_op = create_train_op(loss, hparams)\n return probs, loss, train_op\n\n if mode == tf.contrib.learn.ModeKeys.INFER:\n\n probs, loss = model_impl(\n hparams,\n mode,\n tf.concat(0, context),\n tf.concat(0, context_len),\n tf.concat(0, all_utterances),\n tf.concat(0, all_utterances_lens),\n None,\n hparams.eval_batch_size)\n\n split_probs = tf.split(0, features[\"len\"], probs)\n probs = tf.concat(1, split_probs)\n\n return probs, 0.0, None\n\n if mode == tf.contrib.learn.ModeKeys.EVAL:\n probs, loss = model_impl(\n hparams,\n mode,\n context,\n context_len,\n all_utterances,\n tf.transpose(tf.stack(all_utterances_lens, axis=0)),\n targets,\n hparams.eval_batch_size)\n\n shaped_probs = probs\n\n return shaped_probs, loss, None\n\n return model_fn\n"
] |
[
[
"tensorflow.train.Example",
"tensorflow.flags.DEFINE_string",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.contrib.learn.preprocessing.VocabularyProcessor",
"tensorflow.flags.DEFINE_integer"
],
[
"tensorflow.constant",
"tensorflow.concat",
"tensorflow.stack",
"tensorflow.contrib.framework.get_global_step",
"tensorflow.squeeze",
"tensorflow.train.exponential_decay",
"tensorflow.split"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
xy6g13/xscale
|
[
"a0c5809b6005a2016ab85849fa33e24c3fc19518"
] |
[
"xscale/_utils.py"
] |
[
"\"\"\"This is where useful internal functions are stored.\n\"\"\"\n# Python 2/3 compatibility\nfrom __future__ import absolute_import, division, print_function\nfrom collections import Iterable\n# Pandas\nimport pandas as pd\n# Numpy\nimport numpy as np\n# Warnings\nimport warnings\n\n\ndef is_dict_like(value):\n\treturn hasattr(value, '__getitem__') and hasattr(value, 'keys')\n\n\ndef is_scalar(value):\n\t\"\"\" Whether to treat a value as a scalar. Any non-iterable, string, or 0-D array \"\"\"\n\treturn (getattr(value, 'ndim', None) == 0\n\t or isinstance(value, str)\n\t or not isinstance(value, Iterable))\n\n\ndef is_iterable(value):\n\treturn isinstance(value, Iterable) and not isinstance(value, str)\n\n\ndef homogeneous_type(seq):\n iseq = iter(seq)\n first_type = type(next(iseq))\n return first_type if all((type(x) is first_type) for x in iseq) else False\n\n\ndef infer_n_and_dims(obj, n, dims):\n\t\"\"\"Logic for setting the window properties\"\"\"\n\t#TODO: Finish this function\n\tif n is None:\n\t\tif dims is None:\n\t\t\tnew_n = obj.shape\n\t\t\tnew_dims = obj.dims\n\t\telif isinstance(dims, str):\n\t\t\tnew_n = (obj.shape[obj.get_axis_num(dims)], )\n\t\t\tnew_dims = (dims, )\n\t\telse:\n\t\t\tnew_n = tuple()\n\t\t\tnew_dims = tuple()\n\t\t\tfor di in dims:\n\t\t\t\tif di in obj.dims:\n\t\t\t\t\tnew_n += (obj.shape[obj.get_axis_num(di)], )\n\t\t\t\t\tnew_dims += (di, )\n\t\t\t\telse:\n\t\t\t\t warnings.warn(\"Cannot find dimension %s in DataArray\" % di)\n\telif is_dict_like(n):\n\t\tnew_n = tuple(n.values())\n\t\tnew_dims = tuple(n.keys())\n\telif isinstance(n, int):\n\t\tif dims is None:\n\t\t\tnew_n = tuple([n for number in range(obj.ndim)])\n\t\t\tnew_dims = obj.dims\n\t\telif isinstance(dims, str):\n\t\t\tif dims in obj.dims:\n\t\t\t\tnew_n = (n, )\n\t\t\t\tnew_dims = (dims, )\n\t\t\telse:\n\t\t\t\twarnings.warn(\"Cannot find dimension %s in DataArray\" % dims)\n\t\telif isinstance(dims, Iterable):\n\t\t\tnew_n = tuple()\n\t\t\tnew_dims = tuple()\n\t\t\tfor di in dims:\n\t\t\t\tif di in obj.dims:\n\t\t\t\t\tnew_n += (n, )\n\t\t\t\t\tnew_dims += (di,)\n\t\t\t\telse:\n\t\t\t\t\twarnings.warn(\"Cannot find dimension %s in DataArray\" % di)\n\t\telse:\n\t\t\traise TypeError(\"This type of option is not supported for the \"\n\t\t\t \"second argument\")\n\telif is_iterable(n):\n\t\tif is_iterable(dims):\n\t\t\tif len(n) == len(dims):\n\t\t\t\tnew_n = tuple()\n\t\t\t\tnew_dims = tuple()\n\t\t\t\tfor i, di in zip(n, dims):\n\t\t\t\t\tif di in obj.dims:\n\t\t\t\t\t\tnew_n += (i,)\n\t\t\t\t\t\tnew_dims += (di,)\n\t\t\t\t\telse:\n\t\t\t\t\t\twarnings.warn(\"Cannot find dimension %s in \"\n\t\t\t\t\t\t \"DataArray\" % di)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Dimensions must have the same length as the \"\n\t\t\t\t \"first argument\")\n\t\telse:\n\t\t\traise TypeError(\"Dimensions must be specificed with an Iterable\")\n\telse:\n\t\traise TypeError(\"This type of option is not supported for the first \"\n\t\t \"argument\")\n\treturn new_n, new_dims\n\n\ndef infer_arg(arg, dims, default_value=None):\n\tnew_arg = dict()\n\tif arg is None:\n\t\tif isinstance(dims, str):\n\t\t\tnew_arg[dims] = default_value\n\t\telse:\n\t\t\tnew_arg = {di: default_value for di in dims}\n\telif is_scalar(arg):\n\t\tif isinstance(dims, str):\n\t\t\tnew_arg[dims] = arg\n\t\telse:\n\t\t\tnew_arg = {di: arg for di in dims}\n\telif is_dict_like(arg):\n\t\tif isinstance(dims, str):\n\t\t\tnew_arg[dims] = arg[dims]\n\t\telse:\n\t\t\tfor di in dims:\n\t\t\t\ttry:\n\t\t\t\t\tnew_arg[di] = arg[di]\n\t\t\t\texcept 
(KeyError, IndexError):\n\t\t\t\t\tnew_arg[di] = default_value\n\telif isinstance(arg, Iterable) and not isinstance(arg, str):\n\t\tif isinstance(dims, str):\n\t\t\tif len(arg) == 1:\n\t\t\t\tnew_arg[dims] = arg[0]\n\t\t\telif not homogeneous_type(arg):\n\t\t\t\tnew_arg[dims] = arg\n\t\t\telse:\n\t\t\t\traise ValueError(\"The two arguments do not coincide\")\n\t\telse:\n\t\t\tif homogeneous_type(arg):\n\t\t\t\tfor i, di in enumerate(dims):\n\t\t\t\t\t# if not len(dims) == len(arg):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tnew_arg[di] = arg[i]\n\t\t\t\t\texcept (KeyError, IndexError):\n\t\t\t\t\t\tnew_arg[di] = default_value\n\t\t\t\t\texcept TypeError:\n\t\t\t\t\t\tnew_arg[dims[di]] = arg\n\t\t\telse:\n\t\t\t\tfor i, di in enumerate(dims):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tnew_arg[di] = arg\n\t\t\t\t\texcept TypeError:\n\t\t\t\t\t\tnew_arg[dims[di]] = arg\n\telse:\n\t\traise TypeError(\"This type of argument is not supported for the second \"\n\t\t \"argument\")\n\treturn new_arg\n\n\ndef get_dx(obj, dim, unit='s'):\n\t\"\"\"Get the resolution over one the dimension dim.\n\tWarns the user if the coordinate is not evenly spaced.\n\n\tParameters\n\t----------\n\tobj: `xarray.DataSet` or `xarray.DataArray`\n\t\tSelf-described data with coordinates corresponding to the dimensions\n\tdim:\n\t\tDimension along which compute the delta\n\tunit: {'D', 'h', 'm', 's', 'ms', 'us', 'ns'}, optional\n\t\tIf the fit the coordinates associated to the dimension is a\n\t\tnumpy.datetime object, the unit of the time delta may be specified here\n\n\tReturns\n\t-------\n\tdx: float\n\t\tThe resolution of the coordinates associated to the dimension\n\t\"\"\"\n\tx = np.asarray(obj[dim])\n\tif pd.core.dtypes.common.is_datetime64_dtype(x):\n\t\tdx = pd.Series(x[1:]) - pd.Series(x[:-1])\n\t\tdx /= np.timedelta64(1, unit)\n\telse:\n\t\tdx = np.diff(x)\n\t#TODO: Small issue this the function commented below\n\t#if not np.allclose(dx, dx[0]):\n\t#\twarnings.warn(\"Coordinate %s is not evenly spaced\" % dim)\n\treturn dx[0]\n"
] |
[
[
"pandas.Series",
"numpy.asarray",
"numpy.timedelta64",
"numpy.diff",
"pandas.core.dtypes.common.is_datetime64_dtype"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
NunoEdgarGFlowHub/cleverhans
|
[
"c8fa1510cf00039404956a1b63192f1b759fc625"
] |
[
"tutorials/mnist_tutorial_tf.py"
] |
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport keras\nfrom keras import backend\n\nimport tensorflow as tf\nfrom tensorflow.python.platform import app\nfrom tensorflow.python.platform import flags\n\nfrom cleverhans.utils_mnist import data_mnist\nfrom cleverhans.utils_tf import model_train, model_eval, batch_eval\nfrom cleverhans.attacks import fgsm\nfrom cleverhans.utils import cnn_model\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer('nb_epochs', 6, 'Number of epochs to train model')\nflags.DEFINE_integer('batch_size', 128, 'Size of training batches')\nflags.DEFINE_float('learning_rate', 0.1, 'Learning rate for training')\n\n\ndef main(argv=None):\n \"\"\"\n MNIST cleverhans tutorial\n :return:\n \"\"\"\n\n # Set TF random seed to improve reproducibility\n tf.set_random_seed(1234)\n\n if not hasattr(backend, \"tf\"):\n raise RuntimeError(\"This tutorial requires keras to be configured\"\n \" to use the TensorFlow backend.\")\n\n # Image dimensions ordering should follow the Theano convention\n if keras.backend.image_dim_ordering() != 'tf':\n keras.backend.set_image_dim_ordering('tf')\n print(\"INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to \"\n \"'th', temporarily setting to 'tf'\")\n\n # Create TF session and set as Keras backend session\n sess = tf.Session()\n keras.backend.set_session(sess)\n\n # Get MNIST test data\n X_train, Y_train, X_test, Y_test = data_mnist()\n\n assert Y_train.shape[1] == 10.\n label_smooth = .1\n Y_train = Y_train.clip(label_smooth / 9., 1. - label_smooth)\n\n # Define input TF placeholder\n x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))\n y = tf.placeholder(tf.float32, shape=(None, 10))\n\n # Define TF model graph\n model = cnn_model()\n predictions = model(x)\n print(\"Defined TensorFlow model graph.\")\n\n def evaluate():\n # Evaluate the accuracy of the MNIST model on legitimate test examples\n eval_params = {'batch_size': FLAGS.batch_size}\n accuracy = model_eval(sess, x, y, predictions, X_test, Y_test,\n args=eval_params)\n assert X_test.shape[0] == 10000, X_test.shape\n print('Test accuracy on legitimate test examples: ' + str(accuracy))\n\n # Train an MNIST model\n train_params = {\n 'nb_epochs': FLAGS.nb_epochs,\n 'batch_size': FLAGS.batch_size,\n 'learning_rate': FLAGS.learning_rate\n }\n model_train(sess, x, y, predictions, X_train, Y_train,\n evaluate=evaluate, args=train_params)\n\n # Craft adversarial examples using Fast Gradient Sign Method (FGSM)\n adv_x = fgsm(x, predictions, eps=0.3)\n eval_params = {'batch_size': FLAGS.batch_size}\n X_test_adv, = batch_eval(sess, [x], [adv_x], [X_test], args=eval_params)\n assert X_test_adv.shape[0] == 10000, X_test_adv.shape\n\n # Evaluate the accuracy of the MNIST model on adversarial examples\n accuracy = model_eval(sess, x, y, predictions, X_test_adv, Y_test,\n args=eval_params)\n print('Test accuracy on adversarial examples: ' + str(accuracy))\n\n print(\"Repeating the process, using adversarial training\")\n # Redefine TF model graph\n model_2 = cnn_model()\n predictions_2 = model_2(x)\n adv_x_2 = fgsm(x, predictions_2, eps=0.3)\n predictions_2_adv = model_2(adv_x_2)\n\n def evaluate_2():\n # Evaluate the accuracy of the adversarialy trained MNIST model on\n # legitimate test examples\n eval_params = {'batch_size': FLAGS.batch_size}\n accuracy = model_eval(sess, x, y, predictions_2, X_test, Y_test,\n args=eval_params)\n print('Test accuracy on legitimate test examples: ' 
+ str(accuracy))\n\n # Evaluate the accuracy of the adversarially trained MNIST model on\n # adversarial examples\n accuracy_adv = model_eval(sess, x, y, predictions_2_adv, X_test,\n Y_test, args=eval_params)\n print('Test accuracy on adversarial examples: ' + str(accuracy_adv))\n\n # Perform adversarial training\n model_train(sess, x, y, predictions_2, X_train, Y_train,\n predictions_adv=predictions_2_adv, evaluate=evaluate_2,\n args=train_params)\n\n\nif __name__ == '__main__':\n app.run()\n"
] |
[
[
"tensorflow.python.platform.app.run",
"tensorflow.placeholder",
"tensorflow.python.platform.flags.DEFINE_float",
"tensorflow.python.platform.flags.DEFINE_integer",
"tensorflow.Session",
"tensorflow.set_random_seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"0.12",
"1.0",
"1.2",
"1.4"
]
}
] |
s-jun/OSS_Term_Project
|
[
"47747a92944f7f94f1393c9072f7ee9034de090a",
"47747a92944f7f94f1393c9072f7ee9034de090a"
] |
[
"predict_run.py",
"user_main.py"
] |
[
"import data_manager\nimport os\nimport pandas as pd\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\nif __name__ == '__main__':\n interval_list = ['day', 'hour12', 'hour6', 'hour']\n\n for interval in interval_list:\n print(interval)\n df = data_manager.get_price(interval)\n pred_file = f'./data/{interval}_predict.csv'\n\n\n time = str(df.index[-1])\n\n if os.path.exists(pred_file):\n predict = pd.read_csv(pred_file)\n if predict.time.iloc[-1] == time:\n print('already predicted')\n else:\n print('predict yet')\n data_manager.update_predict(interval)\n else:\n print('predict yet')\n data_manager.update_predict(interval)\n",
"import sys\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nimport chart\nfrom data_manager import read_predict\n\nform_class = uic.loadUiType(\"user.ui\")[0]\n\nclass WindowClass(QMainWindow, form_class):\n def __init__(self):\n super().__init__()\n self.setupUi(self)\n self.initUI()\n self.comboBox.activated[str].connect(self.clicked)\n\n def initUI(self):\n self.fig = plt.Figure()\n combo = self.comboBox.currentText()\n self.canvas = FigureCanvas(self.fig)\n self.chart.addWidget(self.canvas)\n chart.draw_chart(self, combo)\n self.canvas.draw()\n self.prediction.append(read_predict(combo))\n def clicked(self, text):\n self.clear()\n self.prediction.append(read_predict(text))\n self.fig = plt.Figure()\n self.canvas = FigureCanvas(self.fig)\n self.chart.addWidget(self.canvas)\n chart.draw_chart(self, text)\n self.canvas.draw()\n\n def clear(self):\n self.prediction.clear()\n\n tmp = self.chart\n if tmp is not None:\n while tmp.count():\n item = tmp.takeAt(0)\n\n widget = item.widget()\n\n if widget is not None:\n widget.deleteLater()\n else:\n self.clearvbox(item.layout())\n\nif __name__== \"__main__\" :\n app = QApplication(sys.argv)\n\n myWindow = WindowClass()\n\n myWindow.show()\n\n app.exec_()"
] |
[
[
"pandas.read_csv"
],
[
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"matplotlib.pyplot.Figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zabaras/bayesmultiscale
|
[
"c2f7d36e8ff08a28e5da0809029143a9dd0e2777",
"c2f7d36e8ff08a28e5da0809029143a9dd0e2777",
"c2f7d36e8ff08a28e5da0809029143a9dd0e2777"
] |
[
"HM-DenseED/plot/velocity_src/utils.py",
"Bayesian-HM-DenseED/models/model_train.py",
"HM-DenseED/utils/mcs_data_upload.py"
] |
[
"'''\nReference: https://github.com/adsodemelk/PRST\n'''\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division\nimport copy\n\n__all__ = [\"rldecode\", \"rlencode\", \"units\", \"mcolon\", \"recursive_diff\", \"gridtools\"]\n\nimport plot.velocity_src.gridtools\n\nimport numpy as np\nimport scipy.sparse\nfrom scipy.sparse import csr_matrix\nimport scipy.sparse as sps\n\nclass Struct(dict):\n \"\"\"\n MATLAB-struct-like object.\n\n Source: http://stackoverflow.com/questions/35988/\n\n \"\"\"\n def __init__(self, **kwargs):\n super(Struct, self).__init__(**kwargs)\n self.__dict__ = self\n\n\ndef rlencode(A, axis=0):\n \"\"\"\n Compute run length encoding of array A along axis.\n\n Synopsis:\n A, n = rlencode(A)\n A, n = rlencode(A, axis)\n\n Arguments:\n A (np.ndarray): Array to be encoded.\n axis (Optional[int]): Axis of A where run length encoding is done.\n Default value: axis=0\n\n Example (default axis):\n >>> A = np.array([\n ... [1, 2, 3, 4],\n ... [1, 2, 3, 4],\n ... [3, 4, 5, 6],\n ... [3, 3, 3, 3],\n ... [3, 3, 4, 5],\n ... [3, 3, 4, 5]])\n >>> A, n = rlencode(A, 0)\n >>> print(A)\n [[1 2 3 4]\n [3 4 5 6]\n [3 3 3 3]\n [3 3 4 5]]\n >>> print(n)\n [2 1 1 2]\n\n Example (j-axis):\n >>> A = np.array([\n ... [1,1,3,3,3,3],\n ... [2,2,4,3,3,3],\n ... [3,3,5,3,4,4],\n ... [4,4,6,3,5,5]])\n >>> A, n = rlencode(A, 1)\n >>> print(A)\n [[1 3 3 3]\n [2 4 3 3]\n [3 5 3 4]\n [4 6 3 5]]\n >>> print(n)\n [2 1 1 2]\n \"\"\"\n # Let the relevant axis be the first axis\n B = np.swapaxes(A, 0, axis)\n\n # Flatten axes that are normal to the encoding axis\n B = B.reshape([B.shape[0],-1])\n\n # Pick indices where the next index is different\n i = np.append(np.where(np.any(B[:-1] != B[1:], axis=1)), B.shape[0]-1)\n\n # Find the number of repetitions\n n = np.diff(np.insert(i, 0, -1))\n\n # Pick necessary slices of the encoding axis\n return A.take(i, axis=axis), n\n\n\ndef rldecode(A, n, axis=0):\n \"\"\"\n Decompresses run length encoding of array A along axis.\n\n Synopsis:\n B = rldecode(A, n, axis)\n B = rldecode(A, n) # axis assumed to be 0\n\n Arguments:\n A (np.ndarray): Encoded array\n n (np.ndarray): Repetition of each layer along an axis.\n axis (Optional[int]): Axis of A where run length decoding is done.\n\n Returns:\n Uncompressed matrix\n\n Example (1D-array) along default axis:\n >>> A = np.array([1,4,5])\n >>> n = np.array([4,2,1])\n >>> print(rldecode(A, n))\n [1 1 1 1 4 4 5]\n\n Example (2D-array) along j-axis:\n >>> A = np.array([\n ... [1,3,3,3],\n ... [2,4,3,3],\n ... [3,5,3,4],\n ... [4,6,3,5]])\n >>> n = np.array([2,1,1,2])\n >>> print(rldecode(A, n, axis=1))\n [[1 1 3 3 3 3]\n [2 2 4 3 3 3]\n [3 3 5 3 4 4]\n [4 4 6 3 5 5]]\n \"\"\"\n assert n.size > 0, \"Length array was empty.\"\n # repeat functions take 1d array\n if n.ndim != 1:\n assert n.ndim <= 2\n assert n.shape[0] == 1 or n.shape[1] == 1\n n = n.ravel()\n return A.repeat(n, axis=axis)\n\ndef mcolon(lo, hi, s=None):\n \"\"\"\n Compute concatenated ranges.\n\n Synopsis:\n mcolon(lo, hi)\n mcolon(lo, hi, stride)\n\n Arguments:\n lo (ndarray):\n 1d array of lower bounds\n hi (ndarray):\n 1d array of upper bounds\n s (Optional[ndarray]):\n 1d array of strides. 
Default = np.ones(lo.shape) (unit strides).\n\n Returns:\n np.r_[lo[0]:hi[0], ..., lo[-1]:hi[-1]]\n np.r_[lo[0]:hi[0]:s[0], ..., lo[-1]:hi[-1]:s[-1]]\n (The NumPy r_ index trick builds a concatenated array of ranges.)\n\n Example:\n >>> lo = np.array([0,0,0,0])\n >>> hi = np.array([2,3,4,5])\n >>> ind = mcolon(lo, hi)\n >>> np.array_equal(ind, np.array([0,1,0,1,2,0,1,2,3,0,1,2,3,4]))\n True\n \"\"\"\n if s is None:\n ranges = [range(l,h) for (l,h) in zip(lo,hi)]\n else:\n ranges = [range(l,h,st) for (l,h,st) in zip(lo,hi,s)]\n return np.concatenate(ranges)\n\ndef recursive_diff(A, B, indent=0):\n \"\"\"\n Shows which attributes differ between two objects. Recursive.\n\n Synopsis:\n recursive_diff(A, B)\n\n Example:\n >> from prst.gridprocessing import cartGrid\n >> G, V = cartGrid([3,3,3]), cartGrid([3,3,4])\n >> recursive_diff(G, V)\n ====== Recursive comparison ======\n gridType\n Equal, (list,list)\n cells\n facePos\n NOT EQUAL, (ndarray,ndarray)\n num\n NOT EQUAL, (int,int)\n indexMap\n NOT EQUAL, (ndarray,ndarray)\n ...\n\n \"\"\"\n def pprint(*args, **kwargs):\n print(\" \"*indent, *args, **kwargs)\n\n if indent == 0:\n print()\n print(\"====== Recursive comparison ======\")\n\n # For classes, try to get dict attribute\n try:\n A = A.__dict__\n except:\n pass\n try:\n B = B.__dict__\n except:\n pass\n if isinstance(A, dict) and isinstance(B, dict):\n # Descend into attributes which exist in both and are dicts. Print them first.\n pass\n inA = set(A.keys())\n inB = set(B.keys())\n notInA = inB - inA\n notInB = inA - inB\n inBoth = inA & inB\n # Print attributes only in A\n if notInA:\n pprint(\"A MISSING ATTRIBUTES:\", notInA)\n # Print attributes only in B\n if notInB:\n pprint(\"B MISSING ATTRIBUTES:\", notInB)\n # Recursively do the same with common attributes\n for attr in inBoth:\n pprint(attr)\n recursive_diff(A[attr], B[attr], indent+2)\n\n else:\n # Compare A, B for equality\n equal = False\n try:\n equal = None\n close = None\n if isinstance(A, np.ndarray) and isinstance(B, np.ndarray):\n equal = np.array_equal(A, B)\n close = np.allclose(A, B)\n else:\n equal = A == B\n if equal:\n pprint(\"Equal, \", end=\"\")\n else:\n pprint(\"NOT EQUAL, \", end=\"\")\n if close:\n print(\"(BUT APPROXIMATELY EQUAL)\", end=\"\")\n except:\n pprint(\"NOT COMPARABLE, \", end=\"\")\n\n print(\"(\"+A.__class__.__name__+\",\"+B.__class__.__name__+\")\")\n\n\nclass ADI(object):\n \"\"\"ADI: Automatic DIfferentiation\n\n Simple implementation of automatic differentiation for easy construction\n of Jacobian matrices.\n\n Synopsis:\n x = ADI(value, jacobian)\n\n Arguments:\n value(np.ndarray):\n The numerical value of the object. Must be a NumPy column array.\n Not compatible with matrices (neither np.matrix nor\n scipy.sparse.spmatrix).\n\n jacobian(list[scipy.sparse.csr_matrix]):\n The Jacobian of the object. Split into parts to improve\n performance.\n\n Comment:\n This class is typically instantiated for a set of variables using\n initVariablesADI, not by itself.\n\n Many methods found in `np.ndarray` are also implemented by ADI. Example:\n\n x, = initVariablesADI(np.array([[2, 3, 4]]).T)\n y = x.log()\n z = x.sum()\n\n Using \"np.\" methods is not supported yet, e.g., `np.dot(A, x)` where x\n is an ADI object will not work as expected, and is not recommended. A\n compatability layer, `prst.utils.npad` is provided. `npad.dot(A, x)`\n will work correctly for any number of AD arguments, and uses `np.dot(A,\n x)` if neither arguments are AD objects. 
Future versions of NumPy\n (>0.12) will most likely deprecate `npad` with the __numpy_ufunc__\n functionality.\n\n See also:\n initVariablesADI\n \"\"\"\n # Requires __numpy_ufunc__ for syntactical sugar. Hopefully will be added to NumPy 1.12...\n # https://github.com/numpy/numpy/issues/7519\n\n __array_priority__ = 10000\n ndim = 2\n\n def __init__(self, val, jac):\n self.val = val\n self.jac = jac\n if not isinstance(self.jac, list):\n self.jac = [self.jac,]\n\n def __repr__(self):\n jacstring = str([block.shape for block in self.jac])\n return \"(val: {0}.T, jac block sizes: {1})\".format(self.val.T, jacstring)\n\n def pprint(self, name=None):\n \"\"\"\n Pretty-print full matrices with limited decimals.\n\n Example:\n\n import numpy as np\n from prst.utils import initVariablesADI\n\n x0 = np.array([[1,2,3,2,3]]).T\n x, = initVariablesADI(x0)\n y = x**2\n y.pprint()\n\n Output:\n\n ADI properties\n val: [[1 4 9 4 9]].T\n\n jac[0] [[ 2. 0. 0. 0. 0.]\n [ 0. 4. 0. 0. 0.]\n [ 0. 0. 6. 0. 0.]\n [ 0. 0. 0. 4. 0.]\n [ 0. 0. 0. 0. 6.]]\n \"\"\"\n namestr = \"\"\n if name:\n namestr = name + \" \"\n lines = [\n namestr + \"ADI properties\",\n \"\\tval: \" + str(self.val.T) + \".T\",\n ]\n for i, j in enumerate(self.jac):\n lines.append(\"\\n\\tjac[\" + str(i) + \"]\" + \"\\t\" + str(j.toarray()).replace(\"\\n\", \"\\n\\t\\t\"))\n lines.append(\"\")\n print(\"\\n\".join(lines))\n\n def copy(self):\n return copy.deepcopy(self)\n\n #def __len__(self):\n #raise NotImplementedError(\"Use shape[0]. See http://stackoverflow.com/questions/37529715/\")\n\n @property\n def shape(self):\n return self.val.shape\n\n def __ge__(u, v):\n try:\n return u.val >= v.val\n except AttributeError:\n return u.val >= v\n\n def __gt__(u, v):\n try:\n return u.val > v.val\n except AttributeError:\n return u.val > v\n\n def __le__(u, v):\n try:\n return u.val <= v.val\n except AttributeError:\n return u.val <= v\n\n def __lt__(u, v):\n try:\n return u.val < v.val\n except AttributeError:\n return u.val < v\n\n def __pos__(u): # +u\n return u.copy()\n\n def __neg__(u): # -u\n return ADI(-u.val, [-j for j in u.jac])\n\n def __add__(u, v): # u + v\n if isinstance(v, ADI):\n if u.val.shape[0] == v.val.shape[0]:\n return ADI(u.val + v.val, [ju+jv for (ju,jv) in zip(u.jac, v.jac)])\n if v.val.shape[0] == 1:\n # Tile v.jac to same length as u.jac since sparse matrices\n # don't broadcast properly.\n # https://github.com/scipy/scipy/issues/2128\n vjac = [sps.bmat([[j]]*len(u.val)) for j in v.jac]\n retjac = [ju+jv for (ju,jv) in zip(u.jac, vjac)]\n return ADI(u.val+v.val, retjac)\n if u.val.shape[0] == 1:\n # Vice versa, this time tile u instead\n ujac = [sps.bmat([[j]]*len(v.val)) for j in u.jac]\n retjac = [ju+jv for (ju,jv) in zip(ujac, v.jac)]\n return ADI(u.val+v.val, retjac)\n raise ValueError(\"Dimension mismatch\")\n # v isn't AD object\n v = np.atleast_2d(v)\n return ADI(u.val + v, copy.deepcopy(u.jac))\n\n def __radd__(v, u): # u + v\n return v.__add__(u)\n\n def __sub__(u, v):\n return u.__add__(-v)\n\n def __rsub__(v, u): # u - v\n return (-v).__add__(u)\n\n # mul\n def __mul__(u, v):\n \"\"\"Hadamard product u*v.\"\"\"\n if isinstance(v, ADI):\n if len(u.val) == len(v.val):\n # Note: scipy.sparse.diags has changed parameters between\n # versions 0.16x and 0.17x. 
This code is only tested on 0.16x.\n # TODO test code in SciPy 0.17x\n uJv = [sps.diags([u.val.flat],[0])*jv for jv in v.jac] # MATRIX multiplication\n vJu = [sps.diags([v.val.flat],[0])*ju for ju in u.jac] # MATRIX multiplication\n jac = [a+b for (a,b) in zip(uJv, vJu)]\n return ADI(u.val*v.val, jac)\n if len(v.val) == 1:\n # Fix dimensions and recurse\n vval = np.tile(v.val, (u.val.shape[0],1) )\n vjac = [sps.bmat([[j]]*len(u.val)) for j in v.jac]\n return u.__mul__(ADI(vval, vjac))\n if len(u.val) == 1:\n # Fix dimensions and recurse\n uval = np.tile(u.val, (v.val.shape[0],1) )\n ujac = [sps.bmat([[j]]*len(v.val)) for j in u.jac]\n return ADI(uval, ujac).__mul__(v)\n raise ValueError(\"Dimension mismatch\")\n else:\n v = np.atleast_2d(v)\n if len(u.val) == 1:\n val = u.val * v\n jac = [sps.diags(v.flat,0)*sps.bmat([[j]]*len(v)) for j in u.jac]\n return ADI(val, jac)\n if len(v) == 1:\n return ADI(u.val*v, [v.flat[0]*ju for ju in u.jac])\n if len(u.val) == len(v):\n vJu = [sps.diags(v.flat, 0)*ju for ju in u.jac] # MATRIX multiplication\n return ADI(u.val*v, vJu)\n raise ValueError(\"Dimension mismatch\")\n\n def __rmul__(v, u):\n # u * v = v * u\n return v.__mul__(u)\n\n def dot(u, A): # u x A\n return _dot(u, A)\n\n def __pow__(u, v):\n return u._pow(u, v)\n\n # This method is static so that it can be called with non-ADI u\n # E.g. when calculating 2**u, where u is ADI.\n @staticmethod\n def _pow(u, v):\n \"\"\"Elementwise power, u**v.\"\"\"\n if not isinstance(v, ADI): # u is AD, v is a scalar or vector\n v = np.atleast_2d(v)\n tmp = v*u.val**(v-1)\n uvJac = [_spdiag(tmp)*ju for ju in u.jac]\n return ADI(u.val**v, uvJac)\n elif not isinstance(u, ADI): # u is a scalar, v is AD\n u = np.atleast_2d(u)\n tmp = u**v.val*np.log(u)\n uvJac = [sps.diags(tmp.flat, 0)*jv for jv in v.jac]\n return ADI(u**v.val, uvJac)\n else: # u and v are ADI objects of same length\n if len(u.val) != len(v.val):\n raise ValueError(\"Must be same length\")\n # d(u^v)/dx = diag(u^v o (v / u))*\n # + diag(u^v o log(u))*J\n tmp1 = u.val**v.val * v.val/u.val\n tmp2 = u.val**v.val * np.log(u.val)\n uvJacPart1 = [sps.diags(tmp1.flat, 0)*ju for ju in u.jac]\n uvJacPart2 = [sps.diags(tmp2.flat, 0)*jv for jv in v.jac]\n uvJac = [a+b for (a,b) in zip(uvJacPart1, uvJacPart2)]\n return ADI(u.val**v.val, uvJac)\n\n def __rpow__(v, u):\n \"\"\"u**v where u is not ADI.\"\"\"\n return v._pow(u, v)\n\n def __div__(u, v):\n raise DeprecationWarning(\"Add 'from __future__ import division'.\")\n\n def __truediv__(u, v):\n return u * v**(-1.0)\n\n def __rdiv__(v, u):\n raise DeprecationWarning(\"Add 'from __future__ import division'.\")\n\n def __rtruediv__(v, u):\n return u * v**(-1.0)\n\n def __getitem__(u, s):\n \"\"\"\n Slices the column array using NumPy syntax.\n\n Examples: (x is ADI object)\n\n x[(2,1),:]\n x[1]\n x[1,:]\n x[np.array([True,True,False])]\n x[np.array([False,False,False]),:]\n x[np.array([2,1,0]),:]\n x[np.array([2]),:]\n x[::-1]\n \"\"\"\n val = np.atleast_2d(u.val[s])\n if val.shape[0] != 1 and val.shape[1] != 1:\n raise ValueError(\"Slice type not supported\")\n if val.shape[1] != 1:\n val = val.T\n try:\n s = s[0]\n except TypeError:\n pass\n jac = [j[s,:] for j in u.jac]\n return ADI(val, jac)\n\n def __setitem__(u, s, v):\n \"\"\"\n Sets values in ADI vector.\n\n If the right side is non-ADI, the corresponding Jacobian rows are set to zero.\n If the right side is ADI, the corresponding Jacobian rows are overwritten.\n \"\"\"\n if isinstance(v, ADI):\n # This part is not so pretty, and could probably\n # 
be improved.\n            if u.val[s].ndim <= 1:\n                u.val[s] = v.val.ravel()\n            elif u.val[s].ndim == 2:\n                u.val[s] = v.val\n            else:\n                raise ValueError(\"This should never happen.\")\n            try:\n                s = s[0]\n            except TypeError:\n                pass\n            for i in range(len(u.jac)):\n                u.jac[i][s] = v.jac[i]\n        else:\n            u.val[s] = v\n            try:\n                s = s[0]\n            except TypeError:\n                pass\n            for i in range(len(u.jac)):\n                u.jac[i][s] = 0\n\n    def max(u):\n        \"\"\"Return the maximum element in the array.\"\"\"\n        i = np.argmax(u.val)\n        return ADI(np.atleast_2d(u.val[i,:]), [j[i,:] for j in u.jac])\n\n    def min(u):\n        \"\"\"Return the minimum element in the array.\"\"\"\n        i = np.argmin(u.val)\n        return ADI(np.atleast_2d(u.val[i,:]), [j[i,:] for j in u.jac])\n\n    def sum(u):\n        \"\"\"Return the sum of the array elements.\"\"\"\n        val = u.val.sum(keepdims=True)\n        jac = [sps.csr_matrix(j.sum(axis=0)) for j in u.jac]\n        return ADI(val, jac)\n\n    def sin(u):\n        \"\"\"Return element-wise sine of array.\"\"\"\n        val = np.sin(u.val)\n        cosval = np.cos(u.val)\n        jac = [sps.diags(cosval.flat, 0)*j for j in u.jac]\n        return ADI(val, jac)\n\n    def cos(u):\n        \"\"\"Return element-wise cosine of array.\"\"\"\n        val = np.cos(u.val)\n        msinval = -np.sin(u.val)\n        jac = [sps.diags(msinval.flat, 0)*j for j in u.jac]\n        return ADI(val, jac)\n\n    def exp(u):\n        val = np.exp(u.val)\n        jac = [sps.diags(val.flat, 0)*j for j in u.jac]\n        return ADI(val, jac)\n\n    def log(u):\n        val = np.log(u.val)\n        m = sps.diags((1/u.val).flat, 0)\n        jac = [m*j for j in u.jac]\n        return ADI(val, jac)\n\n    def sign(u):\n        return np.sign(u.val)\n\n    def abs(u):\n        val = np.abs(u.val)\n        sgn = np.sign(u.val)\n        jac = [sps.diags(sgn.flat, 0)*j for j in u.jac]\n        return ADI(val, jac)\n\n    def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs):\n        \"\"\"Placeholder method for future NumPy versions.\"\"\"\n        raise NotImplementedError(\"NumPy has finally added __numpy_ufunc__ support, but \"\n                                  \"PRST has not added support yet.\")\n\n# NumPy binary ufunc wrappers\ndef _dot(u, v):\n    \"\"\"Matrix multiplication.\"\"\"\n    if isinstance(u, ADI) and isinstance(v, ADI):\n        # u_ad, v_ad\n        assert u.val.shape[0] == v.val.shape[0] == 1, \"dot(ad,ad) only valid for 1x1 arguments\"\n        return u * v\n    elif isinstance(u, ADI) and not isinstance(v, ADI):\n        # u_ad, v\n        v = np.atleast_2d(v)\n        assert v.shape[0] == 1, \"dot(ad,vec) only valid for 1x1 vec.\"\n        return u*v\n    elif not isinstance(u, ADI) and isinstance(v, ADI):\n        # u, v_ad\n        if not hasattr(u, \"dot\"):\n            u = np.atleast_2d(u)\n        u_sp = sps.csr_matrix(u)\n        return ADI(u.dot(v.val), [u_sp*j for j in v.jac])\n    else:\n        # u, v\n        if hasattr(u, \"dot\"):\n            return u.dot(v)\n        return np.dot(u, v)\n\ndef _tile(A, reps):\n    if isinstance(A, ADI):\n        if len(reps) != 2 or reps[1] != 1:\n            raise TypeError(\"AD vectors can only be tiled vertically.\")\n        val = np.tile(A.val, reps)\n        jac = [sps.bmat([[j]]*reps[0]) for j in A.jac]\n        return ADI(val, jac)\n    else:\n        return np.tile(A, reps)\n\n# Numpy unary ufunc wrappers\n# The unary wrappers are all following the same formula, and can possibly be\n# removed entirely by making `npad` more magic with __getattr__.\ndef _sign(u):\n    if isinstance(u, ADI):\n        return u.sign()\n    else:\n        return np.sign(u)\n\ndef _abs(u):\n    \"\"\"np.abs for AD array.\"\"\"\n    if isinstance(u, ADI):\n        return u.abs()\n    else:\n        return np.abs(u)\n\ndef _exp(u):\n    \"\"\"np.exp for AD array.\"\"\"\n    if isinstance(u, ADI):\n        return u.exp()\n    else:\n        return np.exp(u)\n\n# NumPy n-ary functions\n\ndef _vstack(tup):\n    \"\"\"np.vstack for AD array.\"\"\"\n    vals = np.vstack((u.val for u in tup))\n    jacs = []\n    num_jacs = len(tup[0].jac)\n    for j in range(num_jacs):\n        jacs.append(sps.bmat([[u.jac[j]] for u in tup]))\n    return ADI(vals, jacs)\n\ndef _concatenate(tup, axis):\n    \"\"\"np.concatenate for AD array.\"\"\"\n    if axis != 0:\n        raise TypeError(\"ADI objects can only be concatenated vertically.\")\n    return _vstack(tup)\n\n# Register ufunc wrappers so they can be easily imported.\nnpad = Struct()\n# n-ary\nnpad.vstack = _vstack\nnpad.concatenate = _concatenate\n# binary\nnpad.dot = _dot\nnpad.tile = _tile\n# unary\nnpad.sign = _sign\nnpad.abs = _abs\n\ndef initVariablesADI(*variables):\n    \"\"\"\n    Returns AD (automatic differentiation) variables.\n\n    See `help(prst.utils.ADI)` for documentation.\n    \"\"\"\n    # Convert all inputs to column arrays\n    vals = list(variables)\n    for i in range(len(vals)):\n        vals[i] = np.atleast_2d(vals[i])\n        if vals[i].shape[1] == 0:\n            vals[i] = vals[i].reshape(-1,1)\n        elif vals[i].shape[1] != 1:\n            raise ValueError(\"AD variables must be column vectors.\")\n\n    numvals = np.array([len(val) for val in vals])\n    n = len(vals)\n\n    ret = [None]*n\n    for i in range(n):\n        nrows = numvals[i]\n        # Set Jacobians wrt other variables to zero-matrices\n        jac = [None]*n\n        for j in np.r_[0:i, (i+1):n]:\n            ncols = numvals[j]\n            jac[j] = scipy.sparse.csr_matrix((nrows, ncols))\n\n        # Set Jacobian of current variable wrt itself to the identity matrix.\n        jac[i] = scipy.sparse.identity(nrows, format=\"csr\")\n\n        ret[i] = ADI(vals[i], jac)\n    return ret\n\ndef _spdiag(val_column):\n    \"\"\"Improved version of scipy.sparse.diags.\"\"\"\n    if val_column.shape[0] == 0:\n        return sps.csr_matrix((1,0))\n    return sps.diags(val_column.flat, 0, format=\"csr\")\n",
"\"\"\"\nStein Variational Gradient Descent forHM-DenseED.\nReference: https://github.com/zabaras/cnn-surrogate/tree/master/models\n\"\"\" \n\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.parameter import Parameter\nfrom torch.distributions import Gamma\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom utils.misc import log_sum_exp, parameters_to_vector, vector_to_parameters\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nplt.switch_backend('agg')\nfrom time import time\nimport copy\nimport sys\nimport os\nimport gc\nimport math\nimport scipy.io as io\nfrom args import args, device\nfrom models.bdsmm import bdsmm\nfrom torch.autograd import Variable\nn_samples = args.n_samples\nlr = args.lr\nlr_noise = args.lr_noise\nntrain = args.ntrain\ndir = './models'\n\nclass Bayesian_model_train(object):\n \"\"\"Base class for Stein Variational Gradient Descent, with for-loops...\n The Bayesian neural network is defined in `Bayesian_model_NN.BayesNN` class. \n\n References:\n Liu, Qiang, and Dilin Wang. \"Stein variational gradient descent:\n A general purpose bayesian inference algorithm.\"\n Advances In Neural Information Processing Systems. 2016.\n\n Args:\n model (nn.Module): The model to be instantiated `n_samples` times\n data_loader (utils.data.DataLoader): For training and testing\n n_samples (int): Number of samples for uncertain parameters\n \"\"\"\n\n def __init__(self, Bayesian_model_NN, train_loader):\n \"\"\"\n For-loop implementation of SVGD.\n\n Args:\n Bayesian_model_NN (nn.Module): Bayesian NN\n train_loader (utils.data.DataLoader): Training data loader\n logger (dict)\n\n \"\"\"\n self.Bayesian_model_NN = Bayesian_model_NN\n self.train_loader = train_loader\n self.n_samples = n_samples\n self.optimizers, self.schedulers = self._optimizers_schedulers(\n lr, lr_noise)\n\n def train(self, epoch):\n print('epoch..............................................',epoch)\n\n self.Bayesian_model_NN.train()\n mse2 = 0.\n mse_train = 0.\n for batch_idx, (input,basis_patch,A_matrix, B_matrix,target_P, q_matrix) in enumerate(self.train_loader):\n input_rr12,output_basis12,A1_transformed12,B1_transformed12, target_pressure12, q1_transformed12 = input.to(device),basis_patch.to(device),A_matrix.to(device),B_matrix.to(device), target_P.to(device), q_matrix.to(device)\n input_rr,output_basis,A1_transformed1,B1_transformed, target_pressure, q1_transformed = input_rr12.float(),output_basis12.float(),A1_transformed12.float(),B1_transformed12.float(), target_pressure12.float(), q1_transformed12.float()\n #================================================================================\n output_basis = output_basis.view(144,1,15,15)\n input_rr = input_rr.view(144,1,15,15)\n A_app = []\n for i in range(1):\n A_torch = A1_transformed1[i,:,:]\n A_torch1 = A_torch[:,0:2]\n A_torch2 = A_torch[:,2]\n A_torch1 = A_torch1.type(torch.LongTensor).to(device)\n A_torch_final = torch.sparse.FloatTensor(A_torch1.t(), A_torch2, torch.Size([16384,16384]))\n A_app.append(A_torch_final)\n A1_transformed = torch.stack(A_app,dim=0)\n #================================================\n C = io.loadmat(dir+'/matlab_index_save_1.mat')\n C = C['basis_save']\n C = np.squeeze(C)\n X = np.empty((C.shape[0], C[0].shape[0], C[0].shape[1]))\n for i in range(X.shape[0]):\n X[i] = C[i]\n # -1 because of matlab and python\n X1 = X.reshape(144,225)-1\n\n X2 = np.zeros((144,225))\n for i in range(144):\n var2 = np.zeros((15,15))\n ele = X1[i,0]\n for 
varu in range(15):\n var1 = ele+128*(varu)\n for vm in range(15):\n var2[varu,vm] = var1+vm\n var3 = var2.reshape(1,225)\n X2[i,:] = var3 \n X2 = torch.Tensor(X2)\n target_BB = output_basis\n output_pr = []\n # all gradients of log joint probability: (S, P)\n grad_log_joint = []\n # all model parameters (particles): (S, P)\n theta = []\n # store the joint probabilities\n log_joint = 0.\n for i in range(self.n_samples):\n B1_transformed = Variable(B1_transformed,requires_grad=False)\n self.Bayesian_model_NN[i].zero_grad()\n\n X2_i = X2\n q1_transformed_i = q1_transformed\n B1_transformed_i = B1_transformed\n output_i = self.Bayesian_model_NN[i].forward(input_rr) \n output_i = output_i.view(1,144,225)\n target_BB = target_BB.view(1,144,225)\n loss1 = F.mse_loss(output_i, target_BB, size_average=False) \n\n #============================================================================\n for RRRR in range (1):\n output_RR_i = output_i[RRRR,:,:] \n output_RR_i = output_RR_i.reshape(144,225)\n B1_p_out_i = B1_transformed[RRRR,:,:]\n B1_p_out_i = B1_p_out_i.reshape(16384, 256)\n ss_i = 0\n for ii in range(12):\n jjj = 35+16*ii\n for k in range (jjj,jjj+12):\n ss_i =ss_i+1\n s_i = ss_i-1\n basis_temp_i = X2_i[s_i,:]\n tem_i = B1_p_out_i[:,k-1]\n basis_temp_i = basis_temp_i.type(torch.LongTensor)\n tem_i = tem_i.type(torch.cuda.FloatTensor)\n temp_variable_i = output_RR_i[143-s_i,:]/torch.max(output_RR_i[143-s_i,:])\n tem_i[basis_temp_i] = temp_variable_i\n B1_p_out_i[:,k-1] = tem_i \n B1_transformed[RRRR,:,:] = B1_p_out_i\n #====================================================\n temp11 = []\n for kkk in range(1):\n B1_transformed_temp = B1_transformed[kkk,:,:]\n B1_transformed1 = torch.transpose(B1_transformed_temp,0,1)\n dim = torch.sum(B1_transformed1,dim=0)\n B1_transformed2 = torch.div(B1_transformed1,dim)\n B1_transformed22 = torch.transpose(B1_transformed2,0,1)\n temp11.append(B1_transformed22)\n B1_transformed = temp11\n B1_transformed = torch.stack(temp11,dim=0)\n #============================================ \n\n R1_transformed = torch.transpose(B1_transformed,1,2) #check here\n A1_transformed = torch.transpose(A1_transformed,1,2)\n R1_transformed = torch.transpose(R1_transformed,1,2)\n A_c_transformed = torch.matmul(torch.transpose(bdsmm(A1_transformed,R1_transformed),1,2),B1_transformed)\n R1_transformed = torch.transpose(R1_transformed,1,2) #transform back to the old R1_transformed\n temp1_transformed = torch.matmul(R1_transformed,q1_transformed)\n temp2_transformed,LU = torch.solve(temp1_transformed,A_c_transformed)\n temp3_transformed = torch.matmul(B1_transformed,temp2_transformed)\n predict_pressure = temp3_transformed\n target_pressure = target_pressure.view(1,1,128,128)\n predict_pressure = predict_pressure.view(1,1,128,128)\n loss2 = F.mse_loss(predict_pressure,target_pressure, size_average=False)\n predict_pressure12 = predict_pressure.cpu().detach()\n output_pr.append(predict_pressure12)\n #==========\n predict_pressure_i = predict_pressure\n log_joint_i = self.Bayesian_model_NN._log_joint(i, predict_pressure_i, target_pressure, ntrain)\n\n # backward to compute gradients of log joint probabilities\n log_joint_i.backward()\n # monitoring purpose\n log_joint += log_joint_i.item()\n # for name, param in self.Bayesian_model_NN[i].named_parameters():\n # if param.requires_grad:\n # print (name)\n # backward frees memory for computation graph\n # computation below does not build computation graph\n # extract parameters and their gradients out from models\n vec_param, 
vec_grad_log_joint = parameters_to_vector(\n self.Bayesian_model_NN[i].parameters(), both=True)\n grad_log_joint.append(vec_grad_log_joint.unsqueeze(0))\n theta.append(vec_param.unsqueeze(0))\n output_pr = torch.stack(output_pr,dim=0)\n # calculating the kernel matrix and its gradients\n theta = torch.cat(theta)\n Kxx, dxKxx = self._Kxx_dxKxx(theta)\n grad_log_joint = torch.cat(grad_log_joint)\n # this line needs S x P memory\n grad_logp = torch.mm(Kxx, grad_log_joint)\n # negate grads here!!!\n grad_theta = - (grad_logp + dxKxx) / self.n_samples\n # explicitly deleting variables does not release memory :(\n \n # update param gradients\n for i in range(self.n_samples):\n vector_to_parameters(grad_theta[i],\n self.Bayesian_model_NN[i].parameters(), grad=True)\n self.optimizers[i].step()\n # WEAK: no loss function to suggest when to stop or\n # approximation performance\n output_tr = target_pressure.cpu().detach()\n mse2 += F.mse_loss(output_pr.mean(0), output_tr).item()\n rmse_train2 = np.sqrt(mse2 / len(self.train_loader))\n return rmse_train2\n\n def _squared_dist(self, X):\n \"\"\"Computes squared distance between each row of `X`, ||X_i - X_j||^2\n\n Args:\n X (Tensor): (S, P) where S is number of samples, P is the dim of \n one sample\n\n Returns:\n (Tensor) (S, S)\n \"\"\"\n XXT = torch.mm(X, X.t())\n XTX = XXT.diag()\n return -2.0 * XXT + XTX + XTX.unsqueeze(1)\n\n\n def _Kxx_dxKxx(self, X):\n \"\"\"\n Computes covariance matrix K(X,X) and its gradient w.r.t. X\n for RBF kernel with design matrix X, as in the second term in eqn (8)\n of reference SVGD paper.\n\n Args:\n X (Tensor): (S, P), design matrix of samples, where S is num of\n samples, P is the dim of each sample which stacks all params\n into a (1, P) row. Thus P could be 1 millions.\n \"\"\"\n squared_dist = self._squared_dist(X)\n l_square = 0.5 * squared_dist.median() / math.log(self.n_samples)\n Kxx = torch.exp(-0.5 / l_square * squared_dist)\n # matrix form for the second term of optimal functional gradient\n # in eqn (8) of SVGD paper\n dxKxx = (Kxx.sum(1).diag() - Kxx).matmul(X) / l_square\n\n return Kxx, dxKxx\n\n \n def _optimizers_schedulers(self, lr, lr_noise):\n \"\"\"Initialize Adam optimizers and schedulers (ReduceLROnPlateau)\n\n Args:\n lr (float): learning rate for NN parameters `w`\n lr_noise (float): learning rate for noise precision `log_beta`\n \"\"\"\n optimizers = []\n schedulers = []\n for i in range(self.n_samples):\n parameters = [{'params': [self.Bayesian_model_NN[i].log_beta], 'lr': lr_noise},\n {'params': self.Bayesian_model_NN[i].features.parameters()}]\n optimizer_i = torch.optim.Adam(parameters, lr=lr)\n optimizers.append(optimizer_i)\n schedulers.append(ReduceLROnPlateau(optimizer_i, \n mode='min', factor=0.1, patience=10, verbose=True))\n return optimizers, schedulers",
"import numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms, utils\nimport scipy.io as io\nimport sys\nimport h5py\nfrom args import args, device\n\nclass Dataset(torch.utils.data.Dataset):\n def __len__(self):\n return args.nmcs\n def __getitem__(self, idx):\n #print(idx)\n idx = idx+1\n dir = '/KLE_100_mcs'\n #input\n self.input = io.loadmat(dir+'/input_1_test/input_1_%d.mat'%idx)\n self.input = self.input['input_1']\n self.input = np.transpose(self.input)\n if args.data == 'KLE':\n self.input = np.log(self.input)\n elif args.data == 'channel':\n self.input = self.input\n self.input = torch.from_numpy(self.input)\n #basis_patch\n self.basis_patch = io.loadmat(dir+'/output/output_1_%d.mat'%idx)\n self.basis_patch = self.basis_patch['output_1'] \n self.basis_patch = np.transpose(self.basis_patch)\n self.basis_patch = torch.from_numpy(self.basis_patch) \n #A-matrix\n self.A_matrix = io.loadmat(dir+'/A_matrix_test/A_matrix1_%d.mat'%idx)\n self.A_matrix = torch.from_numpy(self.A_matrix['A_matrix1'])\n #q-matrix\n self.q_matrix = io.loadmat(dir+'/q_matrix_test/q_matrix1_%d.mat'%idx)\n self.q_matrix = self.q_matrix['q_matrix1']\n #B-matrix\n self.B_matrix = io.loadmat(dir+'/B_matrix_test/B_matrix_%d.mat'%idx)\n self.B_matrix = self.B_matrix['Extract_B']\n #target-matrix\n self.target_P = io.loadmat(dir+'/P_value_test/P_value_%d.mat'%idx)\n self.target_P = self.target_P['P_value']\n #velocity\n self.ft = io.loadmat(dir+'/flux/ft_value_%d.mat'%idx)\n self.ft = self.ft['ft']\n\n self.T_val = io.loadmat(dir+'/flux/T_value_%d.mat'%idx)\n self.T_val = self.T_val['T_value']\n return self.input, self.basis_patch, self.A_matrix, self.B_matrix, self.target_P, self.q_matrix, self.T_val, self.ft\n\ndef mcs_load_data():\n \n\n kwargs = {'num_workers': 4,\n 'pin_memory': True} if torch.cuda.is_available() else {}\n s=Dataset()\n loader= torch.utils.data.DataLoader(s,\n batch_size=args.mcs_batch, shuffle=False,\n **kwargs)\n return loader\n\n"
] |
[
[
"numpy.dot",
"numpy.concatenate",
"numpy.argmin",
"numpy.any",
"numpy.exp",
"numpy.swapaxes",
"numpy.allclose",
"scipy.sparse.diags",
"numpy.sin",
"numpy.argmax",
"numpy.insert",
"scipy.sparse.bmat",
"numpy.log",
"scipy.sparse.csr_matrix",
"numpy.atleast_2d",
"numpy.abs",
"numpy.array_equal",
"numpy.cos",
"numpy.tile",
"numpy.sign",
"numpy.vstack"
],
[
"torch.transpose",
"torch.max",
"torch.cat",
"numpy.squeeze",
"torch.sum",
"torch.autograd.Variable",
"torch.Size",
"torch.mm",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.solve",
"scipy.io.loadmat",
"numpy.zeros",
"torch.optim.Adam",
"torch.div",
"matplotlib.pyplot.switch_backend",
"torch.exp",
"torch.nn.functional.mse_loss",
"torch.stack",
"torch.Tensor",
"torch.matmul",
"numpy.empty"
],
[
"numpy.log",
"torch.utils.data.Dataset",
"scipy.io.loadmat",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.cuda.is_available",
"numpy.transpose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
hustzxd/MobulaOP
|
[
"49e4062f6578b31918ddcc613e38e0fbb92bb015"
] |
[
"examples/TVMOp.py"
] |
[
"\"\"\" Example for using TVM generated function \"\"\"\nimport sys\nsys.path.append('../') # Add MobulaOP path\nimport mobula\n\nimport tvm\nimport topi\nfrom tvm.contrib.mxnet import to_mxnet_func\nfrom tvm.contrib.dlpack import to_pytorch_func\n\n\ndef get_tvm_add():\n # define compute\n n = tvm.var('n')\n A = tvm.placeholder(n, name='A', dtype='float32')\n B = tvm.placeholder(n, name='B', dtype='float32')\n C = tvm.compute((n,), lambda i: A[i] + B[i], name='C')\n\n # build function (with parallel support)\n with tvm.target.create('llvm'):\n s = topi.generic.schedule_injective([C])\n func_cpu = tvm.build(s, [A, B, C])\n\n with tvm.target.create('cuda'):\n s = topi.generic.schedule_injective([C])\n func_gpu = tvm.build(s, [A, B, C])\n\n return func_cpu, func_gpu\n\n\[email protected]\nclass TVMAddOp:\n def __init__(self):\n func_cpu, func_gpu = get_tvm_add()\n\n self.func = {\n 'mx': {\n 'cpu': to_mxnet_func(func_cpu, const_loc=[0, 1]),\n 'gpu': to_mxnet_func(func_gpu, const_loc=[0, 1]),\n },\n 'th': {\n 'cpu': to_pytorch_func(func_cpu),\n 'gpu': to_pytorch_func(func_gpu),\n }\n }\n\n def forward(self, x, y):\n glue_mod = mobula.glue.backend.get_var_glue(x)\n backend = (glue_mod.__name__.split('.')[-1])\n device_type = 'cpu' if glue_mod.dev_id(x) is None else 'gpu'\n\n self.func[backend][device_type](x, y, self.Y[0])\n\n def backward(self, dy):\n return [dy, dy]\n\n def infer_shape(self, in_shape):\n assert in_shape[0] == in_shape[1]\n return in_shape, [in_shape[0]]\n\n\ntry:\n import mxnet as mx\n print('===== MXNet =====')\n for ctx in [mx.cpu(), mx.gpu()]:\n print(ctx)\n a = mx.nd.array([1.0, 2.0, 3.0], ctx=ctx)\n b = mx.nd.array([4.0, 5.0, 6.0], ctx=ctx)\n c = TVMAddOp(a, b)\n print('a + b = c\\n{} + {} = {}\\n'.format(a.asnumpy(),\n b.asnumpy(), c.asnumpy())) # [5.0, 7.0, 9.0]\nexcept ImportError:\n pass\n\ntry:\n import torch\n print('===== PyTorch =====')\n for device in [torch.device('cpu'), torch.device('cuda')]:\n print(device)\n a = torch.tensor([1.0, 2.0, 3.0], device=device)\n b = torch.tensor([4.0, 5.0, 6.0], device=device)\n c = TVMAddOp(a, b)\n print('a + b = c\\n{} + {} = {}\\n'.format(a, b, c)) # [5.0, 7.0, 9.0]\nexcept ImportError:\n pass\n"
] |
[
[
"torch.device",
"torch.tensor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xiaoeric/bmusegan
|
[
"3b54448bb488d7426c1fc4c0f9a65d373dc8c05f"
] |
[
"musegan/bmusegan/components.py"
] |
[
"\"\"\"Classes for the components of the model, including the generator, the\ndiscriminator and the refiner.\n\"\"\"\nfrom collections import OrderedDict\nimport tensorflow.compat.v1 as tf\ntf.disable_v2_behavior()\nfrom musegan.utils.neuralnet import NeuralNet\n\nclass Component(object):\n \"\"\"Base class for components.\"\"\"\n def __init__(self, tensor_in, condition, slope_tensor=None):\n if not isinstance(tensor_in, tf.Tensor):\n raise TypeError(\"`tensor_in` must be of tf.Tensor type\")\n\n self.tensor_in = tensor_in\n self.condition = condition\n self.slope_tensor = slope_tensor\n\n self.scope = None\n self.tensor_out = tensor_in\n self.nets = OrderedDict()\n self.vars = None\n\n def __repr__(self):\n return \"Component({}, input_shape={}, output_shape={})\".format(\n self.scope.name, self.tensor_in.get_shape(),\n str(self.tensor_out.get_shape()))\n\n def get_summary(self):\n \"\"\"Return the summary string.\"\"\"\n cleansed_nets = []\n for net in self.nets.values():\n if isinstance(net, NeuralNet):\n if net.scope is not None:\n cleansed_nets.append(net)\n if isinstance(net, list):\n if net[0].scope is not None:\n cleansed_nets.append(net[0])\n return '\\n'.join(\n [\"{:-^80}\".format(' ' + self.scope.name + ' '),\n \"{:49} {}\".format('Input', self.tensor_in.get_shape())]\n + ['-' * 80 + '\\n' + (x.get_summary()) for x in cleansed_nets])\n\nclass Generator(Component):\n \"\"\"Class that defines the generator.\"\"\"\n def __init__(self, tensor_in, config, condition=None, name='Generator',\n reuse=None):\n super().__init__(tensor_in, condition)\n with tf.variable_scope(name, reuse=reuse) as scope:\n self.scope = scope\n self.tensor_out, self.nets = self.build(config)\n self.vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n self.scope.name)\n\n def build(self, config):\n \"\"\"Build the generator.\"\"\"\n nets = OrderedDict()\n\n nets['shared'] = NeuralNet(self.tensor_in, config['net_g']['shared'],\n name='shared')\n\n nets['pitch_time_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n config['net_g']['pitch_time_private'],\n name='pt_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['time_pitch_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n config['net_g']['time_pitch_private'],\n name='tp_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['merged_private'] = [\n NeuralNet(tf.concat([nets['pitch_time_private'][idx].tensor_out,\n nets['time_pitch_private'][idx].tensor_out],\n -1),\n config['net_g']['merged_private'],\n name='merged_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n tensor_out = tf.concat([nn.tensor_out for nn in nets['merged_private']],\n -1)\n return tensor_out, nets\n\nclass Discriminator(Component):\n \"\"\"Class that defines the discriminator.\"\"\"\n def __init__(self, tensor_in, config, condition=None, name='Discriminator',\n reuse=None):\n super().__init__(tensor_in, condition)\n with tf.variable_scope(name, reuse=reuse) as scope:\n self.scope = scope\n self.tensor_out, self.nets = self.build(config)\n self.vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n self.scope.name)\n\n def build(self, config):\n \"\"\"Build the discriminator.\"\"\"\n nets = OrderedDict()\n\n # main stream\n nets['pitch_time_private'] = [\n NeuralNet(tf.expand_dims(self.tensor_in[..., idx], -1),\n config['net_d']['pitch_time_private'],\n name='pt_' + str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['time_pitch_private'] = [\n NeuralNet(tf.expand_dims(self.tensor_in[..., idx], -1),\n 
config['net_d']['time_pitch_private'],\n name='tp_' + str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['merged_private'] = [\n NeuralNet(\n tf.concat([x.tensor_out,\n nets['time_pitch_private'][idx].tensor_out], -1),\n config['net_d']['merged_private'], name='merged_' + str(idx))\n for idx, x in enumerate(nets['pitch_time_private'])\n ]\n\n nets['shared'] = NeuralNet(\n tf.concat([nn.tensor_out for nn in nets['merged_private']], -1),\n config['net_d']['shared'], name='shared'\n )\n\n # chroma stream\n reshaped = tf.reshape(\n self.tensor_in, (-1, config['num_bar'], config['num_beat'],\n config['beat_resolution'], config['num_pitch']//12,\n 12, config['num_track'])\n )\n self.chroma = tf.reduce_sum(reshaped, axis=(3, 4))\n nets['chroma'] = NeuralNet(self.chroma, config['net_d']['chroma'],\n name='chroma')\n\n # onset stream\n padded = tf.pad(self.tensor_in[:, :, :-1, :, 1:],\n [[0, 0], [0, 0], [1, 0], [0, 0], [0, 0]])\n self.onset = tf.concat([tf.expand_dims(self.tensor_in[..., 0], -1),\n self.tensor_in[..., 1:] - padded], -1)\n nets['onset'] = NeuralNet(self.onset, config['net_d']['onset'],\n name='onset')\n\n if (config['net_d']['chroma'] is not None\n or config['net_d']['onset'] is not None):\n to_concat = [nets['shared'].tensor_out]\n if config['net_d']['chroma'] is not None:\n to_concat.append(nets['chroma'].tensor_out)\n if config['net_d']['onset'] is not None:\n to_concat.append(nets['onset'].tensor_out)\n concated = tf.concat(to_concat, -1)\n else:\n concated = nets['shared'].tensor_out\n\n # merge streams\n nets['merged'] = NeuralNet(concated, config['net_d']['merged'],\n name='merged')\n\n return nets['merged'].tensor_out, nets\n\nclass Refiner(Component):\n \"\"\"Class that defines the refiner.\"\"\"\n def __init__(self, tensor_in, config, condition=None, slope_tensor=None,\n name='Refiner', reuse=None):\n super().__init__(tensor_in, condition, slope_tensor)\n with tf.variable_scope(name, reuse=reuse) as scope:\n self.scope = scope\n self.tensor_out, self.nets, self.preactivated = self.build(config)\n self.vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n self.scope.name)\n\n def build(self, config):\n \"\"\"Build the refiner.\"\"\"\n nets = OrderedDict()\n\n nets['private'] = [\n NeuralNet(tf.expand_dims(self.tensor_in[..., idx], -1),\n config['net_r']['private'],\n slope_tensor=self.slope_tensor, name='private'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n return (tf.concat([nn.tensor_out for nn in nets['private']], -1), nets,\n tf.concat([nn.layers[-1].preactivated\n for nn in nets['private']], -1))\n\nclass End2EndGenerator(Component):\n \"\"\"Class that defines the end-to-end generator.\"\"\"\n def __init__(self, tensor_in, config, condition=None, slope_tensor=None,\n name='End2EndGenerator', reuse=None):\n super().__init__(tensor_in, condition, slope_tensor)\n with tf.variable_scope(name, reuse=reuse) as scope:\n self.scope = scope\n self.tensor_out, self.nets, self.preactivated = self.build(config)\n self.vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,\n self.scope.name)\n\n def build(self, config):\n \"\"\"Build the end-to-end generator.\"\"\"\n nets = OrderedDict()\n\n nets['shared'] = NeuralNet(self.tensor_in, config['net_g']['shared'],\n name='shared')\n\n nets['pitch_time_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n config['net_g']['pitch_time_private'],\n name='pt_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['time_pitch_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n 
config['net_g']['time_pitch_private'],\n name='tp_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['merged_private'] = [\n NeuralNet(tf.concat([nets['pitch_time_private'][idx].tensor_out,\n nets['time_pitch_private'][idx].tensor_out],\n -1),\n config['net_g']['merged_private'],\n name='merged_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['refiner_private'] = [\n NeuralNet(nets['merged_private'][idx].tensor_out,\n config['net_r']['private'],\n slope_tensor=self.slope_tensor,\n name='refiner_private'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n return (tf.concat([nn.tensor_out for nn in nets['private']], -1), nets,\n tf.concat([nn.layers[-1].preactivated\n for nn in nets['private']], -1))\n"
] |
[
[
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.disable_v2_behavior",
"tensorflow.compat.v1.expand_dims",
"tensorflow.compat.v1.reduce_sum",
"tensorflow.compat.v1.reshape",
"tensorflow.compat.v1.get_collection",
"tensorflow.compat.v1.variable_scope",
"tensorflow.compat.v1.pad"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
loerssoni/pytorch-CycleGAN-and-pix2pix
|
[
"289287f9e4bd948a306627b32fc6b57b78420121"
] |
[
"data/base_dataset.py"
] |
[
"\"\"\"This module implements an abstract base class (ABC) 'BaseDataset' for datasets.\n\nIt also includes common transformation functions (e.g., get_transform, __scale_width), which can be later used in subclasses.\n\"\"\"\nimport random\nimport numpy as np\nimport torch.utils.data as data\nimport torch\nfrom PIL import Image\nimport torchvision.transforms as transforms\nfrom abc import ABC, abstractmethod\n\n\nclass BaseDataset(data.Dataset, ABC):\n \"\"\"This class is an abstract base class (ABC) for datasets.\n\n To create a subclass, you need to implement the following four functions:\n -- <__init__>: initialize the class, first call BaseDataset.__init__(self, opt).\n -- <__len__>: return the size of dataset.\n -- <__getitem__>: get a data point.\n -- <modify_commandline_options>: (optionally) add dataset-specific options and set default options.\n \"\"\"\n\n def __init__(self, opt):\n \"\"\"Initialize the class; save the options in the class\n\n Parameters:\n opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions\n \"\"\"\n self.opt = opt\n self.root = opt.dataroot\n\n @staticmethod\n def modify_commandline_options(parser, is_train):\n \"\"\"Add new dataset-specific options, and rewrite default values for existing options.\n\n Parameters:\n parser -- original option parser\n is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.\n\n Returns:\n the modified parser.\n \"\"\"\n return parser\n\n @abstractmethod\n def __len__(self):\n \"\"\"Return the total number of images in the dataset.\"\"\"\n return 0\n\n @abstractmethod\n def __getitem__(self, index):\n \"\"\"Return a data point and its metadata information.\n\n Parameters:\n index - - a random integer for data indexing\n\n Returns:\n a dictionary of data with their names. 
It ususally contains the data itself and its metadata information.\n \"\"\"\n pass\n\n\ndef get_params(opt, size):\n w, h = size\n new_h = h\n new_w = w\n if opt.preprocess == 'resize_and_crop':\n new_h = new_w = opt.load_size\n elif opt.preprocess == 'scale_width_and_crop':\n new_w = opt.load_size\n new_h = opt.load_size * h // w\n\n x = random.randint(0, np.maximum(0, new_w - opt.crop_size))\n y = random.randint(0, np.maximum(0, new_h - opt.crop_size))\n\n flip = random.random() > 0.5\n\n return {'crop_pos': (x, y), 'flip': flip}\n\n\ndef get_transform(opt, params=None, noise=False, grayscale=False, method=Image.BICUBIC):\n transform_list = []\n if grayscale:\n transform_list.append(transforms.Grayscale(1))\n if 'resize' in opt.preprocess:\n osize = [opt.load_size, opt.load_size]\n transform_list.append(transforms.Resize(osize, method))\n\n if 'crop' in opt.preprocess:\n if params is None:\n transform_list.append(transforms.RandomCrop(opt.crop_size))\n else:\n transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.crop_size)))\n\n if opt.preprocess == 'none':\n transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base=4, method=method)))\n transform_list.append(transforms.ToTensor())\n if noise:\n transform_list.append(transforms.ColorJitter(brightness=(0.8, 1.2), contrast=(0.8, 1.2),\n saturation=(0.8, 1.2), hue=(-0.05, 0.05)))\n transform_list.append(transforms.Lambda(lambda x: coarse_dropout(x)))\n transform_list.append(transforms.Lambda(lambda x: x + torch.rand(x.shape)*0.05))\n\n if grayscale:\n transform_list += [transforms.Normalize((0.5,), (0.5,))]\n else:\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)\n\n\ndef __make_power_2(img, base, method=Image.BICUBIC):\n ow, oh = img.size\n h = int(round(oh / base) * base)\n w = int(round(ow / base) * base)\n if h == oh and w == ow:\n return img\n\n __print_size_warning(ow, oh, w, h)\n return img.resize((w, h), method)\n\n\ndef __scale_width(img, target_size, crop_size, method=Image.BICUBIC):\n ow, oh = img.size\n if ow == target_size and oh >= crop_size:\n return img\n w = target_size\n h = int(max(target_size * oh / ow, crop_size))\n return img.resize((w, h), method)\n\n\ndef __crop(img, pos, size):\n ow, oh = img.size\n x1, y1 = pos\n tw = th = size\n if (ow > tw or oh > th):\n return img.crop((x1, y1, x1 + tw, y1 + th))\n return img\n\n\ndef __flip(img, flip):\n if flip:\n return img.transpose(Image.FLIP_LEFT_RIGHT)\n return img\n\n\ndef __print_size_warning(ow, oh, w, h):\n \"\"\"Print warning information about image size(only print once)\"\"\"\n if not hasattr(__print_size_warning, 'has_printed'):\n print(\"The image size needs to be a multiple of 4. \"\n \"The loaded image size was (%d, %d), so it was adjusted to \"\n \"(%d, %d). 
This adjustment will be done to all images \"\n \"whose sizes are not multiples of 4\" % (ow, oh, w, h))\n __print_size_warning.has_printed = True\n\n\ndef coarse_dropout(image, count=200, max_size=0.05):\n dim = image.shape[1]\n for k in range(count):\n x = (torch.rand(1) * image.shape[1]).int()\n y = (torch.rand(1) * image.shape[2]).int()\n height = (dim* max_size * torch.rand(1))\n ya = torch.max(torch.tensor(0), y - height // 2).int()\n yb = torch.min(torch.tensor(dim), y + height // 2).int()\n xa = torch.max(torch.tensor(0), x - height // 2).int()\n xb = torch.min(torch.tensor(dim), x + height // 2).int()\n one = image[:, ya:yb, 0:xa]\n two = torch.rand([3, yb - ya, xb - xa]) * 1\n three = image[:, ya:yb, xb:dim]\n middle = torch.cat([one, two, three], axis=2)\n image = torch.cat([image[:, 0:ya, :], middle, image[:, yb:dim, :]], axis=1)\n image = torch.reshape(image, [3, dim, dim])\n return image"
] |
[
[
"numpy.maximum",
"torch.cat",
"torch.reshape",
"torch.tensor",
"torch.rand"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alxshine/eNNclave
|
[
"639aa7e8df9440922788d0c2a79846b198f117aa"
] |
[
"frontend/python/tests/test_depthwise_conv2d.py"
] |
[
"from tensorflow.python.framework.test_util import TensorFlowTestCase\nfrom tensorflow.keras.models import Sequential\nimport tensorflow.keras.layers as layers\n\nimport os\nimport unittest\n\nfrom common import common_test_basis\n\n\nclass DepthwiseConv2dTests(TensorFlowTestCase):\n @staticmethod\n def testSmallNative():\n model = Sequential([\n layers.DepthwiseConv2D(3, padding='same', input_shape=(5, 5, 3))\n ])\n common_test_basis(model, False)\n\n @staticmethod\n def testMediumNative():\n model = Sequential([\n layers.DepthwiseConv2D(3, padding='same', input_shape=(10, 10, 5))\n ])\n common_test_basis(model, False)\n\n @staticmethod\n def testLargeNative():\n model = Sequential([\n layers.DepthwiseConv2D(\n 5, padding='same', input_shape=(100, 100, 5))\n ])\n common_test_basis(model, False)\n\n @unittest.skip\n def testHugeNative(self):\n raise AssertionError(\"Causes a segfault in C\")\n # model = Sequential([\n # layers.DepthwiseConv2D(\n # 10, padding='same', input_shape=(500, 500, 64))\n # ])\n # common_test_basis(model, False)\n\n @unittest.skipIf(os.environ.get('SGX_SDK') is None, \"SGX is not available\")\n def testSmallEnclave(self):\n model = Sequential([\n layers.DepthwiseConv2D(3, padding='same', input_shape=(5, 5, 3))\n ])\n common_test_basis(model, True)\n\n @unittest.skipIf(os.environ.get('SGX_SDK') is None, \"SGX is not available\")\n def testMediumEnclave(self):\n model = Sequential([\n layers.DepthwiseConv2D(3, padding='same', input_shape=(10, 10, 5))\n ])\n common_test_basis(model, True)\n\n @unittest.skipIf(os.environ.get('SGX_SDK') is None, \"SGX is not available\")\n def testLargeEnclave(self):\n model = Sequential([\n layers.DepthwiseConv2D(\n 5, padding='same', input_shape=(100, 100, 5))\n ])\n common_test_basis(model, True)\n\n @unittest.skip\n @unittest.skipIf(os.environ.get('SGX_SDK') is None, \"SGX is not available\")\n def testHugeEnclave(self):\n raise AssertionError(\"Causes a segfault in C\")\n # model = Sequential([\n # layers.DepthwiseConv2D(\n # 10, padding='same', input_shape=(500, 500, 64))\n # ])\n # common_test_basis(model, True)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"tensorflow.keras.layers.DepthwiseConv2D"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
lolgab/scvi-tools
|
[
"abbadc4172381aaf938f131796393ee113165167",
"abbadc4172381aaf938f131796393ee113165167",
"abbadc4172381aaf938f131796393ee113165167"
] |
[
"scvi/core/modules/vaec.py",
"scvi/model/_utils.py",
"scvi/core/models/_utils.py"
] |
[
"import torch\nfrom torch.distributions import Categorical, Normal\nfrom torch.distributions import kl_divergence as kl\n\nfrom ._base import DecoderSCVI, Encoder\nfrom .classifier import Classifier\nfrom .utils import broadcast_labels\nfrom .vae import VAE\n\n\nclass VAEC(VAE):\n r\"\"\"\n A semi-supervised Variational auto-encoder model - inspired from M2 model.\n\n Described in (https://arxiv.org/pdf/1406.5298.pdf)\n\n Parameters\n ----------\n n_input :\n Number of input genes\n n_batch :\n Number of batches\n n_labels :\n Number of labels\n n_hidden :\n Number of nodes per hidden layer\n n_latent :\n Dimensionality of the latent space\n n_layers :\n Number of hidden layers used for encoder and decoder NNs\n dropout_rate :\n Dropout rate for neural networks\n dispersion :\n One of the following\n\n * ``'gene'`` - dispersion parameter of NB is constant per gene across cells\n * ``'gene-batch'`` - dispersion can differ between different batches\n * ``'gene-label'`` - dispersion can differ between different labels\n * ``'gene-cell'`` - dispersion can differ for every gene in every cell\n log_variational :\n Log(data+1) prior to encoding for numerical stability. Not normalization.\n gene_likelihood :\n One of\n\n * ``'nb'`` - Negative binomial distribution\n * ``'zinb'`` - Zero-inflated negative binomial distribution\n y_prior :\n If None, initialized to uniform probability over cell types\n\n Examples\n --------\n >>> gene_dataset = CortexDataset()\n >>> vaec = VAEC(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * False,\n ... n_labels=gene_dataset.n_labels)\n\n >>> gene_dataset = SyntheticDataset(n_labels=3)\n >>> vaec = VAEC(gene_dataset.nb_genes, n_batch=gene_dataset.n_batches * False,\n ... n_labels=3, y_prior=torch.tensor([[0.1,0.5,0.4]]))\n\n \"\"\"\n\n def __init__(\n self,\n n_input,\n n_batch,\n n_labels,\n n_hidden=128,\n n_latent=10,\n n_layers=1,\n dropout_rate=0.1,\n y_prior=None,\n dispersion=\"gene\",\n log_variational=True,\n gene_likelihood=\"zinb\",\n ):\n super().__init__(\n n_input,\n n_batch,\n n_labels,\n n_hidden=n_hidden,\n n_latent=n_latent,\n n_layers=n_layers,\n dropout_rate=dropout_rate,\n dispersion=dispersion,\n log_variational=log_variational,\n gene_likelihood=gene_likelihood,\n )\n\n self.z_encoder = Encoder(\n n_input,\n n_latent,\n n_cat_list=[n_labels],\n n_hidden=n_hidden,\n n_layers=n_layers,\n dropout_rate=dropout_rate,\n )\n self.decoder = DecoderSCVI(\n n_latent,\n n_input,\n n_cat_list=[n_batch, n_labels],\n n_layers=n_layers,\n n_hidden=n_hidden,\n )\n\n self.y_prior = torch.nn.Parameter(\n y_prior\n if y_prior is not None\n else (1 / n_labels) * torch.ones(1, n_labels),\n requires_grad=False,\n )\n\n self.classifier = Classifier(\n n_input, n_hidden, n_labels, n_layers=n_layers, dropout_rate=dropout_rate\n )\n\n def classify(self, x):\n x = torch.log(1 + x)\n return self.classifier(x)\n\n def forward(self, x, local_l_mean, local_l_var, batch_index=None, y=None):\n is_labelled = False if y is None else True\n\n # Prepare for sampling\n x_ = torch.log(1 + x)\n ql_m, ql_v, library = self.l_encoder(x_)\n\n # Enumerate choices of label\n ys, xs, library_s, batch_index_s = broadcast_labels(\n y, x, library, batch_index, n_broadcast=self.n_labels\n )\n\n # Sampling\n outputs = self.inference(xs, batch_index_s, ys)\n px_r = outputs[\"px_r\"]\n px_rate = outputs[\"px_rate\"]\n px_dropout = outputs[\"px_dropout\"]\n qz_m = outputs[\"qz_m\"]\n qz_v = outputs[\"qz_v\"]\n reconst_loss = self.get_reconstruction_loss(xs, px_rate, px_r, px_dropout)\n\n # 
KL Divergence\n mean = torch.zeros_like(qz_m)\n scale = torch.ones_like(qz_v)\n\n kl_divergence_z = kl(Normal(qz_m, torch.sqrt(qz_v)), Normal(mean, scale)).sum(\n dim=1\n )\n kl_divergence_l = kl(\n Normal(ql_m, torch.sqrt(ql_v)),\n Normal(local_l_mean, torch.sqrt(local_l_var)),\n ).sum(dim=1)\n\n if is_labelled:\n return reconst_loss, kl_divergence_z + kl_divergence_l, 0.0\n\n reconst_loss = reconst_loss.view(self.n_labels, -1)\n\n probs = self.classifier(x_)\n reconst_loss = (reconst_loss.t() * probs).sum(dim=1)\n\n kl_divergence = (kl_divergence_z.view(self.n_labels, -1).t() * probs).sum(dim=1)\n kl_divergence += kl(\n Categorical(probs=probs),\n Categorical(probs=self.y_prior.repeat(probs.size(0), 1)),\n )\n kl_divergence += kl_divergence_l\n\n return reconst_loss, kl_divergence, 0.0\n",
"import logging\nfrom typing import Dict, List, Union, Sequence\nfrom collections.abc import Iterable as IterableClass\n\nimport anndata\nimport numpy as np\nimport scipy.sparse as sp_sparse\n\nfrom scvi import _CONSTANTS\nfrom scvi.data import get_from_registry\n\nlogger = logging.getLogger(__name__)\n\nNumber = Union[int, float]\n\n\ndef scrna_raw_counts_properties(\n adata: anndata.AnnData,\n idx1: Union[List[int], np.ndarray],\n idx2: Union[List[int], np.ndarray],\n) -> Dict[str, np.ndarray]:\n \"\"\"\n Computes and returns some statistics on the raw counts of two sub-populations.\n\n Parameters\n ----------\n adata\n AnnData object setup with `scvi`.\n idx1\n subset of indices describing the first population.\n idx2\n subset of indices describing the second population.\n\n Returns\n -------\n type\n Dict of ``np.ndarray`` containing, by pair (one for each sub-population),\n mean expression per gene, proportion of non-zero expression per gene, mean of normalized expression.\n \"\"\"\n data = get_from_registry(adata, _CONSTANTS.X_KEY)\n data1 = data[idx1]\n data2 = data[idx2]\n mean1 = np.asarray((data1).mean(axis=0)).ravel()\n mean2 = np.asarray((data2).mean(axis=0)).ravel()\n nonz1 = np.asarray((data1 != 0).mean(axis=0)).ravel()\n nonz2 = np.asarray((data2 != 0).mean(axis=0)).ravel()\n\n key = \"_scvi_raw_norm_scaling\"\n if key not in adata.obs.keys():\n scaling_factor = 1 / np.asarray(data.sum(axis=1)).ravel().reshape(-1, 1)\n scaling_factor *= 1e4\n adata.obs[key] = scaling_factor.ravel()\n else:\n scaling_factor = adata.obs[key].to_numpy().ravel().reshape(-1, 1)\n\n if issubclass(type(data), sp_sparse.spmatrix):\n norm_data1 = data1.multiply(scaling_factor[idx1])\n norm_data2 = data2.multiply(scaling_factor[idx2])\n else:\n norm_data1 = data1 * scaling_factor[idx1]\n norm_data2 = data2 * scaling_factor[idx2]\n\n norm_mean1 = np.asarray(norm_data1.mean(axis=0)).ravel()\n norm_mean2 = np.asarray(norm_data2.mean(axis=0)).ravel()\n\n properties = dict(\n raw_mean1=mean1,\n raw_mean2=mean2,\n non_zeros_proportion1=nonz1,\n non_zeros_proportion2=nonz2,\n raw_normalized_mean1=norm_mean1,\n raw_normalized_mean2=norm_mean2,\n )\n return properties\n\n\ndef cite_seq_raw_counts_properties(\n adata: anndata.AnnData,\n idx1: Union[List[int], np.ndarray],\n idx2: Union[List[int], np.ndarray],\n) -> Dict[str, np.ndarray]:\n \"\"\"\n Computes and returns some statistics on the raw counts of two sub-populations.\n\n Parameters\n ----------\n adata\n AnnData object setup with `scvi`.\n idx1\n subset of indices describing the first population.\n idx2\n subset of indices describing the second population.\n\n Returns\n -------\n type\n Dict of ``np.ndarray`` containing, by pair (one for each sub-population),\n mean expression per gene, proportion of non-zero expression per gene, mean of normalized expression.\n \"\"\"\n gp = scrna_raw_counts_properties(adata, idx1, idx2)\n protein_exp = get_from_registry(adata, _CONSTANTS.PROTEIN_EXP_KEY)\n\n nan = np.array([np.nan] * len(adata.uns[\"scvi_protein_names\"]))\n protein_exp = get_from_registry(adata, _CONSTANTS.PROTEIN_EXP_KEY)\n mean1_pro = np.asarray(protein_exp[idx1].mean(0))\n mean2_pro = np.asarray(protein_exp[idx2].mean(0))\n nonz1_pro = np.asarray((protein_exp[idx1] > 0).mean(0))\n nonz2_pro = np.asarray((protein_exp[idx2] > 0).mean(0))\n properties = dict(\n raw_mean1=np.concatenate([gp[\"raw_mean1\"], mean1_pro]),\n raw_mean2=np.concatenate([gp[\"raw_mean2\"], mean2_pro]),\n non_zeros_proportion1=np.concatenate([gp[\"non_zeros_proportion1\"], 
nonz1_pro]),\n non_zeros_proportion2=np.concatenate([gp[\"non_zeros_proportion2\"], nonz2_pro]),\n raw_normalized_mean1=np.concatenate([gp[\"raw_normalized_mean1\"], nan]),\n raw_normalized_mean2=np.concatenate([gp[\"raw_normalized_mean2\"], nan]),\n )\n\n return properties\n\n\ndef _get_var_names_from_setup_anndata(adata):\n \"\"\"Gets var names by checking if using raw.\"\"\"\n var_names = adata.var_names\n return var_names\n\n\ndef _get_batch_code_from_category(\n adata: anndata.AnnData, category: Sequence[Union[Number, str]]\n):\n if not isinstance(category, IterableClass) or isinstance(category, str):\n category = [category]\n\n categorical_mappings = adata.uns[\"_scvi\"][\"categorical_mappings\"]\n batch_mappings = categorical_mappings[\"_scvi_batch\"][\"mapping\"]\n batch_code = []\n for cat in category:\n if cat is None:\n batch_code.append(None)\n elif cat not in batch_mappings:\n raise ValueError('\"{}\" not a valid batch category.'.format(cat))\n else:\n batch_loc = np.where(batch_mappings == cat)[0][0]\n batch_code.append(batch_loc)\n return batch_code\n",
"import numpy as np\nimport pandas as pd\n\nfrom scvi._utils import track\nfrom scvi.core.utils import DifferentialComputation\n\n\ndef _de_core(\n adata,\n model_fn,\n groupby,\n group1,\n group2,\n idx1,\n idx2,\n all_stats,\n all_stats_fn,\n col_names,\n mode,\n batchid1,\n batchid2,\n delta,\n batch_correction,\n fdr,\n **kwargs\n):\n \"\"\"Internal function for DE interface.\"\"\"\n if group1 is None and idx1 is None:\n group1 = adata.obs[groupby].cat.categories.tolist()\n if len(group1) == 1:\n raise ValueError(\n \"Only a single group in the data. Can't run DE on a single group.\"\n )\n\n if isinstance(group1, str):\n group1 = [group1]\n\n # make a temp obs key using indices\n temp_key = None\n if idx1 is not None:\n idx1 = np.asarray(idx1).ravel()\n g1_key = \"one\"\n obs_col = np.array([\"None\"] * adata.shape[0], dtype=str)\n obs_col[idx1] = g1_key\n group2 = None if idx2 is None else \"two\"\n if idx2 is not None:\n idx2 = np.asarray(idx2).ravel()\n obs_col[idx2] = group2\n temp_key = \"_scvi_temp_de\"\n adata.obs[temp_key] = obs_col\n groupby = temp_key\n group1 = [g1_key]\n\n df_results = []\n dc = DifferentialComputation(model_fn, adata)\n for g1 in track(\n group1,\n description=\"DE...\",\n ):\n cell_idx1 = (adata.obs[groupby] == g1).to_numpy().ravel()\n if group2 is None:\n cell_idx2 = ~cell_idx1\n else:\n cell_idx2 = (adata.obs[groupby] == group2).to_numpy().ravel()\n\n all_info = dc.get_bayes_factors(\n cell_idx1,\n cell_idx2,\n mode=mode,\n delta=delta,\n batchid1=batchid1,\n batchid2=batchid2,\n use_observed_batches=not batch_correction,\n **kwargs,\n )\n\n if all_stats is True:\n genes_properties_dict = all_stats_fn(adata, cell_idx1, cell_idx2)\n all_info = {**all_info, **genes_properties_dict}\n\n res = pd.DataFrame(all_info, index=col_names)\n sort_key = \"proba_de\" if mode == \"change\" else \"bayes_factor\"\n res = res.sort_values(by=sort_key, ascending=False)\n if mode == \"change\":\n res[\"is_de_fdr_{}\".format(fdr)] = _fdr_de_prediction(\n res[\"proba_de\"], fdr=fdr\n )\n if idx1 is None:\n g2 = \"Rest\" if group2 is None else group2\n res[\"comparison\"] = \"{} vs {}\".format(g1, g2)\n df_results.append(res)\n\n if temp_key is not None:\n del adata.obs[temp_key]\n\n result = pd.concat(df_results, axis=0)\n\n return result\n\n\ndef _fdr_de_prediction(posterior_probas: np.ndarray, fdr: float = 0.05):\n \"\"\"Compute posterior expected FDR and tag features as DE.\"\"\"\n if not posterior_probas.ndim == 1:\n raise ValueError(\"posterior_probas should be 1-dimensional\")\n sorted_genes = np.argsort(-posterior_probas)\n sorted_pgs = posterior_probas[sorted_genes]\n cumulative_fdr = (1.0 - sorted_pgs).cumsum() / (1.0 + np.arange(len(sorted_pgs)))\n d = (cumulative_fdr <= fdr).sum()\n pred_de_genes = sorted_genes[:d]\n is_pred_de = np.zeros_like(cumulative_fdr).astype(bool)\n is_pred_de[pred_de_genes] = True\n return is_pred_de\n"
] |
[
[
"torch.ones",
"torch.sqrt",
"torch.zeros_like",
"torch.distributions.Categorical",
"torch.log",
"torch.distributions.Normal",
"torch.ones_like"
],
[
"numpy.concatenate",
"numpy.where"
],
[
"pandas.concat",
"numpy.asarray",
"pandas.DataFrame",
"numpy.zeros_like",
"numpy.argsort",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ortslil64/dynamic_map_matcher_ros
|
[
"f0d6279adf15ea8bf87cba8cd27d18a49a36347d"
] |
[
"src/map_matcher.py"
] |
[
"#!/usr/bin/env python\nimport numpy as np\nfrom nav_msgs.msg import OccupancyGrid\nfrom scipy.optimize import differential_evolution\nfrom skimage.measure import ransac\nfrom skimage.transform import AffineTransform, SimilarityTransform\nfrom sklearn.neighbors import NearestNeighbors\nimport cv2\n\n\ndef send_map_ros_msg(landmarks, empty_landmarks, publisher, resolution = 0.01, width = 2048, height = 2048):\n map_msg = OccupancyGrid()\n map_msg.header.frame_id = 'map'\n map_msg.info.resolution = resolution\n map_msg.info.width = width\n map_msg.info.height = height\n\n data = -np.ones(shape = (width,height))\n for ii in range(len(landmarks)):\n on_x = landmarks[ii,0] // resolution + width // 2\n on_y = landmarks[ii,1] // resolution + height // 2\n if on_x < width and on_x > 0 and on_y < height and on_y > 0:\n data[int(on_x), int(on_y)] = 100\n\n for ii in range(len(empty_landmarks)):\n off_x = empty_landmarks[ii,0] // resolution + width // 2\n off_y = empty_landmarks[ii,1] // resolution + height // 2\n if off_x < width and off_x > 0 and off_y < height and off_y > 0:\n data[int(off_x), int(off_y)] = 0\n\n data_out = data.reshape((-1))\n map_msg.data = data_out\n publisher.publish(map_msg)\n \n# def rotate_map_parallel(map, T):\n# c ,s = np.cos(T[:,2]) , np.sin(T[:,2])\n# R = np.array(((c,-s), (s, c)))\n# Tmap = np.matmul(map,R)\n# rot_map = np.add(np.transpose(Tmap, (1,2,0)), T[:,0:2])\n# return np.transpose(rot_map, (1,0,2))\n\ndef rotate_map_parallel(map, T):\n c ,s = np.cos(T[:,2]) , np.sin(T[:,2])\n R = np.array(((c,-s), (s, c)))\n Tmap = np.matmul(map,R)\n Tmap = np.transpose(Tmap, (1,2,0))\n rot_map = Tmap.reshape((Tmap.shape[0],-1))\n rot_map = rot_map + T[:,0:2].reshape((-1))\n rot_map = rot_map.reshape(Tmap.shape)\n return rot_map\n\ndef rotate_map(map, T):\n c ,s = np.cos(T[2]) , np.sin(T[2])\n R = np.array(((c,-s), (s, c))) \n rot_map = np.matmul(map,R) + T[0:2]\n return rot_map\n\ndef likelihood(target_map_rotated, origin_map_nbrs, var, origin_empty_map_nbrs=None , res = 0.01):\n if origin_empty_map_nbrs is None:\n d, _ = origin_map_nbrs.kneighbors(target_map_rotated)\n p = np.mean((1/(np.sqrt(2*np.pi*var)))*np.exp(-np.power(d,2)/(2*var))) + 1e-200\n else:\n d, _ = origin_map_nbrs.kneighbors(target_map_rotated)\n d_empty, _ = origin_empty_map_nbrs.kneighbors(target_map_rotated)\n is_bad = d_empty > 0.5*res \n #print(np.mean(is_bad))\n p = np.mean(np.multiply(is_bad,(1/(np.sqrt(2*np.pi*var)))*np.exp(-np.power(d,2)/(2*var)))) + 1e-200\n #p = np.mean(is_bad)*p\n return p\n\ndef likelihood_parallel(T, target_map, origin_map_nbrs, var):\n target_map_rotated = rotate_map_parallel(target_map, T)\n d, _ = origin_map_nbrs.kneighbors(target_map_rotated.reshape((-1,2)))\n d = d.reshape((target_map_rotated.shape[0],target_map_rotated.shape[1]))\n p = np.sum((1/(np.sqrt(2*np.pi*var)))*np.exp(-np.power(d,2)/(2*var)), axis=0) + 1e-200\n p = p/np.sum(p)\n return p\n\n\nclass ParticleFilterMapMatcher():\n def __init__(self,\n init_origin_map_nbrs,\n init_target_map,\n Np = 1000,\n N_history = 5,\n N_theta = 50,\n N_x = 20,\n N_y = 20,\n R_var = 0.1,\n Q_xy = 0.01,\n Q_theta = 0.01,\n R_xy = 0.1,\n R_theta = 0.1,\n P_theta = [0.6,0.1,0.1,0.2],\n P_xy = [0.7, 0.05, 0.1, 0.05,0.1],\n xy_mul = 2.0):\n self.Np = Np\n self.R_var = R_var\n self.N_history = N_history\n self.filter = np.arange(3,N_history+3,dtype=np.float32)\n self.Q_xy = Q_xy\n self.Q_theta = Q_theta\n self.R_xy = R_xy\n self.R_theta = R_theta\n self.P_theta = P_theta\n self.P_xy = P_xy\n self.xy_mul = xy_mul\n\n temp_X = []\n angles = 
np.linspace(0 , 2*np.pi ,N_theta )\n xRange = np.linspace(-10 , 10 , N_x) \n yRange = np.linspace(-10 , 10 ,N_y) \n x0 = [xRange[np.random.randint(N_x)] ,yRange[np.random.randint(N_y)], angles[np.random.randint(N_theta)]]\n tempMap = rotate_map(init_target_map, x0)\n w0 = likelihood(tempMap, init_origin_map_nbrs, self.R_var)\n temp_X.append(x0)\n i = 0\n print(\"Initilizing particles...\")\n while i < (N_theta*N_x*N_y):\n xt = [xRange[np.random.randint(N_x)],\n yRange[np.random.randint(N_y)],\n angles[np.random.randint(N_theta)]]\n tempMap = rotate_map(init_target_map, xt)\n wt = likelihood(tempMap, init_origin_map_nbrs, self.R_var)\n if wt>w0:\n temp_X.append(xt)\n x0 = xt\n w0 = wt\n elif np.random.binomial(1, wt/w0) == 1:\n temp_X.append(xt)\n x0 = xt\n w0 = wt\n elif np.random.binomial(1, 0.5) == 1:\n temp_X.append(xt)\n x0 = xt\n w0 = wt\n else:\n x = x0\n x[0] = x[0] + np.random.normal(0.0, 0.1)\n x[1] = x[1] + np.random.normal(0.0, 0.1)\n x[2] = x[2] + np.random.normal(0.0, 0.1) + np.random.choice(a = 4,p = [0.4,0.2,0.2,0.2] )*0.5*np.pi\n x[2] = np.remainder(x[2],2*np.pi)\n temp_X.append(x)\n i += 1\n self.X = np.array(temp_X[-Np:])\n self.W = np.ones((Np,N_history))\n self.indicate = 0\n print(\"Initilizing done with \"+str(Np)+\" samples out of \"+str(len(temp_X)))\n\n def predict(self):\n self.X[:,0:2] = self.X[:,0:2] + np.random.normal(0.0, self.Q_xy, size=self.X[:,0:2].shape)\n self.X[:,2] = self.X[:,2] + np.random.normal(0.0, self.Q_theta, size=self.X[:,2].shape)\n self.X[:,2] = np.remainder(self.X[:,2],2*np.pi)\n\n def update(self, target_map, origin_map_nbrs, origin_empty_map_nbrs, res = 0.01):\n for i in range(self.Np):\n tempMap = rotate_map(target_map, self.X[i])\n if self.indicate > 0:\n self.W[i, self.indicate] = self.W[i, self.indicate - 1] * likelihood(tempMap, origin_map_nbrs, self.R_var, origin_empty_map_nbrs, res)\n else:\n self.W[i, self.indicate] = likelihood(tempMap, origin_map_nbrs, self.R_var, origin_empty_map_nbrs, res)\n self.indicate += 1\n \n def update_parallel(self, target_map, origin_map_nbrs, origin_empty_map_nbrs, res = 0.01):\n L = likelihood_parallel(self.X, target_map, origin_map_nbrs, 0.01)\n #L_not = likelihood_parallel(self.X, target_map, origin_empty_map_nbrs, 0.001)\n #L_not = np.ones_like(L_not) - L_not\n #L = np.multiply(L, L_not)\n if self.indicate > 0:\n self.W[:, self.indicate] = self.W[:, self.indicate - 1] * L\n else:\n self.W[:, self.indicate] = L\n self.indicate += 1\n \n def refinement(self, target_map, origin_map_nbrs, res = 0.01, Np = 2000):\n X_try = np.empty((3))\n W_temp = np.zeros((Np))\n X_temp = self.X_map\n tempMap = rotate_map(target_map, X_temp)\n W_temp = likelihood(tempMap, origin_map_nbrs, 0.001, origin_empty_map_nbrs=None , res = res)\n j = 0\n for i in range(1,Np):\n n = np.random.choice(10)\n X_try[0] = np.random.normal(self.best_10[n,0], 0.5/np.sqrt(i))\n X_try[1] = np.random.normal(self.best_10[n,1], 0.5/np.sqrt(i))\n X_try[2] = np.random.normal(self.best_10[n,2], 0.1/np.sqrt(i))\n tempMap = rotate_map(target_map, X_try)\n W_try = likelihood(tempMap, origin_map_nbrs, 0.001, origin_empty_map_nbrs=None , res = res)\n if W_try > W_temp:\n W_temp = W_try\n X_temp = X_try\n j+=1\n print(\"Finishet refinement with \"+str(j)+\" updates\")\n return X_temp\n\n\n def resample(self):\n print(\"performing resample!\")\n p = np.dot(self.W, self.filter)\n p = p/np.sum(p)\n self.X_map = self.X[np.argmax(p)]\n self.best_10 = self.X[p.argsort()[-10:][::-1]]\n idxs = np.random.choice(a = self.Np, size = self.Np,p = p)\n self.X = 
self.X[idxs]\n self.X[:,0] = self.X[:,0] + np.random.normal(0.0, self.R_xy, size=self.X[:,0].shape) + np.random.randint(-1,2) * np.random.choice(a = 5, size = self.X[:,0].shape,p = self.P_xy)*self.xy_mul\n self.X[:,1] = self.X[:,1] + np.random.normal(0.0, self.R_xy, size=self.X[:,1].shape) + np.random.randint(-1,2) * np.random.choice(a = 5, size = self.X[:,1].shape,p = self.P_xy)*self.xy_mul\n self.X[:,2] = self.X[:,2] + np.random.normal(0.0, self.R_theta, size=self.X[:,2].shape) + np.random.choice(a = 4, size = self.X[:,2].shape,p = self.P_theta )*0.5*np.pi\n self.X[:,2] = np.remainder(self.X[:,2],2*np.pi)\n self.indicate = 0\n\n\n\ndef DEMapMatcher(origin_map_nbrs, target_map, last_result = None):\n DE_func = lambda x: -likelihood(rotate_map(target_map,x),origin_map_nbrs, 0.3)\n if last_result is None:\n result = differential_evolution(DE_func, bounds = [(-15,15),(-15,15),(0,2*np.pi)] ,maxiter= 100 ,popsize=6,tol=0.0001, mutation=0.8)\n T_de = [result.x[0] , result.x[1] , min(result.x[2], 2*np.pi - result.x[2])]\n else:\n result = differential_evolution(DE_func, bounds = [(last_result[0]-10,last_result[0]+10),(last_result[1]-10,last_result[1]+10),(last_result[2]-0.5*np.pi,last_result[2]+0.5*np.pi)] ,maxiter= 100 ,popsize=6,tol=0.0001, mutation=0.8)\n T_de = [result.x[0] , result.x[1] , min(result.x[2], 2*np.pi - result.x[2])]\n return T_de\n\ndef RANSACMapMatcher(target_map, origin_map):\n min_samples = min([int(0.5 * len(target_map)), int(0.5 * len(origin_map))])\n if origin_map.shape[0] > target_map.shape[0]:\n origin_map = origin_map[0:target_map.shape[0]]\n elif origin_map.shape[0] < target_map.shape[0]:\n target_map = target_map[0:origin_map.shape[0]]\n model_robust, inliers = ransac((origin_map, target_map), SimilarityTransform, min_samples=min_samples,\n residual_threshold=10, max_trials=100)\n T_RANSAC = [model_robust.translation[0],model_robust.translation[1],model_robust.rotation]\n if T_RANSAC[2]<0:\n T_RANSAC[2] = 2*np.pi - T_RANSAC[2]\n T_RANSAC[2] = np.remainder(T_RANSAC[2],2*np.pi)\n return T_RANSAC\n\ndef ICPMapMatcher(src, dst, init_pose=(0,0,0), no_iterations = 13):\n Tr = np.array([[np.cos(init_pose[2]),-np.sin(init_pose[2])],\n [np.sin(init_pose[2]), np.cos(init_pose[2])]])\n src = cv2.transform(src, Tr)[:,:,0]\n nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(dst)\n for i in range(no_iterations):\n distances, indices = nbrs.kneighbors(src)\n T = RANSACMapMatcher(src, dst[indices.reshape((-1))])\n Tr = np.array([[np.cos(T[2]),-np.sin(T[2])],[np.sin(T[2]), np.cos(T[2])]])\n src = cv2.transform(src, Tr)[:,:,0]\n return T\n\n\n\ndef OccupancyGrid2LandmarksArray(OccupancyGridMsg, filter_map = None):\n map = np.array(OccupancyGridMsg.data , dtype = np.float32)\n N = np.sqrt(map.shape)[0].astype(np.int32)\n Re = np.copy(map.reshape((N,N)))\n scale = OccupancyGridMsg.info.resolution\n landMarksArray = (np.argwhere( Re == 100 ) * scale)\n landMarksArray_empty = (np.argwhere( Re == 0 ) * scale)\n if landMarksArray.shape[0] != 0:\n if filter_map is not None:\n if len(landMarksArray) > filter_map:\n a = len(landMarksArray)//filter_map\n else:\n a = 1\n landMarksArray = landMarksArray[np.arange(0,len(landMarksArray),a)]\n return landMarksArray, landMarksArray_empty\n else:\n print(\"Error: Empty map!\")\n return \"empty\" , \"empty\"\n"
] |
[
[
"numpy.dot",
"numpy.sqrt",
"numpy.linspace",
"numpy.random.randint",
"scipy.optimize.differential_evolution",
"numpy.arange",
"numpy.matmul",
"numpy.sin",
"numpy.argmax",
"sklearn.neighbors.NearestNeighbors",
"numpy.zeros",
"numpy.random.choice",
"numpy.power",
"numpy.transpose",
"numpy.random.binomial",
"numpy.array",
"numpy.sum",
"numpy.cos",
"numpy.ones",
"numpy.argwhere",
"numpy.random.normal",
"numpy.remainder",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
kissf-lu/python_test
|
[
"5df2f31984e385c25cd6b6bc144302450f671d8a"
] |
[
"simpy_test/hospital_test.py"
] |
[
"# -*- coding: utf-8 -*-\n\n\"\"\"\n======================================================================================================\n 杭州HUB仿真项目\n\n 项目启动日期:2017年7月6日\n 项目启动标识:AIRPORT OF EZHOU'S PROJECT -- HZ\n ===========================================\n 代码创建日期:2017年7月6日\n 代码创建工程师:卢健\n 代码版本:1.0\n 版本更新日期:2017年7月6日\n 版本更新工程师:卢健\n\n 代码整体功能描述:汇流点机器测试单元\n 1、用于测试cross类模块;\n\n\n=====================================================================================================\n\"\"\"\n\nfrom simpy import Environment\nfrom simpy import PriorityStore\nfrom simpy import PriorityItem\nfrom simpy import Resource\nfrom simpy_test.hospital import Hospital\nimport random as rd\nfrom numpy.random import choice\n\n\nRANDOM_SEED = 42\nPACKAGE_RES = 2 # 每次生成包裹的数量\nQUEUE_RES = 2\nNUM_PORT_OUT = 1\nNUM_PORT_IN = 2\nNUM_PEOPLE = 2\nNUM_PACKAGES = 10 # 本次数据的\nMACHINE_ID = 'h1'\nINPUT_QUEUE_ID = []\nOUTPUT_QUEUE_ID = []\nINPUT_QUEUE_DIC = {}\nOUTPUT_QUEUE_DIC = {}\nrd.seed(RANDOM_SEED)\n\n\ndef packages(env, generator_package_res, generator_queue_res):\n \"\"\"\"\"\"\n for num in range(NUM_PACKAGES):\n env.process(machine_package(env=env, package_name=num,\n generator_package_res=generator_package_res,\n generator_queue_res=generator_queue_res))\n\n\ndef machine_package(env, package_name,\n generator_package_res,\n generator_queue_res):\n \"\"\"\"\"\"\n with generator_package_res.request() as req:\n yield req\n env.process(machine_queue_input(env, generator_queue_res, package_name))\n yield env.timeout(1)\n\n\ndef machine_queue_input(env, generator_queue_res, package_id):\n \"\"\"\"\"\"\n # print('package', package_id, 'come', 'at', env.now)\n with generator_queue_res.request() as req:\n yield req\n id_queue = str(choice(INPUT_QUEUE_ID))\n id_package = ''.join([id_queue, '_', str(package_id)])\n package_items = {'package_id': id_package, 'package_gen_time': env.now}\n yield env.timeout(rd.randint(5, 5))\n print('<----package', package_id, 'was pushed into queue', id_queue,\n 'at', env.now)\n INPUT_QUEUE_DIC[id_queue].put(PriorityItem(priority=env.now,\n item=package_items))\n\n\ndef cross_sim():\n \"\"\"\n cross module test: Cross 模块测试单元\n \"\"\"\n env = Environment()\n INPUT_QUEUE_ID.extend(\n [''.join([MACHINE_ID, '_in', str(i)]) for i in range(NUM_PORT_IN)]\n )\n OUTPUT_QUEUE_ID.extend(\n [''.join([MACHINE_ID, '_out', str(i)]) for i in range(NUM_PORT_OUT)]\n )\n\n for id in INPUT_QUEUE_ID:\n INPUT_QUEUE_DIC.update({id: PriorityStore(env=env)})\n for id in OUTPUT_QUEUE_ID:\n OUTPUT_QUEUE_DIC.update({id: PriorityStore(env=env)})\n\n generator_queue_res = Resource(env=env, capacity=QUEUE_RES)\n generator_package_res = Resource(env=env, capacity=PACKAGE_RES)\n\n packages(env=env,\n generator_package_res=generator_package_res,\n generator_queue_res=generator_queue_res)\n hospital = Hospital(env=env,id_h=MACHINE_ID,\n hospital_capacity=NUM_PEOPLE,\n input_dic=INPUT_QUEUE_DIC,\n output_dic=OUTPUT_QUEUE_DIC)\n env.run()\n\n\nif __name__ == '__main__':\n print('run!')\n cross_sim()\n print('end')\n"
] |
[
[
"numpy.random.choice"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vinnamkim/EfficientNet-PyTorch
|
[
"6671c729c046897d460fda0c97a3f3bf6923ee49"
] |
[
"efficientnet_pytorch/utils.py"
] |
[
"\"\"\"\nThis file contains helper functions for building the model and for loading model parameters.\nThese helper functions are built to mirror those in the official TensorFlow implementation.\n\"\"\"\n\nimport re\nimport math\nimport collections\nfrom functools import partial\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.utils import model_zoo\n\ndef zerocenter(x):\n \"\"\"x : [B, C, H, W]\"\"\"\n return x - x.flatten(1).mean(1, keepdim=True).unsqueeze(-1).unsqueeze(-1)\n\n########################################################################\n############### HELPERS FUNCTIONS FOR MODEL ARCHITECTURE ###############\n########################################################################\n\n\n# Parameters for the entire model (stem, all blocks, and head)\nGlobalParams = collections.namedtuple('GlobalParams', [\n 'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate',\n 'num_classes', 'width_coefficient', 'depth_coefficient',\n 'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size'])\n\n# Parameters for an individual model block\nBlockArgs = collections.namedtuple('BlockArgs', [\n 'kernel_size', 'num_repeat', 'input_filters', 'output_filters',\n 'expand_ratio', 'id_skip', 'stride', 'se_ratio'])\n\n# Change namedtuple defaults\nGlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)\nBlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)\n\n\nclass SwishImplementation(torch.autograd.Function):\n @staticmethod\n def forward(ctx, i):\n result = i * torch.sigmoid(i)\n ctx.save_for_backward(i)\n return result\n\n @staticmethod\n def backward(ctx, grad_output):\n i = ctx.saved_variables[0]\n sigmoid_i = torch.sigmoid(i)\n return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))\n\n\nclass Swish(nn.Module):\n @staticmethod\n def forward(x):\n return SwishImplementation.apply(x)\n\n\nrelu_fn = Swish()\n\n\ndef round_filters(filters, global_params):\n \"\"\" Calculate and round number of filters based on depth multiplier. \"\"\"\n multiplier = global_params.width_coefficient\n if not multiplier:\n return filters\n divisor = global_params.depth_divisor\n min_depth = global_params.min_depth\n filters *= multiplier\n min_depth = min_depth or divisor\n new_filters = max(min_depth, int(filters + divisor / 2) // divisor * divisor)\n if new_filters < 0.9 * filters: # prevent rounding by more than 10%\n new_filters += divisor\n return int(new_filters)\n\n\ndef round_repeats(repeats, global_params):\n \"\"\" Round number of filters based on depth multiplier. \"\"\"\n multiplier = global_params.depth_coefficient\n if not multiplier:\n return repeats\n return int(math.ceil(multiplier * repeats))\n\n\ndef drop_connect(inputs, p, training):\n \"\"\" Drop connect. \"\"\"\n if not training: return inputs\n batch_size = inputs.shape[0]\n keep_prob = 1 - p\n random_tensor = keep_prob\n random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)\n binary_tensor = torch.floor(random_tensor)\n output = inputs / keep_prob * binary_tensor\n return output\n\n\ndef get_same_padding_conv2d(image_size=None):\n \"\"\" Chooses static padding if you have specified an image size, and dynamic padding otherwise.\n Static padding is necessary for ONNX exporting of models. 
\"\"\"\n if image_size is None:\n return Conv2dDynamicSamePadding\n else:\n return partial(Conv2dStaticSamePadding, image_size=image_size)\n\n\nclass Conv2dDynamicSamePadding(nn.Conv2d):\n \"\"\" 2D Convolutions like TensorFlow, for a dynamic image size \"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):\n super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)\n self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2\n\n def forward(self, x):\n ih, iw = x.size()[-2:]\n kh, kw = self.weight.size()[-2:]\n sh, sw = self.stride\n oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)\n pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)\n pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)\n if pad_h > 0 or pad_w > 0:\n x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2])\n return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n\n\nclass Conv2dStaticSamePadding(nn.Conv2d):\n \"\"\" 2D Convolutions like TensorFlow, for a fixed image size\"\"\"\n\n def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):\n super().__init__(in_channels, out_channels, kernel_size, **kwargs)\n self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]] * 2\n\n # Calculate padding based on image size and save it\n assert image_size is not None\n ih, iw = image_size if type(image_size) == list else [image_size, image_size]\n kh, kw = self.weight.size()[-2:]\n sh, sw = self.stride\n oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)\n pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] + 1 - ih, 0)\n pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] + 1 - iw, 0)\n if pad_h > 0 or pad_w > 0:\n self.static_padding = nn.ZeroPad2d((pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h - pad_h // 2))\n else:\n self.static_padding = Identity()\n\n def forward(self, x):\n x = self.static_padding(x)\n x = F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n return x\n\n\nclass Identity(nn.Module):\n def __init__(self, ):\n super(Identity, self).__init__()\n\n def forward(self, input):\n return input\n\n\n########################################################################\n############## HELPERS FUNCTIONS FOR LOADING MODEL PARAMS ##############\n########################################################################\n\n\ndef efficientnet_params(model_name):\n \"\"\" Map EfficientNet model name to parameter coefficients. \"\"\"\n params_dict = {\n # Coefficients: width,depth,res,dropout\n 'efficientnet-b0': (1.0, 1.0, 224, 0.2),\n 'efficientnet-b1': (1.0, 1.1, 240, 0.2),\n 'efficientnet-b2': (1.1, 1.2, 260, 0.3),\n 'efficientnet-b3': (1.2, 1.4, 300, 0.3),\n 'efficientnet-b4': (1.4, 1.8, 380, 0.4),\n 'efficientnet-b5': (1.6, 2.2, 456, 0.4),\n 'efficientnet-b6': (1.8, 2.6, 528, 0.5),\n 'efficientnet-b7': (2.0, 3.1, 600, 0.5),\n }\n return params_dict[model_name]\n\n\nclass BlockDecoder(object):\n \"\"\" Block Decoder for readability, straight from the official TensorFlow repository \"\"\"\n\n @staticmethod\n def _decode_block_string(block_string):\n \"\"\" Gets a block through a string notation of arguments. 
\"\"\"\n assert isinstance(block_string, str)\n\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split(r'(\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n\n # Check stride\n assert (('s' in options and len(options['s']) == 1) or\n (len(options['s']) == 2 and options['s'][0] == options['s'][1]))\n\n return BlockArgs(\n kernel_size=int(options['k']),\n num_repeat=int(options['r']),\n input_filters=int(options['i']),\n output_filters=int(options['o']),\n expand_ratio=int(options['e']),\n id_skip=('noskip' not in block_string),\n se_ratio=float(options['se']) if 'se' in options else None,\n stride=[int(options['s'][0])])\n\n @staticmethod\n def _encode_block_string(block):\n \"\"\"Encodes a block to a string.\"\"\"\n args = [\n 'r%d' % block.num_repeat,\n 'k%d' % block.kernel_size,\n 's%d%d' % (block.strides[0], block.strides[1]),\n 'e%s' % block.expand_ratio,\n 'i%d' % block.input_filters,\n 'o%d' % block.output_filters\n ]\n if 0 < block.se_ratio <= 1:\n args.append('se%s' % block.se_ratio)\n if block.id_skip is False:\n args.append('noskip')\n return '_'.join(args)\n\n @staticmethod\n def decode(string_list):\n \"\"\"\n Decodes a list of string notations to specify blocks inside the network.\n\n :param string_list: a list of strings, each string is a notation of block\n :return: a list of BlockArgs namedtuples of block args\n \"\"\"\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args\n\n @staticmethod\n def encode(blocks_args):\n \"\"\"\n Encodes a list of BlockArgs to a list of strings.\n\n :param blocks_args: a list of BlockArgs namedtuples of block args\n :return: a list of strings, each string is a notation of block\n \"\"\"\n block_strings = []\n for block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\n return block_strings\n\n\ndef efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2,\n drop_connect_rate=0.2, image_size=None, num_classes=1000):\n \"\"\" Creates a efficientnet model. 
\"\"\"\n\n blocks_args = [\n 'r1_k3_s11_e1_i32_o16_se0.25', 'r2_k3_s22_e6_i16_o24_se0.25',\n 'r2_k5_s22_e6_i24_o40_se0.25', 'r3_k3_s22_e6_i40_o80_se0.25',\n 'r3_k5_s11_e6_i80_o112_se0.25', 'r4_k5_s22_e6_i112_o192_se0.25',\n 'r1_k3_s11_e6_i192_o320_se0.25',\n ]\n blocks_args = BlockDecoder.decode(blocks_args)\n\n global_params = GlobalParams(\n batch_norm_momentum=0.99,\n batch_norm_epsilon=1e-3,\n dropout_rate=dropout_rate,\n drop_connect_rate=drop_connect_rate,\n # data_format='channels_last', # removed, this is always true in PyTorch\n num_classes=num_classes,\n width_coefficient=width_coefficient,\n depth_coefficient=depth_coefficient,\n depth_divisor=8,\n min_depth=None,\n image_size=image_size,\n )\n\n return blocks_args, global_params\n\n\ndef get_model_params(model_name, override_params):\n \"\"\" Get the block args and global params for a given model \"\"\"\n if model_name.startswith('efficientnet'):\n w, d, s, p = efficientnet_params(model_name)\n # note: all models have drop connect rate = 0.2\n blocks_args, global_params = efficientnet(\n width_coefficient=w, depth_coefficient=d, dropout_rate=p, image_size=s)\n else:\n raise NotImplementedError('model name is not pre-defined: %s' % model_name)\n if override_params:\n # ValueError will be raised here if override_params has fields not included in global_params.\n global_params = global_params._replace(**override_params)\n return blocks_args, global_params\n\n\nurl_map = {\n 'efficientnet-b0': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b0-355c32eb.pth',\n 'efficientnet-b1': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b1-f1951068.pth',\n 'efficientnet-b2': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b2-8bb594d6.pth',\n 'efficientnet-b3': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b3-5fb5a3c3.pth',\n 'efficientnet-b4': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b4-6ed6700e.pth',\n 'efficientnet-b5': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b5-b6417697.pth',\n 'efficientnet-b6': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b6-c76e70fd.pth',\n 'efficientnet-b7': 'http://storage.googleapis.com/public-models/efficientnet/efficientnet-b7-dcc49843.pth',\n}\n\n\ndef load_pretrained_weights(model, model_name, load_fc=True):\n \"\"\" Loads pretrained weights, and downloads if loading for the first time. \"\"\"\n state_dict = model_zoo.load_url(url_map[model_name])\n if load_fc:\n model.load_state_dict(state_dict)\n else:\n state_dict.pop('_fc.weight')\n state_dict.pop('_fc.bias')\n res = model.load_state_dict(state_dict, strict=False)\n assert set(res.missing_keys) == set(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'\n print('Loaded pretrained weights for {}'.format(model_name))\n"
] |
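For reference, a minimal standalone sketch of the block-string notation that `_decode_block_string` above parses; `BlockArgs` here is a hypothetical simplified namedtuple (the repo's real tuple carries more fields, e.g. `id_skip`):

```python
import re
from collections import namedtuple

# Hypothetical, simplified stand-in for the repo's BlockArgs namedtuple.
BlockArgs = namedtuple('BlockArgs', ['kernel_size', 'num_repeat', 'input_filters',
                                     'output_filters', 'expand_ratio', 'se_ratio', 'stride'])

def decode_block_string(block_string):
    """Parse a notation like 'r1_k3_s11_e1_i32_o16_se0.25' into BlockArgs."""
    options = {}
    for op in block_string.split('_'):
        splits = re.split(r'(\d.*)', op)  # split at the first digit: 'k3' -> ('k', '3')
        if len(splits) >= 2:
            key, value = splits[:2]
            options[key] = value
    return BlockArgs(kernel_size=int(options['k']),
                     num_repeat=int(options['r']),
                     input_filters=int(options['i']),
                     output_filters=int(options['o']),
                     expand_ratio=int(options['e']),
                     se_ratio=float(options['se']) if 'se' in options else None,
                     stride=int(options['s'][0]))

print(decode_block_string('r1_k3_s11_e1_i32_o16_se0.25'))
```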
[
[
"torch.sigmoid",
"torch.floor",
"torch.nn.functional.conv2d",
"torch.rand",
"torch.nn.ZeroPad2d",
"torch.utils.model_zoo.load_url",
"torch.nn.functional.pad"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lss616263/ganomaly
|
[
"0e8ddd7b97fdbe35b33d607cddaf62f36cb591c8"
] |
[
"lib/data.py"
] |
[
"\"\"\"\nLOAD DATA from file.\n\"\"\"\n\n# pylint: disable=C0301,E1101,W0622,C0103,R0902,R0915\n\n##\nimport os\nimport torch\nimport numpy as np\nimport torchvision.datasets as datasets\nfrom torchvision.datasets import MNIST\nfrom torchvision.datasets import CIFAR10\nfrom torchvision.datasets import ImageFolder\nimport torchvision.transforms as transforms\n\n##\ndef load_data(opt):\n \"\"\" Load Data\n\n Args:\n opt ([type]): Argument Parser\n\n Raises:\n IOError: Cannot Load Dataset\n\n Returns:\n [type]: dataloader\n \"\"\"\n\n ##\n # LOAD DATA SET\n if opt.dataroot == '':\n opt.dataroot = './data/{}'.format(opt.dataset)\n\n if opt.dataset in ['cifar10']:\n splits = ['train', 'test']\n drop_last_batch = {'train': True, 'test': False}\n shuffle = {'train': True, 'test': False}\n\n transform = transforms.Compose(\n [\n transforms.Resize(opt.isize),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ]\n )\n\n classes = {\n 'plane': 0, 'car': 1, 'bird': 2, 'cat': 3, 'deer': 4,\n 'dog': 5, 'frog': 6, 'horse': 7, 'ship': 8, 'truck': 9\n }\n\n dataset = {}\n dataset['train'] = CIFAR10(root='./data', train=True, download=True, transform=transform)\n dataset['test'] = CIFAR10(root='./data', train=False, download=True, transform=transform)\n\n dataset['train'].train_data, dataset['train'].train_labels, \\\n dataset['test'].test_data, dataset['test'].test_labels = get_cifar_anomaly_dataset(\n trn_img=dataset['train'].train_data,\n trn_lbl=dataset['train'].train_labels,\n tst_img=dataset['test'].test_data,\n tst_lbl=dataset['test'].test_labels,\n abn_cls_idx=classes[opt.anomaly_class]\n )\n\n dataloader = {x: torch.utils.data.DataLoader(dataset=dataset[x],\n batch_size=opt.batchsize,\n shuffle=shuffle[x],\n num_workers=int(opt.workers),\n drop_last=drop_last_batch[x]) for x in splits}\n return dataloader\n\n elif opt.dataset in ['mnist']:\n opt.anomaly_class = int(opt.anomaly_class)\n\n splits = ['train', 'test']\n drop_last_batch = {'train': True, 'test': False}\n shuffle = {'train': True, 'test': True}\n\n transform = transforms.Compose(\n [\n transforms.Scale(opt.isize),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]\n )\n\n dataset = {}\n dataset['train'] = MNIST(root='./data', train=True, download=True, transform=transform)\n dataset['test'] = MNIST(root='./data', train=False, download=True, transform=transform)\n\n dataset['train'].train_data, dataset['train'].train_labels, \\\n dataset['test'].test_data, dataset['test'].test_labels = get_mnist_anomaly_dataset(\n trn_img=dataset['train'].train_data,\n trn_lbl=dataset['train'].train_labels,\n tst_img=dataset['test'].test_data,\n tst_lbl=dataset['test'].test_labels,\n abn_cls_idx=opt.anomaly_class\n )\n\n dataloader = {x: torch.utils.data.DataLoader(dataset=dataset[x],\n batch_size=opt.batchsize,\n shuffle=shuffle[x],\n num_workers=int(opt.workers),\n drop_last=drop_last_batch[x]) for x in splits}\n return dataloader\n\n elif opt.dataset in ['mnist2']:\n opt.anomaly_class = int(opt.anomaly_class)\n\n splits = ['train', 'test']\n drop_last_batch = {'train': True, 'test': False}\n shuffle = {'train': True, 'test': True}\n\n transform = transforms.Compose(\n [\n transforms.Scale(opt.isize),\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ]\n )\n\n dataset = {}\n dataset['train'] = MNIST(root='./data', train=True, download=True, transform=transform)\n dataset['test'] = MNIST(root='./data', train=False, download=True, transform=transform)\n\n 
dataset['train'].train_data, dataset['train'].train_labels, \\\n dataset['test'].test_data, dataset['test'].test_labels = get_mnist2_anomaly_dataset(\n trn_img=dataset['train'].train_data,\n trn_lbl=dataset['train'].train_labels,\n tst_img=dataset['test'].test_data,\n tst_lbl=dataset['test'].test_labels,\n nrm_cls_idx=opt.anomaly_class,\n proportion=opt.proportion\n )\n\n dataloader = {x: torch.utils.data.DataLoader(dataset=dataset[x],\n batch_size=opt.batchsize,\n shuffle=shuffle[x],\n num_workers=int(opt.workers),\n drop_last=drop_last_batch[x]) for x in splits}\n return dataloader\n\n else:\n splits = ['train', 'test']\n drop_last_batch = {'train': True, 'test': False}\n shuffle = {'train': True, 'test': True}\n transform = transforms.Compose([transforms.Scale(opt.isize),\n transforms.CenterCrop(opt.isize),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), ])\n\n dataset = {x: ImageFolder(os.path.join(opt.dataroot, x), transform) for x in splits}\n dataloader = {x: torch.utils.data.DataLoader(dataset=dataset[x],\n batch_size=opt.batchsize,\n shuffle=shuffle[x],\n num_workers=int(opt.workers),\n drop_last=drop_last_batch[x]) for x in splits}\n return dataloader\n\n##\ndef get_cifar_anomaly_dataset(trn_img, trn_lbl, tst_img, tst_lbl, abn_cls_idx=0, manualseed=-1):\n \"\"\"[summary]\n\n Arguments:\n trn_img {np.array} -- Training images\n trn_lbl {np.array} -- Training labels\n tst_img {np.array} -- Test images\n tst_lbl {np.array} -- Test labels\n\n Keyword Arguments:\n abn_cls_idx {int} -- Anomalous class index (default: {0})\n\n Returns:\n [np.array] -- New training-test images and labels.\n \"\"\"\n # Convert train-test labels into numpy array.\n trn_lbl = np.array(trn_lbl)\n tst_lbl = np.array(tst_lbl)\n\n # --\n # Find idx, img, lbl for abnormal and normal on org dataset.\n nrm_trn_idx = np.where(trn_lbl != abn_cls_idx)[0]\n abn_trn_idx = np.where(trn_lbl == abn_cls_idx)[0]\n nrm_trn_img = trn_img[nrm_trn_idx] # Normal training images\n abn_trn_img = trn_img[abn_trn_idx] # Abnormal training images\n nrm_trn_lbl = trn_lbl[nrm_trn_idx] # Normal training labels\n abn_trn_lbl = trn_lbl[abn_trn_idx] # Abnormal training labels.\n\n nrm_tst_idx = np.where(tst_lbl != abn_cls_idx)[0]\n abn_tst_idx = np.where(tst_lbl == abn_cls_idx)[0]\n nrm_tst_img = tst_img[nrm_tst_idx] # Normal training images\n abn_tst_img = tst_img[abn_tst_idx] # Abnormal training images.\n nrm_tst_lbl = tst_lbl[nrm_tst_idx] # Normal training labels\n abn_tst_lbl = tst_lbl[abn_tst_idx] # Abnormal training labels.\n\n # --\n # Assign labels to normal (0) and abnormals (1)\n nrm_trn_lbl[:] = 0\n nrm_tst_lbl[:] = 0\n abn_trn_lbl[:] = 1\n abn_tst_lbl[:] = 1\n\n # --\n if manualseed != -1:\n # Random seed.\n # Concatenate the original train and test sets.\n nrm_img = np.concatenate((nrm_trn_img, nrm_tst_img), axis=0)\n nrm_lbl = np.concatenate((nrm_trn_lbl, nrm_tst_lbl), axis=0)\n abn_img = np.concatenate((abn_trn_img, abn_tst_img), axis=0)\n abn_lbl = np.concatenate((abn_trn_lbl, abn_tst_lbl), axis=0)\n\n # Split the normal data into the new train and tests.\n idx = np.arange(len(nrm_lbl))\n np.random.seed(manualseed)\n np.random.shuffle(idx)\n\n nrm_trn_len = int(len(idx) * 0.80)\n nrm_trn_idx = idx[:nrm_trn_len]\n nrm_tst_idx = idx[nrm_trn_len:]\n\n nrm_trn_img = nrm_img[nrm_trn_idx]\n nrm_trn_lbl = nrm_lbl[nrm_trn_idx]\n nrm_tst_img = nrm_img[nrm_tst_idx]\n nrm_tst_lbl = nrm_lbl[nrm_tst_idx]\n\n # Create new anomaly dataset based on the following data structure:\n # - anomaly dataset\n # . 
-> train\n # . -> normal\n # . -> test\n # . -> normal\n # . -> abnormal\n new_trn_img = np.copy(nrm_trn_img)\n new_trn_lbl = np.copy(nrm_trn_lbl)\n new_tst_img = np.concatenate((nrm_tst_img, abn_trn_img, abn_tst_img), axis=0)\n new_tst_lbl = np.concatenate((nrm_tst_lbl, abn_trn_lbl, abn_tst_lbl), axis=0)\n\n return new_trn_img, new_trn_lbl, new_tst_img, new_tst_lbl\n\n##\ndef get_mnist_anomaly_dataset(trn_img, trn_lbl, tst_img, tst_lbl, abn_cls_idx=0, manualseed=-1):\n \"\"\"[summary]\n\n Arguments:\n trn_img {np.array} -- Training images\n trn_lbl {np.array} -- Training labels\n tst_img {np.array} -- Test images\n tst_lbl {np.array} -- Test labels\n\n Keyword Arguments:\n abn_cls_idx {int} -- Anomalous class index (default: {0})\n\n Returns:\n [np.array] -- New training-test images and labels.\n \"\"\"\n # --\n # Find normal abnormal indexes.\n nrm_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() != abn_cls_idx)[0])\n abn_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() == abn_cls_idx)[0])\n nrm_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() != abn_cls_idx)[0])\n abn_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() == abn_cls_idx)[0])\n\n # --\n # Find normal and abnormal images\n nrm_trn_img = trn_img[nrm_trn_idx] # Normal training images\n abn_trn_img = trn_img[abn_trn_idx] # Abnormal training images.\n nrm_tst_img = tst_img[nrm_tst_idx] # Normal training images\n abn_tst_img = tst_img[abn_tst_idx] # Abnormal training images.\n\n # --\n # Find normal and abnormal labels.\n nrm_trn_lbl = trn_lbl[nrm_trn_idx] # Normal training labels\n abn_trn_lbl = trn_lbl[abn_trn_idx] # Abnormal training labels.\n nrm_tst_lbl = tst_lbl[nrm_tst_idx] # Normal training labels\n abn_tst_lbl = tst_lbl[abn_tst_idx] # Abnormal training labels.\n\n # --\n # Assign labels to normal (0) and abnormals (1)\n nrm_trn_lbl[:] = 0\n nrm_tst_lbl[:] = 0\n abn_trn_lbl[:] = 1\n abn_tst_lbl[:] = 1\n\n # --\n if manualseed != -1:\n # Random seed.\n # Concatenate the original train and test sets.\n nrm_img = torch.cat((nrm_trn_img, nrm_tst_img), dim=0)\n nrm_lbl = torch.cat((nrm_trn_lbl, nrm_tst_lbl), dim=0)\n abn_img = torch.cat((abn_trn_img, abn_tst_img), dim=0)\n abn_lbl = torch.cat((abn_trn_lbl, abn_tst_lbl), dim=0)\n\n # Split the normal data into the new train and tests.\n idx = np.arange(len(nrm_lbl))\n np.random.seed(manualseed)\n np.random.shuffle(idx)\n\n nrm_trn_len = int(len(idx) * 0.80)\n nrm_trn_idx = idx[:nrm_trn_len]\n nrm_tst_idx = idx[nrm_trn_len:]\n\n nrm_trn_img = nrm_img[nrm_trn_idx]\n nrm_trn_lbl = nrm_lbl[nrm_trn_idx]\n nrm_tst_img = nrm_img[nrm_tst_idx]\n nrm_tst_lbl = nrm_lbl[nrm_tst_idx]\n\n # Create new anomaly dataset based on the following data structure:\n new_trn_img = nrm_trn_img.clone()\n new_trn_lbl = nrm_trn_lbl.clone()\n new_tst_img = torch.cat((nrm_tst_img, abn_trn_img, abn_tst_img), dim=0)\n new_tst_lbl = torch.cat((nrm_tst_lbl, abn_trn_lbl, abn_tst_lbl), dim=0)\n\n return new_trn_img, new_trn_lbl, new_tst_img, new_tst_lbl\n\n##\ndef get_mnist2_anomaly_dataset(trn_img, trn_lbl, tst_img, tst_lbl, nrm_cls_idx=0, proportion=0.5):\n \"\"\" Create mnist 2 anomaly dataset.\n\n Arguments:\n trn_img {np.array} -- Training images\n trn_lbl {np.array} -- Training labels\n tst_img {np.array} -- Test images\n tst_lbl {np.array} -- Test labels\n\n Keyword Arguments:\n nrm_cls_idx {int} -- Anomalous class index (default: {0})\n\n Returns:\n [tensor] -- New training-test images and labels.\n \"\"\"\n # --\n # Find normal abnormal indexes.\n # TODO: PyTorch v0.4 has torch.where function\n 
nrm_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() == nrm_cls_idx)[0])\n abn_trn_idx = torch.from_numpy(np.where(trn_lbl.numpy() != nrm_cls_idx)[0])\n nrm_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() == nrm_cls_idx)[0])\n abn_tst_idx = torch.from_numpy(np.where(tst_lbl.numpy() != nrm_cls_idx)[0])\n\n # Get n percent of the abnormal samples.\n abn_tst_idx = abn_tst_idx[torch.randperm(len(abn_tst_idx))]\n abn_tst_idx = abn_tst_idx[:int(len(abn_tst_idx) * proportion)]\n\n\n # --\n # Find normal and abnormal images\n nrm_trn_img = trn_img[nrm_trn_idx] # Normal training images\n abn_trn_img = trn_img[abn_trn_idx] # Abnormal training images.\n nrm_tst_img = tst_img[nrm_tst_idx] # Normal training images\n abn_tst_img = tst_img[abn_tst_idx] # Abnormal training images.\n\n # --\n # Find normal and abnormal labels.\n nrm_trn_lbl = trn_lbl[nrm_trn_idx] # Normal training labels\n abn_trn_lbl = trn_lbl[abn_trn_idx] # Abnormal training labels.\n nrm_tst_lbl = tst_lbl[nrm_tst_idx] # Normal training labels\n abn_tst_lbl = tst_lbl[abn_tst_idx] # Abnormal training labels.\n\n # --\n # Assign labels to normal (0) and abnormals (1)\n nrm_trn_lbl[:] = 0\n nrm_tst_lbl[:] = 0\n abn_trn_lbl[:] = 1\n abn_tst_lbl[:] = 1\n\n # Create new anomaly dataset based on the following data structure:\n new_trn_img = nrm_trn_img.clone()\n new_trn_lbl = nrm_trn_lbl.clone()\n new_tst_img = torch.cat((nrm_tst_img, abn_tst_img), dim=0)\n new_tst_lbl = torch.cat((nrm_tst_lbl, abn_tst_lbl), dim=0)\n\n return new_trn_img, new_trn_lbl, new_tst_img, new_tst_lbl"
] |
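The core of `get_cifar_anomaly_dataset` above is a relabel-and-move split: every sample of one class becomes the anomaly class (label 1), everything else is normal (label 0), and all anomalous samples end up in the test set. A NumPy-only sketch of that idea, with synthetic labels standing in for CIFAR/MNIST:

```python
import numpy as np

rng = np.random.RandomState(0)
trn_lbl = rng.randint(0, 10, size=100)   # synthetic training labels
tst_lbl = rng.randint(0, 10, size=40)    # synthetic test labels
abn_cls_idx = 3                          # class treated as anomalous

nrm_trn_idx = np.where(trn_lbl != abn_cls_idx)[0]  # normals stay in train
abn_trn_idx = np.where(trn_lbl == abn_cls_idx)[0]  # abnormal train samples move to test
nrm_tst_idx = np.where(tst_lbl != abn_cls_idx)[0]
abn_tst_idx = np.where(tst_lbl == abn_cls_idx)[0]

new_trn_lbl = np.zeros(len(nrm_trn_idx), dtype=int)  # train is all normal -> label 0
new_tst_lbl = np.concatenate([np.zeros(len(nrm_tst_idx), dtype=int),
                              np.ones(len(abn_trn_idx) + len(abn_tst_idx), dtype=int)])
print(len(new_trn_lbl), 'train (all normal);', len(new_tst_lbl), 'test (normal + abnormal)')
```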
[
[
"numpy.random.seed",
"torch.cat",
"numpy.random.shuffle",
"numpy.concatenate",
"numpy.copy",
"numpy.array",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhcet19/NeoAlgo-1
|
[
"c534a23307109280bda0e4867d6e8e490002a4ee"
] |
[
"Python/graphs/Directed_Acyclic_Graph.py"
] |
[
"'''\n Author : @anushkrishnav\n Built using : networkx since it is a gold standard for Python DAGs (and other graphs). You can create a networkx directed graph with a list of tuples that represent the graph edges:\n'''\nimport networkx as nx\nfrom matplotlib import pyplot as plt\nclass DAG:\n def __init__(self):\n self.graph=nx.DiGraph()\n def addEdges(self,edges):\n \"\"\"Function to add one edge at a time and check if the graph is acyclic post insertion\"\"\"\n self.graph.add_edge(edges)\n if nx.is_directed_acyclic_graph(self.graph):\n pass\n else:\n raise \"Unable to insert \"+str(edges)+\"This is an Acyclic graph\"\n self.graph.remove_edge(edges)\n def AddSetofEdges(self,listt):\n \"\"\"Function to all a list of edges and check is the graph is an DAG for furthur details refer networkx\"\"\"\n self.graph.add_edges_from(listt)\n if nx.is_directed_acyclic_graph(self.graph):\n pass\n else:\n raise \"This is an acyclic graph check your edges\"\n self.graph.remove_edge(listt)\n def Visualise(self,location=\"home\"):\n \"\"\"It uses Matplotlib to visualise the DAG .\n The graph is stored in a PNG format . So name the file accourdingly\n eg \n >>> DAG.Visualise(home/img.png)\"\"\"\n if self.graph==None:\n return \"There is no graph consider adding edges to visualise\" \n plt.tight_layout()\n nx.draw_networkx(self.graph,arrows=True,node_size=800)\n plt.savefig(location,format=\"PNG\")\n plt.clf()\n return \"Graph generated\"\n\n\ngraph = DAG()\ngraph.AddSetofEdges([(\"root\", \"a\"), (\"a\", \"b\"), (\"a\", \"e\"), (\"b\", \"c\"), (\"b\", \"d\"), (\"d\", \"e\")])\ngraph.Visualise(\"Python/graphs/graph.png\")\n\n"
] |
[
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.clf"
]
] |
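The acyclicity guard in the class above hinges on `nx.is_directed_acyclic_graph`; a quick check of that behaviour (assumes networkx is installed):

```python
import networkx as nx

g = nx.DiGraph([("root", "a"), ("a", "b"), ("b", "c")])
print(nx.is_directed_acyclic_graph(g))  # True
g.add_edge("c", "root")                 # introduces a cycle
print(nx.is_directed_acyclic_graph(g))  # False -> the class would undo the edges and raise
```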
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GTJuniorDesign0100-2020/anti-malarial-MCMC-bayesian-algorithm
|
[
"8ab95c9b65275096dd86268fbb99bb37b6806e05",
"8ab95c9b65275096dd86268fbb99bb37b6806e05"
] |
[
"tests/test_define_alleles.py",
"tests/test_calculate_frequencies.py"
] |
[
"import os\nimport sys\n\nimport pandas as pd\nimport pytest\nimport numpy as np\n\nimport api.define_alleles as Define_Alleles\nfrom api.algorithm_instance import AlgorithmInstance\nimport api.recrudescence_utils as recrudescence_utils\nfrom api.recrudescence_file_parser import RecrudescenceFileParser\n\n# NOTE: Makes this reliant on AlgorithmInstance tests passing\nexample_file = os.path.join(\n os.path.dirname(__file__),\n '../Angola2017_example.xlsx')\ngenotypedata, additional = RecrudescenceFileParser.parse_file(example_file)\n\ngenotypedata_RR_Benguela = AlgorithmInstance._get_samples_from_site(genotypedata, 'Benguela')\nadditional_neutral_Benguela = AlgorithmInstance._replace_sample_names(\n AlgorithmInstance._get_samples_from_site(additional, 'Benguela'),'Additional_')\n\n\ngenotypedata_RR_Lunda_Sul = AlgorithmInstance._get_samples_from_site(genotypedata, 'Lunda Sul')\nadditional_neutral_Lunda_Sul = AlgorithmInstance._replace_sample_names(\n AlgorithmInstance._get_samples_from_site(additional, 'Lunda Sul'),'Additional_')\n\n\ngenotypedata_RR_Zaire = AlgorithmInstance._get_samples_from_site(genotypedata, 'Zaire')\nadditional_neutral_Zaire = AlgorithmInstance._replace_sample_names(\n AlgorithmInstance._get_samples_from_site(additional, 'Zaire'),'Additional_')\n\nlocirepeats = np.array([2, 2, 3, 3, 3, 3, 3])\nmaxk = np.array([30, 30, 30, 30, 30, 30, 30])\n\[email protected]\ndef expected_Benguela_define_alleles():\n expected_list = []\n expected_columns = ['0', '1']\n index_0 = pd.DataFrame(\n [[219.0, 221.0],[221.0, 223.0],[243.0, 245.0],[261.0, 263.0],[223.0, 225.0],\n [225.0, 227.0],[231.0, 233.0],[237.0, 239.0],[239.0, 241.0],[249.0, 251.0],\n [217.0, 219.0],[229.0, 231.0],[245.0, 247.0]],\n columns=expected_columns)\n index_1 = pd.DataFrame(\n [[123.0, 125.0],[139.0, 141.0],[103.0, 105.0],[85.0, 87.0],[121.0, 123.0],\n [143.0, 145.0],[145.0, 147.0],[87.0, 89.0],[135.0, 137.0],[137.0, 139.0],\n [147.0, 149.0],[149.0, 151.0],[151.0, 153.0],[161.0, 163.0],[163.0, 165.0],\n [169.0, 171.0]],\n columns=expected_columns)\n index_2 = pd.DataFrame(\n [[176.5, 179.5],[161.5, 164.5],[164.5, 167.5],[167.5, 170.5],[173.5, 176.5],\n [170.5, 173.5],[71.5, 74.5],[179.5, 182.5],[158.5, 161.5],[182.5, 185.5],\n [200.5, 203.5]],\n columns=expected_columns)\n index_3 = pd.DataFrame(\n [[153.5, 156.5],[150.5, 153.5],[162.5, 165.5],[165.5, 168.5],[156.5, 159.5],\n [141.5, 144.5],[168.5, 171.5],[171.5, 174.5],[102.5, 105.5],[111.5, 114.5],\n [159.5, 162.5],[174.5, 177.5],[177.5, 180.5]],\n columns=expected_columns)\n index_4 = pd.DataFrame(\n [[160.5, 163.5],[166.5, 169.5],[169.5, 172.5],[175.5, 178.5],[172.5, 175.5],\n [163.5, 166.5],[178.5, 181.5],[148.5, 151.5],[154.5, 157.5],[157.5, 160.5],\n [184.5, 187.5],[193.5, 196.5],[136.5, 139.5],[139.5, 142.5],[181.5, 184.5],\n [187.5, 190.5],[190.5, 193.5],[196.5, 199.5],[199.5, 202.5]],\n columns=expected_columns)\n index_5 = pd.DataFrame(\n [[80.5, 83.5],[77.5, 80.5],[71.5, 74.5],[83.5, 86.5],[86.5, 89.5]],\n columns=expected_columns)\n index_6 = pd.DataFrame(\n [[161.5, 164.5],[158.5, 161.5],[146.5, 149.5],[173.5, 176.5],[170.5, 173.5],\n [149.5, 152.5],[167.5, 170.5],[164.5, 167.5],[176.5, 179.5],[179.5, 182.5],\n [182.5, 185.5],[185.5, 188.5],[194.5, 197.5]],\n columns=expected_columns)\n\n expected_list.append(index_0)\n expected_list.append(index_1)\n expected_list.append(index_2)\n expected_list.append(index_3)\n expected_list.append(index_4)\n expected_list.append(index_5)\n expected_list.append(index_6)\n\n return expected_list\n\[email 
protected]\ndef expected_Lunda_Sul_define_alleles():\n expected_list = []\n expected_columns = ['0', '1']\n index_0 = pd.DataFrame(\n [[218.0, 220.0],[244.0, 246.0],[258.0, 260.0],[216.0, 218.0],[224.0, 226.0],\n [232.0, 234.0],[234.0, 236.0],[236.0, 238.0],[210.0, 212.0],[220.0, 222.0],\n [230.0, 232.0],[238.0, 240.0],[240.0, 242.0],[246.0, 248.0],[248.0, 250.0],\n [256.0, 258.0],[274.0, 276.0]],\n columns=expected_columns)\n index_1 = pd.DataFrame(\n [[123.0, 125.0],[139.0, 141.0],[85.0, 87.0],[137.0, 139.0],[143.0, 145.0],\n [101.0, 103.0],[121.0, 123.0],[135.0, 137.0],[145.0, 147.0],[87.0, 89.0],\n [99.0, 101.0],[107.0, 109.0],[127.0, 129.0],[149.0, 151.0],[151.0, 153.0],\n [153.0, 155.0],[155.0, 157.0],[169.0, 171.0]],\n columns=expected_columns)\n index_2 = pd.DataFrame(\n [[158.5, 161.5],[164.5, 167.5],[167.5, 170.5],[170.5, 173.5],[173.5, 176.5],\n [176.5, 179.5],[179.5, 182.5],[71.5, 74.5],[161.5, 164.5],[185.5, 188.5]],\n columns=expected_columns)\n index_3 = pd.DataFrame(\n [[153.5, 156.5],[162.5, 165.5],[156.5, 159.5],[147.5, 150.5],[150.5, 153.5],\n [168.5, 171.5],[174.5, 177.5],[159.5, 162.5],[165.5, 168.5],[135.5, 138.5],\n [141.5, 144.5],[180.5, 183.5],[186.5, 189.5]],\n columns=expected_columns)\n index_4 = pd.DataFrame(\n [[163.5, 166.5],[160.5, 163.5],[166.5, 169.5],[157.5, 160.5],[169.5, 172.5],\n [175.5, 178.5],[172.5, 175.5],[154.5, 157.5],[178.5, 181.5],[184.5, 187.5],\n [181.5, 184.5],[187.5, 190.5],[190.5, 193.5]],\n columns=expected_columns)\n index_5 = pd.DataFrame(\n [[80.5, 83.5],[71.5, 74.5],[77.5, 80.5]],\n columns=expected_columns)\n index_6 = pd.DataFrame(\n [[161.5, 164.5],[158.5, 161.5],[173.5, 176.5],[170.5, 173.5],[146.5, 149.5],\n [149.5, 152.5],[164.5, 167.5],[152.5, 155.5],[179.5, 182.5],[167.5, 170.5],\n [176.5, 179.5],[200.5, 203.5],[203.5, 206.5],[206.5, 209.5],[209.5, 212.5]],\n columns=expected_columns)\n\n expected_list.append(index_0)\n expected_list.append(index_1)\n expected_list.append(index_2)\n expected_list.append(index_3)\n expected_list.append(index_4)\n expected_list.append(index_5)\n expected_list.append(index_6)\n\n return expected_list\n\[email protected]\ndef expected_Zaire_define_alleles():\n expected_list = []\n expected_columns = ['0', '1']\n index_0 = pd.DataFrame(\n [[216.0, 218.0],[224.0, 226.0],[232.0, 234.0],[220.0, 222.0],[228.0, 230.0],\n [242.0, 244.0],[238.0, 240.0],[240.0, 242.0],[222.0, 224.0],[230.0, 232.0],\n [250.0, 252.0],[210.0, 212.0],[214.0, 216.0],[234.0, 236.0],[236.0, 238.0],\n [244.0, 246.0],[254.0, 256.0],[260.0, 262.0]],\n columns=expected_columns)\n index_1 = pd.DataFrame(\n [[137.0, 139.0],[139.0, 141.0],[123.0, 125.0],[101.0, 103.0],[135.0, 137.0],\n [83.0, 85.0],[121.0, 123.0],[141.0, 143.0],[85.0, 87.0],[97.0, 99.0],\n [99.0, 101.0],[129.0, 131.0],[163.0, 165.0],[171.0, 173.0],[87.0, 89.0],\n [89.0, 91.0],[103.0, 105.0],[105.0, 107.0],[143.0, 145.0],[147.0, 149.0],\n [149.0, 151.0],[151.0, 153.0],[161.0, 163.0]],\n columns=expected_columns)\n index_2 = pd.DataFrame(\n [[158.5, 161.5],[164.5, 167.5],[170.5, 173.5],[176.5, 179.5],[161.5, 164.5],\n [167.5, 170.5],[182.5, 185.5],[137.5, 140.5],[173.5, 176.5],[179.5, 182.5],\n [134.5, 137.5],[140.5, 143.5],[185.5, 188.5],[191.5, 194.5]],\n columns=expected_columns)\n index_3 = pd.DataFrame(\n [[153.5, 156.5],[150.5, 153.5],[165.5, 168.5],[147.5, 150.5],[177.5, 180.5],\n [171.5, 174.5],[141.5, 144.5],[156.5, 159.5],[159.5, 162.5],[138.5, 141.5],\n [162.5, 165.5],[168.5, 171.5],[117.5, 120.5],[135.5, 138.5],[180.5, 183.5]],\n columns=expected_columns)\n 
index_4 = pd.DataFrame(\n [[160.5, 163.5],[166.5, 169.5],[169.5, 172.5],[157.5, 160.5],[163.5, 166.5],\n [172.5, 175.5],[175.5, 178.5],[178.5, 181.5],[181.5, 184.5],[151.5, 154.5],\n [184.5, 187.5],[187.5, 190.5],[133.5, 136.5],[136.5, 139.5],[142.5, 145.5],\n [145.5, 148.5]],\n columns=expected_columns)\n index_5 = pd.DataFrame(\n [[80.5, 83.5],[77.5, 80.5],[83.5, 86.5],[71.5, 74.5],],\n columns=expected_columns)\n index_6 = pd.DataFrame(\n [[170.5, 173.5],[158.5, 161.5],[161.5, 164.5],[173.5, 176.5],[149.5, 152.5],\n [182.5, 185.5],[146.5, 149.5],[164.5, 167.5],[176.5, 179.5],[185.5, 188.5],\n [143.5, 146.5],[152.5, 155.5],[155.5, 158.5],[167.5, 170.5],[179.5, 182.5],\n [194.5, 197.5]],\n columns=expected_columns)\n\n expected_list.append(index_0)\n expected_list.append(index_1)\n expected_list.append(index_2)\n expected_list.append(index_3)\n expected_list.append(index_4)\n expected_list.append(index_5)\n expected_list.append(index_6)\n\n return expected_list\n\ndef test_Benguela__define_alleles_output_correct(expected_Benguela_define_alleles):\n result_list = Define_Alleles.define_alleles(\n pd.concat([genotypedata_RR_Benguela, additional_neutral_Benguela]), locirepeats, maxk\n )\n\n # assert result_list.length == expected_Benguela_define_alleles.length\n\n assert len(result_list) == len(expected_Benguela_define_alleles)\n \n for index in range(len(result_list)):\n pd.testing.assert_frame_equal(result_list[index], expected_Benguela_define_alleles[index])\n\ndef test_Lunda_Sul__define_alleles_output_correct(expected_Lunda_Sul_define_alleles):\n result_list = Define_Alleles.define_alleles(\n pd.concat([genotypedata_RR_Lunda_Sul, additional_neutral_Lunda_Sul]), locirepeats, maxk\n )\n\n # assert result_list.length == expected_Benguela_define_alleles.length\n\n assert len(result_list) == len(expected_Lunda_Sul_define_alleles)\n \n for index in range(len(result_list)):\n pd.testing.assert_frame_equal(result_list[index], expected_Lunda_Sul_define_alleles[index])\n\ndef test_Zaire__define_alleles_output_correct(expected_Zaire_define_alleles):\n result_list = Define_Alleles.define_alleles(\n pd.concat([genotypedata_RR_Zaire, additional_neutral_Zaire]), locirepeats, maxk\n )\n\n # assert result_list.length == expected_Benguela_define_alleles.length\n\n assert len(result_list) == len(expected_Zaire_define_alleles)\n \n for index in range(len(result_list)):\n pd.testing.assert_frame_equal(result_list[index], expected_Zaire_define_alleles[index])",
"import os\nimport sys\n\nimport pandas as pd\nimport pytest\nimport numpy as np\n\nfrom api.algorithm_instance import AlgorithmInstance\nimport api.recrudescence_utils as recrudescence_utils\nfrom api.recrudescence_file_parser import RecrudescenceFileParser\nfrom api.calculate_frequencies import calculate_frequencies3, Frequencies\n\n# NOTE: Makes this reliant on AlgorithmInstance tests passing\nexample_file = os.path.join(\n os.path.dirname(__file__),\n '../Angola2017_example.xlsx')\ngenotypedata, additional = RecrudescenceFileParser.parse_file(example_file)\n\ngenotypedata_RR_Benguela = AlgorithmInstance._get_samples_from_site(genotypedata, 'Benguela')\nadditional_neutral_Benguela = AlgorithmInstance._replace_sample_names(\n AlgorithmInstance._get_samples_from_site(additional, 'Benguela'),'Additional_')\n\ngenotypedata_RR_Lunda_Sul = AlgorithmInstance._get_samples_from_site(genotypedata, 'Lunda Sul')\nadditional_neutral_Lunda_Sul = AlgorithmInstance._replace_sample_names(\n AlgorithmInstance._get_samples_from_site(additional, 'Lunda Sul'),'Additional_')\n\ngenotypedata_RR_Zaire = AlgorithmInstance._get_samples_from_site(genotypedata, 'Zaire')\nadditional_neutral_Zaire = AlgorithmInstance._replace_sample_names(\n AlgorithmInstance._get_samples_from_site(additional, 'Zaire'),'Additional_')\n\[email protected]\ndef Benguela_define_alleles():\n define_alleles = []\n columns = ['0', '1']\n index_0 = pd.DataFrame(\n [[219.0, 221.0],[221.0, 223.0],[243.0, 245.0],[261.0, 263.0],[223.0, 225.0],\n [225.0, 227.0],[231.0, 233.0],[237.0, 239.0],[239.0, 241.0],[249.0, 251.0],\n [217.0, 219.0],[229.0, 231.0],[245.0, 247.0]],\n columns=columns)\n index_1 = pd.DataFrame(\n [[123.0, 125.0],[139.0, 141.0],[103.0, 105.0],[85.0, 87.0],[121.0, 123.0],\n [143.0, 145.0],[145.0, 147.0],[87.0, 89.0],[135.0, 137.0],[137.0, 139.0],\n [147.0, 149.0],[149.0, 151.0],[151.0, 153.0],[161.0, 163.0],[163.0, 165.0],\n [169.0, 171.0]],\n columns=columns)\n index_2 = pd.DataFrame(\n [[176.5, 179.5],[161.5, 164.5],[164.5, 167.5],[167.5, 170.5],[173.5, 176.5],\n [170.5, 173.5],[71.5, 74.5],[179.5, 182.5],[158.5, 161.5],[182.5, 185.5],\n [200.5, 203.5]],\n columns=columns)\n index_3 = pd.DataFrame(\n [[153.5, 156.5],[150.5, 153.5],[162.5, 165.5],[165.5, 168.5],[156.5, 159.5],\n [141.5, 144.5],[168.5, 171.5],[171.5, 174.5],[102.5, 105.5],[111.5, 114.5],\n [159.5, 162.5],[174.5, 177.5],[177.5, 180.5]],\n columns=columns)\n index_4 = pd.DataFrame(\n [[160.5, 163.5],[166.5, 169.5],[169.5, 172.5],[175.5, 178.5],[172.5, 175.5],\n [163.5, 166.5],[178.5, 181.5],[148.5, 151.5],[154.5, 157.5],[157.5, 160.5],\n [184.5, 187.5],[193.5, 196.5],[136.5, 139.5],[139.5, 142.5],[181.5, 184.5],\n [187.5, 190.5],[190.5, 193.5],[196.5, 199.5],[199.5, 202.5]],\n columns=columns)\n index_5 = pd.DataFrame(\n [[80.5, 83.5],[77.5, 80.5],[71.5, 74.5],[83.5, 86.5],[86.5, 89.5]],\n columns=columns)\n index_6 = pd.DataFrame(\n [[161.5, 164.5],[158.5, 161.5],[146.5, 149.5],[173.5, 176.5],[170.5, 173.5],\n [149.5, 152.5],[167.5, 170.5],[164.5, 167.5],[176.5, 179.5],[179.5, 182.5],\n [182.5, 185.5],[185.5, 188.5],[194.5, 197.5]],\n columns=columns)\n\n define_alleles.append(index_0)\n define_alleles.append(index_1)\n define_alleles.append(index_2)\n define_alleles.append(index_3)\n define_alleles.append(index_4)\n define_alleles.append(index_5)\n define_alleles.append(index_6)\n return define_alleles\n\[email protected]\ndef Lunda_Sul_define_alleles():\n define_alleles = []\n columns = ['0', '1']\n index_0 = pd.DataFrame(\n [[218.0, 220.0],[244.0, 
246.0],[258.0, 260.0],[216.0, 218.0],[224.0, 226.0],\n [232.0, 234.0],[234.0, 236.0],[236.0, 238.0],[210.0, 212.0],[220.0, 222.0],\n [230.0, 232.0],[238.0, 240.0],[240.0, 242.0],[246.0, 248.0],[248.0, 250.0],\n [256.0, 258.0],[274.0, 276.0]],\n columns=columns)\n index_1 = pd.DataFrame(\n [[123.0, 125.0],[139.0, 141.0],[85.0, 87.0],[137.0, 139.0],[143.0, 145.0],\n [101.0, 103.0],[121.0, 123.0],[135.0, 137.0],[145.0, 147.0],[87.0, 89.0],\n [99.0, 101.0],[107.0, 109.0],[127.0, 129.0],[149.0, 151.0],[151.0, 153.0],\n [153.0, 155.0],[155.0, 157.0],[169.0, 171.0]],\n columns=columns)\n index_2 = pd.DataFrame(\n [[158.5, 161.5],[164.5, 167.5],[167.5, 170.5],[170.5, 173.5],[173.5, 176.5],\n [176.5, 179.5],[179.5, 182.5],[71.5, 74.5],[161.5, 164.5],[185.5, 188.5]],\n columns=columns)\n index_3 = pd.DataFrame(\n [[153.5, 156.5],[162.5, 165.5],[156.5, 159.5],[147.5, 150.5],[150.5, 153.5],\n [168.5, 171.5],[174.5, 177.5],[159.5, 162.5],[165.5, 168.5],[135.5, 138.5],\n [141.5, 144.5],[180.5, 183.5],[186.5, 189.5]],\n columns=columns)\n index_4 = pd.DataFrame(\n [[163.5, 166.5],[160.5, 163.5],[166.5, 169.5],[157.5, 160.5],[169.5, 172.5],\n [175.5, 178.5],[172.5, 175.5],[154.5, 157.5],[178.5, 181.5],[184.5, 187.5],\n [181.5, 184.5],[187.5, 190.5],[190.5, 193.5]],\n columns=columns)\n index_5 = pd.DataFrame(\n [[80.5, 83.5],[71.5, 74.5],[77.5, 80.5]],\n columns=columns)\n index_6 = pd.DataFrame(\n [[161.5, 164.5],[158.5, 161.5],[173.5, 176.5],[170.5, 173.5],[146.5, 149.5],\n [149.5, 152.5],[164.5, 167.5],[152.5, 155.5],[179.5, 182.5],[167.5, 170.5],\n [176.5, 179.5],[200.5, 203.5],[203.5, 206.5],[206.5, 209.5],[209.5, 212.5]],\n columns=columns)\n\n define_alleles.append(index_0)\n define_alleles.append(index_1)\n define_alleles.append(index_2)\n define_alleles.append(index_3)\n define_alleles.append(index_4)\n define_alleles.append(index_5)\n define_alleles.append(index_6)\n return define_alleles\n\[email protected]\ndef Zaire_define_alleles():\n define_alleles = []\n columns = ['0', '1']\n index_0 = pd.DataFrame(\n [[216.0, 218.0],[224.0, 226.0],[232.0, 234.0],[220.0, 222.0],[228.0, 230.0],\n [242.0, 244.0],[238.0, 240.0],[240.0, 242.0],[222.0, 224.0],[230.0, 232.0],\n [250.0, 252.0],[210.0, 212.0],[214.0, 216.0],[234.0, 236.0],[236.0, 238.0],\n [244.0, 246.0],[254.0, 256.0],[260.0, 262.0]],\n columns=columns)\n index_1 = pd.DataFrame(\n [[137.0, 139.0],[139.0, 141.0],[123.0, 125.0],[101.0, 103.0],[135.0, 137.0],\n [83.0, 85.0],[121.0, 123.0],[141.0, 143.0],[85.0, 87.0],[97.0, 99.0],\n [99.0, 101.0],[129.0, 131.0],[163.0, 165.0],[171.0, 173.0],[87.0, 89.0],\n [89.0, 91.0],[103.0, 105.0],[105.0, 107.0],[143.0, 145.0],[147.0, 149.0],\n [149.0, 151.0],[151.0, 153.0],[161.0, 163.0]],\n columns=columns)\n index_2 = pd.DataFrame(\n [[158.5, 161.5],[164.5, 167.5],[170.5, 173.5],[176.5, 179.5],[161.5, 164.5],\n [167.5, 170.5],[182.5, 185.5],[137.5, 140.5],[173.5, 176.5],[179.5, 182.5],\n [134.5, 137.5],[140.5, 143.5],[185.5, 188.5],[191.5, 194.5]],\n columns=columns)\n index_3 = pd.DataFrame(\n [[153.5, 156.5],[150.5, 153.5],[165.5, 168.5],[147.5, 150.5],[177.5, 180.5],\n [171.5, 174.5],[141.5, 144.5],[156.5, 159.5],[159.5, 162.5],[138.5, 141.5],\n [162.5, 165.5],[168.5, 171.5],[117.5, 120.5],[135.5, 138.5],[180.5, 183.5]],\n columns=columns)\n index_4 = pd.DataFrame(\n [[160.5, 163.5],[166.5, 169.5],[169.5, 172.5],[157.5, 160.5],[163.5, 166.5],\n [172.5, 175.5],[175.5, 178.5],[178.5, 181.5],[181.5, 184.5],[151.5, 154.5],\n [184.5, 187.5],[187.5, 190.5],[133.5, 136.5],[136.5, 139.5],[142.5, 145.5],\n [145.5, 
148.5]],\n columns=columns)\n index_5 = pd.DataFrame(\n [[80.5, 83.5],[77.5, 80.5],[83.5, 86.5],[71.5, 74.5],],\n columns=columns)\n index_6 = pd.DataFrame(\n [[170.5, 173.5],[158.5, 161.5],[161.5, 164.5],[173.5, 176.5],[149.5, 152.5],\n [182.5, 185.5],[146.5, 149.5],[164.5, 167.5],[176.5, 179.5],[185.5, 188.5],\n [143.5, 146.5],[152.5, 155.5],[155.5, 158.5],[167.5, 170.5],[179.5, 182.5],\n [194.5, 197.5]],\n columns=columns)\n\n define_alleles.append(index_0)\n define_alleles.append(index_1)\n define_alleles.append(index_2)\n define_alleles.append(index_3)\n define_alleles.append(index_4)\n define_alleles.append(index_5)\n define_alleles.append(index_6)\n return define_alleles\n\n\[email protected]\ndef expected_Benguela_frequencies():\n expected_list = []\n\n index_0 = np.array([13, 16, 11, 13, 19, 5, 13])\n index_1 = np.array([\n [0.11111111, 0.11111111, 0.11111111, 0.11111111, 0.07407407,\n 0.07407407, 0.07407407, 0.07407407, 0.07407407, 0.07407407,\n 0.03703704, 0.03703704, 0.03703704, 0. , 0. ,\n 0. , 0. , 0. , 0. ],\n [0.24324324, 0.21621622, 0.08108108, 0.05405405, 0.05405405,\n 0.05405405, 0.05405405, 0.02702703, 0.02702703, 0.02702703,\n 0.02702703, 0.02702703, 0.02702703, 0.02702703, 0.02702703,\n 0.02702703, 0. , 0. , 0. ],\n [0.2 , 0.17142857, 0.11428571, 0.11428571, 0.11428571,\n 0.08571429, 0.05714286, 0.05714286, 0.02857143, 0.02857143,\n 0.02857143, 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. , 0. ],\n [0.18181818, 0.15151515, 0.12121212, 0.12121212, 0.09090909,\n 0.06060606, 0.06060606, 0.06060606, 0.03030303, 0.03030303,\n 0.03030303, 0.03030303, 0.03030303, 0. , 0. ,\n 0. , 0. , 0. , 0. ],\n [0.1372549 , 0.11764706, 0.11764706, 0.09803922, 0.07843137,\n 0.05882353, 0.05882353, 0.03921569, 0.03921569, 0.03921569,\n 0.03921569, 0.03921569, 0.01960784, 0.01960784, 0.01960784,\n 0.01960784, 0.01960784, 0.01960784, 0.01960784],\n [0.57142857, 0.25 , 0.10714286, 0.03571429, 0.03571429,\n 0. , 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. , 0. ],\n [0.23076923, 0.19230769, 0.13461538, 0.13461538, 0.09615385,\n 0.05769231, 0.03846154, 0.01923077, 0.01923077, 0.01923077,\n 0.01923077, 0.01923077, 0.01923077, 0. , 0. ,\n 0. , 0. , 0. , 0. ]\n ])\n index_2 = np.array([0.4821891, 0.14313027, 0.08446277, 0.24941966, 0.30618052, 0.25014729, 0.79079775])\n\n expected_frequencies_RR = Frequencies(index_0, index_1, index_2)\n return expected_frequencies_RR\n\[email protected]\ndef expected_Lunda_Sul_frequencies():\n expected_list = []\n\n index_0 = np.array([17, 18, 10, 13, 13, 3, 15])\n index_1 = np.array([\n [0.11764706, 0.11764706, 0.11764706, 0.08823529, 0.08823529,\n 0.08823529, 0.05882353, 0.05882353, 0.02941176, 0.02941176,\n 0.02941176, 0.02941176, 0.02941176, 0.02941176, 0.02941176,\n 0.02941176, 0.02941176, 0. ],\n [0.225 , 0.125 , 0.075 , 0.075 , 0.075 ,\n 0.05 , 0.05 , 0.05 , 0.05 , 0.025 ,\n 0.025 , 0.025 , 0.025 , 0.025 , 0.025 ,\n 0.025 , 0.025 , 0.025 ],\n [0.22857143, 0.17142857, 0.11428571, 0.11428571, 0.11428571,\n 0.11428571, 0.05714286, 0.02857143, 0.02857143, 0.02857143,\n 0. , 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. ],\n [0.20512821, 0.17948718, 0.1025641 , 0.07692308, 0.07692308,\n 0.07692308, 0.07692308, 0.05128205, 0.05128205, 0.02564103,\n 0.02564103, 0.02564103, 0.02564103, 0. , 0. ,\n 0. , 0. , 0. ],\n [0.2 , 0.13333333, 0.13333333, 0.08888889, 0.08888889,\n 0.08888889, 0.06666667, 0.04444444, 0.04444444, 0.04444444,\n 0.02222222, 0.02222222, 0.02222222, 0. , 0. ,\n 0. , 0. , 0. ],\n [0.66666667, 0.16666667, 0.16666667, 0. , 0. ,\n 0. , 0. , 0. , 0. , 0. 
,\n 0. , 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. ],\n [0.22033898, 0.15254237, 0.15254237, 0.10169492, 0.06779661,\n 0.06779661, 0.06779661, 0.03389831, 0.03389831, 0.01694915,\n 0.01694915, 0.01694915, 0.01694915, 0.01694915, 0.01694915,\n 0. , 0. , 0. ]\n ])\n index_2 = np.array([0.30069772, 0.32102995, 0.35015972, 0.13598753, 0.18125558, 0.23458575, 0.50453622])\n\n expected_frequencies_RR = Frequencies(index_0, index_1, index_2)\n return expected_frequencies_RR\n\[email protected]\ndef expected_Zaire_frequencies():\n expected_list = []\n\n index_0 = np.array([18, 23, 14, 15, 16, 4, 16])\n index_1 = np.array([\n [0.14545455, 0.12727273, 0.12727273, 0.10909091, 0.07272727,\n 0.07272727, 0.05454545, 0.05454545, 0.03636364, 0.03636364,\n 0.03636364, 0.01818182, 0.01818182, 0.01818182, 0.01818182,\n 0.01818182, 0.01818182, 0.01818182, 0. , 0. ,\n 0. , 0. , 0. ],\n [0.14285714, 0.14285714, 0.12987013, 0.09090909, 0.09090909,\n 0.05194805, 0.03896104, 0.03896104, 0.02597403, 0.02597403,\n 0.02597403, 0.02597403, 0.02597403, 0.02597403, 0.01298701,\n 0.01298701, 0.01298701, 0.01298701, 0.01298701, 0.01298701,\n 0.01298701, 0.01298701, 0.01298701],\n [0.18032787, 0.13114754, 0.1147541 , 0.1147541 , 0.09836066,\n 0.09836066, 0.06557377, 0.04918033, 0.04918033, 0.03278689,\n 0.01639344, 0.01639344, 0.01639344, 0.01639344, 0. ,\n 0. , 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. ],\n [0.19672131, 0.16393443, 0.1147541 , 0.08196721, 0.08196721,\n 0.06557377, 0.04918033, 0.04918033, 0.04918033, 0.03278689,\n 0.03278689, 0.03278689, 0.01639344, 0.01639344, 0.01639344,\n 0. , 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. ],\n [0.2375 , 0.1625 , 0.125 , 0.1 , 0.0875 ,\n 0.05 , 0.0375 , 0.0375 , 0.0375 , 0.025 ,\n 0.025 , 0.025 , 0.0125 , 0.0125 , 0.0125 ,\n 0.0125 , 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. ],\n [0.62745098, 0.31372549, 0.03921569, 0.01960784, 0. ,\n 0. , 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. ],\n [0.19 , 0.18 , 0.15 , 0.12 , 0.07 ,\n 0.07 , 0.06 , 0.04 , 0.04 , 0.02 ,\n 0.01 , 0.01 , 0.01 , 0.01 , 0.01 ,\n 0.01 , 0. , 0. , 0. , 0. ,\n 0. , 0. , 0. 
]\n ])\n index_2 = np.array([0.22105065, 0.22734084, 0.24295935, 0.2933828 , 0.35520227,0.11494194, 0.47930888])\n\n expected_frequencies_RR = Frequencies(index_0, index_1, index_2)\n return expected_frequencies_RR\n\n\ndef test_Benguela_calculate_frequencies_output_correct(expected_Benguela_frequencies, Benguela_define_alleles):\n result_list = calculate_frequencies3(\n pd.concat([genotypedata_RR_Benguela, additional_neutral_Benguela]), Benguela_define_alleles)\n\n np.testing.assert_array_almost_equal(expected_Benguela_frequencies.lengths, result_list.lengths, decimal=5)\n np.testing.assert_array_almost_equal(expected_Benguela_frequencies.matrix, result_list.matrix, decimal=5)\n np.testing.assert_array_almost_equal(expected_Benguela_frequencies.variability, result_list.variability, decimal=5)\n\ndef test_Lunda_Sul_calculate_frequencies_output_correct(expected_Lunda_Sul_frequencies, Lunda_Sul_define_alleles):\n result_list = calculate_frequencies3(\n pd.concat([genotypedata_RR_Lunda_Sul, additional_neutral_Lunda_Sul]), Lunda_Sul_define_alleles)\n\n np.testing.assert_array_almost_equal(expected_Lunda_Sul_frequencies.lengths, result_list.lengths, decimal=5)\n np.testing.assert_array_almost_equal(expected_Lunda_Sul_frequencies.matrix, result_list.matrix, decimal=5)\n np.testing.assert_array_almost_equal(expected_Lunda_Sul_frequencies.variability, result_list.variability, decimal=5)\n\ndef test_Zaire_calculate_frequencies_output_correct(expected_Zaire_frequencies, Zaire_define_alleles):\n result_list = calculate_frequencies3(\n pd.concat([genotypedata_RR_Zaire, additional_neutral_Zaire]), Zaire_define_alleles)\n\n np.testing.assert_array_almost_equal(expected_Zaire_frequencies.lengths, result_list.lengths, decimal=5)\n np.testing.assert_array_almost_equal(expected_Zaire_frequencies.matrix, result_list.matrix, decimal=5)\n np.testing.assert_array_almost_equal(expected_Zaire_frequencies.variability, result_list.variability, decimal=5)\n\n"
] |
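The tests above follow one pattern throughout: build a list of expected per-locus DataFrames as a fixture, run the function under test, and compare element-wise. A minimal sketch of that comparison idiom:

```python
import pandas as pd

expected = [pd.DataFrame([[219.0, 221.0], [221.0, 223.0]], columns=['0', '1'])]
result = [pd.DataFrame([[219.0, 221.0], [221.0, 223.0]], columns=['0', '1'])]

assert len(result) == len(expected)
for res, exp in zip(result, expected):
    pd.testing.assert_frame_equal(res, exp)  # raises with a detailed diff on any mismatch
print('all frames equal')
```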
[
[
"pandas.concat",
"numpy.array",
"pandas.testing.assert_frame_equal",
"pandas.DataFrame"
],
[
"pandas.concat",
"numpy.array",
"pandas.DataFrame",
"numpy.testing.assert_array_almost_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
JiaShun-Xiao/SparseLMM
|
[
"0bca5353ffc129a2d440cec91785da05909af2f2",
"0bca5353ffc129a2d440cec91785da05909af2f2"
] |
[
"SparseLmm.py",
"demo_sparselmm.py"
] |
[
"import numpy as np\nimport logging\n\nlogging.basicConfig(filename='spaselmm.log', level=logging.DEBUG)\n\n###\n# Input Arguments\n###\n# X: n x p input matrix, where n is the number of samples, and p is the number of variables. X cannot be sparse.\n# Z: n x m covariate data matrix\n# y: Vector of length n containing phenotype\n\n###\n# Output Arguments\n###\n# w: normalized weights compute from logw.\n# alpha: variational estimates of posterior inclusion probs.\n# pip: \"Averaged\" posterior inclusion probabilities.\n# mu: variational estimates of posterior mean coefficients.\n# mu_cov: posterior estimates of coefficients for covariates.\n# heri: estimated heritability\n# logodd: approximate marginal log-likelihood for each setting of hyperparameters.\n\n\ndef varbvs(X,y,Z=None,tol=1e-5,maxiter=1e5,verbose=False):\n logging.info(\"X,y,Z shape: {},{},{}\".format(str(X.shape),str(y.shape),str(Z.shape)))\n X = X.astype(np.float32)\n n,p = X.shape\n if Z.all() != None:\n Z = Z.astype(np.float64)\n Z = np.concatenate((np.ones((n,1)),Z),axis=1)\n else:\n Z = np.ones((n,1))\n ncov = Z.shape[1]-1\n y = y.astype(np.float32)\n ## 1. PROCESS OPTIONS\n ## initial sigma, sa, logodds\n ns = 20\n sigma = np.array([y.var() for _ in range(ns)])\n sa = np.array([1. for _ in range(ns)])\n logodds = np.linspace(-np.log10(3500),-1,ns)\n ## initial estimates of variational parameter alpha.\n logging.info(\"initial parameter\")\n alpha = np.random.rand(p,ns)\n alpha = alpha/alpha.sum(axis=0)\n mu = np.random.randn(p,ns)\n ## 2.PREPROCESSING STEPS\n ## Adjust the genotypes and phenotypes\n if ncov == 0:\n X = X - X.mean(axis=0)\n y = y - y.mean()\n else:\n ZTZ_ = np.linalg.inv(Z.T.dot(Z))\n SZy = ZTZ_.dot(Z.T).dot(y)\n SZX = ZTZ_.dot(Z.T).dot(X)\n X = X - Z.dot(SZX)\n y = y - Z.dot(SZy)\n d = np.linalg.norm(X,axis=0)**2\n xy = (y.T.dot(X)).T\n ## 3. FIT BAYESIAN VARIABLE SELECTION MODE\n logging.info(\"fit bayesian variable selection model\")\n logging.info(\"number of tried log odds: {}\".format(str(ns)))\n logw = np.zeros(ns) # log likelihood for each hyperparameter setting (logw)\n s = np.zeros((p,ns)) # variances of the regression coefficients (s)\n mu_cov = np.zeros((ncov+1,ns)) # posterior mean estimates of the coefficients for the covariates\n ## 4. find a good initialization for the variational parameters\n logging.info(\"find a good initialization for the variational parameters\")\n for i in range(ns):\n logging.info(\"itreration for {} log odds\".format(str(i)))\n logw[i], sigma[i], sa[i], alpha[:,i], mu[:,i], s[:,i], mu_cov[:,i] = \\\n outerloop(d,xy,X,Z,y,SZy,SZX,sigma[i],sa[i],logodds[i],alpha[:,i],mu[:,i],tol,maxiter)\n ## Choose an initialization common to all the runs of the coordinate ascent algorithm\n i = np.argmax(logw)\n alpha = np.repeat(alpha[:,i], ns).reshape(p,ns)\n mu = np.repeat(mu[:,i], ns).reshape(p,ns)\n sigma = np.full(ns,sigma[i])\n sa = np.full(ns,sa[i])\n ## 5. optimazition\n logging.info(\"Main loop for computing a variational approximation\")\n for i in range(ns):\n logging.info(\"itreration for {} log odds\".format(str(i)))\n logw[i], sigma[i], sa[i], alpha[:,i], mu[:,i], s[:,i], mu_cov[:,i] = \\\n outerloop(d,xy,X,Z,y,SZy,SZX,sigma[i],sa[i],logodds[i],alpha[:,i],mu[:,i],tol,maxiter)\n ## 6. 
CREATE FINAL OUTPUT\n w = normalizelogw(logw)\n pip = alpha.dot(w)\n #beta = mu.dot(w)\n #cov_beta = mu_cov.dot(w)\n sigma = sigma.dot(w)\n sa = sa.dot(w)\n #print(cov_beta)\n #print(pip,beta)\n #print(np.exp(logw.max()))\n return w,alpha,pip,mu,mu_cov,sa/(sigma+sa),np.exp(logw.max())\n\ndef outerloop(d,xy,X,Z,y,SZy,SZX,sigma,sa,logodds,alpha,mu,tol,maxiter):\n n,p = X.shape\n if np.isscalar(logodds):\n logodds = np.full(p,logodds)\n #print(logodds)\n logw,err,sigma,sa,alpha,mu,s = varbvsnorm(d,xy,X,Z,y,sigma,sa,logodds,alpha,mu,tol,maxiter)\n (sign, logdet) = np.linalg.slogdet(Z.T.dot(Z))\n logw = logw - sign*logdet/2\n mu_cov = SZy - SZX.dot(alpha*mu)\n logw = logw[-1]\n return logw, sigma, sa, alpha, mu, s, mu_cov\n\ndef varbvsnorm(d,xy,X,Z,y,sigma,sa,logodds,alpha,mu,tol,maxiter):\n maxiter = int(maxiter)\n X = X.astype(np.float32)\n n,p = X.shape\n Xr = X.dot(alpha*mu)\n #print(sigma,sa,logodds,alpha,mu,tol,maxiter,sa0,n0)\n s = (sa*sigma)/(sa*d+1)\n logw = np.zeros(maxiter)\n err = np.zeros(maxiter)\n ## main loop\n for it in range(maxiter):\n alpha0 = alpha\n mu0 = mu\n s0 = s\n sigma0 = sigma\n sa0 = sa\n ## COMPUTE CURRENT VARIATIONAL LOWER BOUND\n logw0 = int_linear(Xr,d,y,sigma,alpha,mu,s)\n ## UPDATE VARIATIONAL APPROXIMATION\n #print(alpha,mu,Xr)\n if it%2 == 0:\n order = range(p)\n else:\n order = range(p-1,-1,-1)\n alpha,mu,Xr = varbvsnormupdate(X,sigma,sa,logodds,xy,d,alpha,mu,Xr,order)\n #print(alpha,mu)\n ## COMPUTE UPDATED VARIATIONAL LOWER BOUND\n logw[it] = int_linear(Xr,d,y,sigma,alpha,mu,s)\n ## UPDATE RESIDUAL VARIANCE\n betavar = alpha*(sigma+(mu**2))-(alpha*mu)**2\n sigma = (np.linalg.norm(y - Xr)**2+d.dot(betavar)+alpha.dot(s+mu**2)/sa)/(n+alpha.sum())\n s = (sa*sigma)/(sa*d+1)\n #sa = (sa0*n0+alpha.dot(s+mu**2))/(n0+sigma*alpha.sum())\n sa = (alpha.dot(s+mu**2))/(sigma*alpha.sum())\n s = (sa*sigma)/(sa*d+1)\n ## check convergence\n err[it] = np.absolute(alpha - alpha0).max()\n #print(err[it],logw[it])\n if logw[it] < logw0:\n logw[it] = logw0\n err[it] = 0\n sigma = sigma0\n sa = sa0\n alpha = alpha0\n mu = mu0\n s = s0\n break\n elif err[it] < tol:\n break\n logw = logw[:it+1]\n err = err[:it+1]\n return logw,err,sigma,sa,alpha,mu,s\n \ndef int_linear(Xr, d, y, sigma, alpha, mu, s):\n n = y.shape[0]\n betavar = alpha*(sigma+(mu**2))-(alpha*mu)**2\n return (-n/2)*np.log(2*np.pi*sigma) - np.linalg.norm(y - Xr)**2/(2*sigma) - d.T.dot(betavar)/(2*sigma)\n\ndef varbvsnormupdate(X,sigma,sa,logodds,xy,d,alpha,mu,Xr,order):\n n,p = X.shape\n #print(mu)\n s = np.zeros(p)\n for i in order:\n s[i] = (sa*sigma)/(sa*d[i]+1)\n r = alpha[i]*mu[i]\n mu[i] = (s[i]/sigma)*(xy[i]+d[i]*r-X[:,i].T.dot(Xr))\n #print(mu**2/s)\n SSR = mu[i]**2/s[i]\n alpha_tmp = logodds[i]+(np.log(s[i]/(sigma*sa))+SSR)/2\n alpha[i] = 1/(1+np.exp(-alpha_tmp))\n rnew = alpha[i]*mu[i]\n Xr = Xr + X[:,i]*(rnew-r)\n return alpha,mu,Xr\n\ndef normalizelogw(logw):\n c = logw.max()\n w = np.exp(logw-c)\n w = w/w.sum()\n return w\n\ndef varbvspredict(X,Z,w,alpha,mu_,mu_cov_):\n X = X.astype(np.float32)\n Z = np.concatenate((np.ones((Z.shape[0],1)),Z),axis=1)\n return (Z.dot(mu_cov_)+X.dot(alpha*mu_)).dot(w)",
"import numpy as np\nfrom sklearn.metrics import r2_score\nfrom sklearn import linear_model\nfrom SparseLmm import *\n\nn = 1000 # number of sample\np = 100000 # number of SNP\nm = 3 # number of covariate\nna = 20 # number of causal snp\nr = 0.5 # SNP heritability\nse = 4 # variance of residual\n\nmaf = 0.05+0.45*np.random.rand(p)\nmaf_mat = np.repeat(maf,n).reshape(p,n).T\n# latent variable gamma\ngamma = np.zeros(p)\nindex_gamma = np.random.choice(list(range(p)),na,replace=False)\nbeta = np.zeros(p)\nbeta[index_gamma] = np.random.randn(na)\n\nX = (np.random.rand(n,p) < maf_mat)*1 + (np.random.rand(n,p) < maf_mat)*1\nbeta = np.sqrt((r/(1-r))*se/X.dot(beta).var())*beta\n\nintercept = np.random.randn(1)\nZ = np.random.randn(n,m)\nu = np.random.randn(m)\ny = intercept + X.dot(beta) + Z.dot(u) + np.sqrt(se)*np.random.randn(n)\n\nsep = 800\ntrain_X = X[:sep,:]\ntrain_y = y[:sep]\ntrain_Z = Z[:sep,:]\ntest_X = X[sep:,:]\ntest_y = y[sep:]\ntest_Z = Z[sep:,:]\nprint(train_X.shape,train_y.shape,train_Z.shape,test_X.shape,test_y.shape,test_Z.shape)\n\nw,alpha,pip,mu_,mu_cov_,heri,logodd = varbvs(train_X,train_y,train_Z)\n\ny_pre = varbvspredict(test_X,test_Z,w,alpha,mu_,mu_cov_)\nprint(r2_score(test_y,y_pre))\n\nclf = linear_model.Lasso(alpha=.1)\nclf.fit(np.concatenate((train_X,train_Z),axis=1),train_y)\ny_pre_lasso = clf.predict(np.concatenate((test_X,test_Z),axis=1))\nprint(r2_score(test_y,y_pre_lasso))"
] |
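`normalizelogw` above is the standard log-sum-exp trick: subtracting the maximum log-weight before exponentiating keeps the normalization numerically stable even when the raw values would underflow. In isolation:

```python
import numpy as np

logw = np.array([-1000.0, -1001.0, -1005.0])  # np.exp(logw) underflows to 0
w = np.exp(logw - logw.max())                 # shift so the largest term is exp(0) = 1
w = w / w.sum()
print(w)  # finite weights that sum to 1
```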
[
[
"numpy.log",
"numpy.absolute",
"numpy.linalg.norm",
"numpy.full",
"numpy.ones",
"numpy.argmax",
"numpy.random.randn",
"numpy.random.rand",
"numpy.isscalar",
"numpy.log10",
"numpy.repeat",
"numpy.exp",
"numpy.zeros"
],
[
"sklearn.metrics.r2_score",
"numpy.sqrt",
"sklearn.linear_model.Lasso",
"numpy.concatenate",
"numpy.random.randn",
"numpy.random.rand",
"numpy.repeat",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
qxmd/implicit-recommender
|
[
"abd0fe8b7da46655861be36cb408b79e23ff6805"
] |
[
"implicitmf/validation.py"
] |
[
"#!/usr/bin/env python\n\n\"\"\"\nValidation\n==========\n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom scipy.sparse import csr_matrix\nfrom implicitmf._utils import _sparse_checker\n\ndef hold_out_entries(X, hold_out_size=0.2, seed=None):\n \"\"\"\n Generates a sparse array of training examples by masking a random subset of values\n\n Parameters\n ----------\n X : scipy.sparse.csr_matrix\n sparse array of all observed interactions\n hold_out_size : float\n proportion of entries to be masked \n seed : int\n random seed for use by np.random.choice\n\n Returns\n -------\n scipy.sparse.csr_matrix\n sparse matrix of same shape as X with hold_out_size proportion of entries masked\n \"\"\"\n _sparse_checker(X, '`X`')\n\n # compute the number of nonzero entries in sparse array\n num_nonzero = X.count_nonzero()\n\n # set seed and randomly select some entries to be held out\n np.random.seed(seed)\n rand_hold_out = np.random.choice(np.arange(num_nonzero),\n size=int(\n np.floor(num_nonzero*hold_out_size)),\n replace=False)\n\n # get the indices of the nonzero components\n ind_nonzero = X.nonzero()\n\n # use randomly selected hold out values to pluck out corresponding indices\n indices_hold_out = (\n ind_nonzero[0][rand_hold_out], ind_nonzero[1][rand_hold_out])\n X[indices_hold_out] = 0\n X.eliminate_zeros()\n return X\n\ndef cross_val_folds(X, n_folds, seed=None):\n \"\"\"\n Generates cross validation folds using provided utility matrix\n\n Parameters\n ----------\n X : scipy.sparse.csr_matrix\n utility matrix of shape (u, i) where u is number of users and i is number of items\n n_folds : int\n number of folds to create\n seed : int\n random seed for use by np.random.choice\n\n Returns\n -------\n dict\n dictionary of length n_folds\n \n Example\n -------\n >>> output = cross_val_folds(X, n_folds=3, seed=42)\n ... print(output)\n {0: {'train': X_train, 'test': X_test}, \n 1: {'train': X_train, 'test': X_test},\n 2: {'train': X_train, 'test': X_test}}\n \"\"\"\n _sparse_checker(X, '`X`')\n\n if not isinstance(n_folds, int) or n_folds < 2:\n raise TypeError(\"`n_folds` must be an integer equal to or greater than 2\")\n\n # compute the number of nonzero entries in sparse array\n num_nonzero = X.count_nonzero()\n ind_nonzero = X.nonzero()\n\n # set seed and shuffle the indices of the nonzero entries\n np.random.seed(seed)\n shuffled_ind = np.random.choice(\n np.arange(num_nonzero), size=num_nonzero, replace=False)\n\n fold_sizes = (num_nonzero // n_folds) * np.ones(n_folds, dtype=np.int)\n fold_sizes[:(num_nonzero % n_folds)] += 1\n\n split_shuffled_ind = dict()\n current = 0\n for key, fold_size in enumerate(fold_sizes):\n start, stop = current, current + fold_size\n split_shuffled_ind[key] = shuffled_ind[start:stop]\n current = stop\n\n # use the split shuffled indices to subset indices of nonzero entries from X\n val_indices = {key: (ind_nonzero[0][val], ind_nonzero[1][val])\n for key, val in split_shuffled_ind.items()}\n\n folds = dict()\n for i in range(n_folds):\n print('Creating fold number {} ...'.format(i+1))\n test = csr_matrix((np.array(X[val_indices[i]]).reshape(-1),\n val_indices[i]), shape=X.shape)\n\n train = X - test\n train.eliminate_zeros()\n\n folds[i] = {'train': train, 'test': test}\n return folds"
] |
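`hold_out_entries` above masks a random fraction of the observed (nonzero) entries in place; a tiny end-to-end sketch on a 3x3 utility matrix:

```python
import numpy as np
from scipy.sparse import csr_matrix

X = csr_matrix(np.array([[1., 0., 2.], [0., 3., 0.], [4., 0., 5.]]))  # 5 observed entries
rng = np.random.RandomState(42)
rows, cols = X.nonzero()
held_out = rng.choice(len(rows), size=int(np.floor(len(rows) * 0.4)), replace=False)
X[rows[held_out], cols[held_out]] = 0  # mask the held-out interactions
X.eliminate_zeros()
print(X.count_nonzero(), 'of 5 entries remain in the training matrix')
```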
[
[
"numpy.random.seed",
"numpy.arange",
"numpy.ones",
"numpy.floor",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
houruiyan/hilearn
|
[
"44960832769e142babd42de311890c25fe18c943"
] |
[
"hilearn/plot/base_plot.py"
] |
[
"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# codes for ggplot like backgroud\n# http://messymind.net/making-matplotlib-look-like-ggplot/\n# Or simply import seaborn\n\nfavorite_colors=[\"deepskyblue\", \"limegreen\", \"orangered\", \"cyan\", \"magenta\", \n \"gold\", \"blueviolet\", \"dodgerblue\", \"greenyellow\", \"tomato\",\n \"turquoise\", \"orchid\", \"darkorange\", \"mediumslateblue\"]\n\nWeiZhu_colors = ['#4796d7', '#f79e54', '#79a702', '#df5858', '#556cab', \n '#de7a1f', '#ffda5c', '#4b595c', '#6ab186', '#bddbcf', \n '#daad58', '#488a99', '#f79b78', '#ffba00']\n\n#seaborn_colors = seaborn.color_palette(\"hls\", 8)\n\ndef set_colors(color_list=WeiZhu_colors):\n \"\"\"\n Set the favorite colors in matplotlib color_cycle and return the \n list of favorite colors.\n \"\"\"\n matplotlib.rcParams['axes.color_cycle'] = color_list\n return color_list\n\n\ndef set_frame(ax):\n \"\"\"Example of setting the frame of the plot box.\n \"\"\"\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(True)\n ax.spines['left'].set_visible(True)\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.get_xaxis().set_tick_params(direction='out')\n ax.get_yaxis().set_tick_params(direction='out')\n return ax\n\n\ndef set_style(label_size=12, grid_alpha=0.4):\n \"\"\"\n Set the figure style\n \"\"\"\n if grid_alpha is not None and grid_alpha >= 0 and grid_alpha <= 1:\n matplotlib.rcParams['grid.alpha'] = grid_alpha\n matplotlib.rcParams['xtick.labelsize'] = label_size\n matplotlib.rcParams['ytick.labelsize'] = label_size\n matplotlib.rcParams['legend.fontsize'] = label_size * 1.1\n matplotlib.rcParams['axes.labelsize'] = label_size * 1.1\n matplotlib.rcParams['axes.titlesize'] = label_size * 1.2\n matplotlib.rcParams['axes.titleweight'] = 'bold'\n\n\ndef venn3_plot(sets, set_labels=('A', 'B', 'C'), \n set_colors=None, alpha=1.0, circle_on=False):\n \"\"\"\n venn3 plot based on venn3 and venn3_circles from matplotlib_venn.\n\n Example:\n --------\n set1 = set(['A', 'B', 'C', 'D'])\n set2 = set(['B', 'C', 'D', 'E'])\n set3 = set(['C', 'D',' E', 'F', 'G'])\n venn3_plot([set1, set2, set3], ('Set1', 'Set2', 'Set3'))\n \"\"\"\n from matplotlib_venn import venn3, venn3_circles\n\n if circle_on:\n v = venn3_circles(subsets=(1,1,1,1,1,1,1), alpha=0.8, color=\"r\")\n if set_colors is None: \n set_colors = favorite_colors[:3]\n v = venn3(subsets=(1,1,1,1,1,1,1), set_labels=set_labels, \n set_colors=set_colors, alpha=alpha)\n v.get_label_by_id('111').set_text(len(sets[0]&sets[1]&sets[2]))\n v.get_label_by_id('110').set_text(len(sets[0]&sets[1]-sets[2]))\n v.get_label_by_id('101').set_text(len(sets[0]-sets[1]&sets[2]))\n v.get_label_by_id('100').set_text(len(sets[0]-sets[1]-sets[2]))\n v.get_label_by_id('011').set_text(len(sets[2]&sets[1]-sets[0]))\n v.get_label_by_id('010').set_text(len(sets[1]-sets[2]-sets[0]))\n v.get_label_by_id('001').set_text(len(sets[2]-sets[1]-sets[0]))\n\n return v\n\n\ndef contour2D(x, y, f, N=10, cmap=\"bwr\", contourLine=True, optima=True, **kwargs):\n \"\"\"\n Plot 2D contour.\n \n Parameters\n ----------\n x: array like (m1, )\n The first dimention\n y: array like (m2, )\n The Second dimention\n f: a function\n The funciton return f(x1, y1)\n N: int\n The number of contours\n contourLine: bool\n Turn the contour line\n optima: bool\n Turn on the optima\n **kwargs: further arguments for matplotlib.boxplot\n \n Returns\n -------\n result: contourf\n The same as the return of 
matplotlib.plot.contourf\n See: .. plot:: http://matplotlib.org/examples/pylab_examples/contourf_demo.html\n\n Example\n -------\n def f(x, y):\n return -x**2-y**2\n x = np.linspace(-0.5, 0.5, 100)\n y = np.linspace(-0.5, 0.5, 100)\n contour2D(x, y, f)\n \"\"\"\n X, Y = np.meshgrid(x, y)\n Z = np.zeros(X.shape)\n for i in range(Z.shape[0]):\n for j in range(Z.shape[1]):\n Z[i, j] = f(X[i, j], Y[i, j])\n # flat argmax -> grid coordinates (valid for any grid shape)\n iy, ix = np.unravel_index(np.argmax(Z), Z.shape)\n\n cf = plt.contourf(X, Y, Z, N, cmap=cmap, **kwargs)\n if contourLine is True:\n C = plt.contour(X, Y, Z, N, alpha=0.7, colors=\"k\", linewidths=0.5)\n plt.clabel(C, inline=1, fontsize=10)\n if optima is True:\n plt.scatter(X[iy, ix], Y[iy, ix], s=120, marker='*')\n\n plt.xlim(np.min(x), np.max(x))\n plt.ylim(np.min(y), np.max(y))\n return cf\n\n\n"
] |
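The optimum lookup in `contour2D` above reduces to converting a flat `np.argmax` index into grid coordinates; `np.unravel_index` does this for any grid shape:

```python
import numpy as np

x = np.linspace(-0.5, 0.5, 100)
y = np.linspace(-0.5, 0.5, 100)
X, Y = np.meshgrid(x, y)
Z = -X**2 - Y**2
i, j = np.unravel_index(np.argmax(Z), Z.shape)  # row/col of the maximum
print(X[i, j], Y[i, j])                         # optimum near (0, 0)
```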
[
[
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.clabel",
"matplotlib.pyplot.scatter",
"numpy.min",
"numpy.max",
"numpy.argmax",
"matplotlib.pyplot.contour",
"numpy.meshgrid",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
s-mizuki-nlp/word_sense_disambiguation
|
[
"e920b37f879e8db8f2b2d7a55e7997bb8b61ff1a"
] |
[
"model/logit_layer.py"
] |
[
"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom __future__ import division, absolute_import, print_function, unicode_literals\n\nfrom typing import Optional, Union\nimport math\n\nimport torch\nfrom torch import nn\n\nfrom model.encoder_internal import BasePrefixAwareLayer, BaseLogitAdjustableLayer\nfrom model.hashembed import HashEmbedding\n\n\nclass HashCodeAwareLogits(BaseLogitAdjustableLayer):\n\n def __init__(self, n_digits: int, n_ary_out: int,\n num_embeddings: int, embedding_dim: int, num_buckets: int,\n additive: bool,\n logit_adjustment: bool,\n matrix_rank_reduction: bool = False,\n num_hashes=2,\n replace_trailing_zeroes: bool = False,\n **kwargs):\n \"\"\"\n Prefix-aware (=y_{<d}) logit layer implemented on top of Hash Embedings.\n\n @param n_digits: number of digits of sense code.\n @param n_ary_out: number of ary of sense code.\n @param num_embeddings: vocabulary size of Hash Embeddings.\n @param embedding_dim: number of hidden dimensions.\n @param num_buckets: number of unique shared embeddings of Hash Embeddings algorithm.\n @param additive: average along with prefixes (=sense hierarchy).\n @param logit_adjustment: adjust logit score using prefix-aware prior probability\n @param matrix_rank_reduction: apply rank reduction to logit coefficient matrix. it doesn't work well so far.\n @param num_hashes:\n @param replace_trailing_zeroes: replace prefix index of zeroes with last non-zero indices.\n @param kwargs:\n \"\"\"\n if logit_adjustment:\n for required_argument in (\"logit_adjust_tau\", \"logit_adjust_when\"):\n assert required_argument in kwargs, f\"argument {required_argument} must be specified.\"\n super().__init__(replace_trailing_zeroes=replace_trailing_zeroes, null_prefix_index=0,\n num_classes=n_ary_out, unobserved_class_fill_strategy=kwargs.get(\"unobserved_class_fill_strategy\", \"min\"),\n smoothing_alpha=kwargs.get(\"smoothing_alpha\", 0.1),\n logit_adjust_when=kwargs[\"logit_adjust_when\"],\n logit_adjust_tau=kwargs[\"logit_adjust_tau\"])\n else:\n super().__init__(replace_trailing_zeroes=replace_trailing_zeroes, null_prefix_index=0,\n logit_adjust_when=False)\n\n self._n_digits = n_digits\n self._n_ary = n_ary_out\n self._n_dim_emb = embedding_dim\n self._n_distinc_prefix = num_embeddings\n self._logit_adjustment = logit_adjustment\n self._additive = additive\n self._matrix_rank_reduction = matrix_rank_reduction\n if matrix_rank_reduction:\n self._matrix_rank = max(int(math.sqrt(n_ary_out)), n_ary_out // 8)\n else:\n self._matrix_rank = n_ary_out\n\n # prefix hash から HashEmbeddingsを使って n_ary * n_dim_emb 個のparameterをlookupする\n if matrix_rank_reduction:\n _embedding_dim = (n_ary_out + embedding_dim) * self._matrix_rank\n else:\n _embedding_dim = embedding_dim*n_ary_out\n\n self._logit_layer_weights = HashEmbedding(num_embeddings=num_embeddings, num_hashes=num_hashes,\n embedding_dim=_embedding_dim,\n num_buckets=num_buckets, append_weight=False)\n\n def forward(self, input_sequence: torch.Tensor, t_representation: torch.Tensor):\n # input_sequence: (n_batch, n_digits_so_far) input_sequence[b,d] \\in {0,n_ary_in}\n # t_representation: (n_batch, n_digits_so_far, n_dim)\n\n n_digits_so_far = min(self._n_digits, input_sequence.shape[-1])\n\n # input_sequence_prefix_hashes: (n_batch, n_digits_so_far)\n input_sequence_prefix_hashes = self.transform_sequence_to_prefix_indices(input_sequence)\n # t_weight_: (n_batch, n_digits_so_far, n_ary_out * n_dim)\n t_weight_ = self._logit_layer_weights.forward(input_sequence_prefix_hashes)\n\n if self._additive:\n # moving 
average from MSD to d-th digits.\n t_weight_ = torch.cumsum(t_weight_, dim=1)\n # by dividing number of digits, it may avoid nan error.\n # t_denom: (1, n_digits_so_far, 1)\n t_denom = torch.arange(start=1, end=n_digits_so_far+1, device=t_weight_.device).view(1, -1, 1)\n t_weight_ = t_weight_ / t_denom\n\n # t_weight: (n_batch, n_digits_so_far, n_ary_out, n_dim)\n if self._matrix_rank_reduction:\n # u: (n_batch, n_digits_so_far, n_ary_out, n_rank)\n # v: (n_batch, n_digits_so_far, n_rank, n_dim)\n t_weight_u, t_weight_v = torch.split(t_weight_, [self._n_ary*self._matrix_rank, self._n_dim_emb*self._matrix_rank], dim=-1)\n t_weight_u = t_weight_u.view((-1, n_digits_so_far, self._n_ary, self._matrix_rank))\n t_weight_v = t_weight_v.view((-1, n_digits_so_far, self._matrix_rank, self._n_dim_emb))\n # t_logits_: (n_batch, n_digits_so_far, n_rank, 1)\n t_logits_ = torch.matmul(t_weight_v, t_representation.unsqueeze(-1))\n # t_logits: (n_batch, n_digits_so_far, n_ary_out)\n t_logits = torch.matmul(t_weight_u, t_logits_).squeeze(-1)\n else:\n t_weight = t_weight_.view((-1, n_digits_so_far, self._n_ary, self._n_dim_emb))\n # t_logits: (n_batch, n_digits_so_far, n_ary_out)\n t_logits = torch.matmul(t_weight, t_representation.unsqueeze(-1)).squeeze(-1)\n\n if self._logit_adjustment:\n t_logits = super().apply_logit_adjustment(logits=t_logits, sequences=input_sequence)\n\n return t_logits\n\n def init_weights(self, *args, **kwargs):\n self._logit_layer_weights.reset_parameters(std=0.00001)\n\n def summary(self):\n ret = super().summary()\n ret[\"matrix_rank_reduction\"] = self._matrix_rank_reduction\n ret[\"matrix_rank\"] = self._matrix_rank\n ret[\"num_buckets\"] = self._logit_layer_weights.num_buckets\n ret[\"num_hashes\"] = self._logit_layer_weights.num_hashes\n ret[\"additive\"] = self._additive\n return ret\n\n\nclass AdditiveCodeAwareLogits(torch.nn.Module):\n\n def __init__(self, n_digits: int, n_ary_in: int, n_ary_out: int, n_dim_emb: int,\n bias: bool = False,\n depends_on_previous_digits: Optional[int] = None,\n **kwargs):\n\n super().__init__()\n self._n_digits = n_digits\n self._n_ary_in = n_ary_in\n self._n_ary_out = n_ary_out\n self._n_dim_emb = n_dim_emb\n self._bias = bias\n self._depends_on_previous_digits = depends_on_previous_digits\n\n cfg_base_weight_layer = {\n \"num_embeddings\": n_ary_in,\n \"embedding_dim\": n_ary_out * n_dim_emb\n }\n cfg_base_weight_layer.update(kwargs)\n\n # base_weight_layers: (n_digit, n_ary_in, n_ary_out * n_dim)\n lst_base_weight_layers = [nn.Embedding(**cfg_base_weight_layer) for _ in range(n_digits)]\n self.base_weight_layers = nn.ModuleList(lst_base_weight_layers)\n\n # offset_weights: (n_digit, n_ary_out * n_dim)\n if bias:\n self.bias_weights = nn.Parameter(torch.zeros(size=(n_digits, n_ary_out * n_dim_emb)), requires_grad=True)\n\n self.init_weights()\n\n def init_weights(self):\n for layer in self.base_weight_layers:\n nn.init.zeros_(layer.weight)\n\n def _ragged_cumsum(self, tensor: torch.Tensor, dim: int, stride: Optional[int]):\n if stride is None:\n # t_cumsum[:,d,:] = tensor[:,:d+1,:].sum(dim=dim)\n t_cumsum = torch.cumsum(tensor, dim=dim)\n else:\n # t_cumsum[:,d,:] = tensor[:,(d-stride):d+1,:].sum(dim=dim)\n shp = list(tensor.shape)\n length = shp[dim]\n\n _stride = min(stride + 1, length)\n t_ = torch.cumsum(tensor, dim=dim)\n\n shp[dim] = _stride\n pad = torch.zeros(shp, dtype=tensor.dtype).to(tensor.device)\n index = torch.arange(end=length - _stride).to(tensor.device)\n t_ragged = torch.index_select(t_, dim=dim, index=index)\n\n 
t_cumsum = t_ - torch.cat((pad, t_ragged), dim=dim)\n\n return t_cumsum\n\n def forward(self, input_sequence: torch.Tensor, t_representation: torch.Tensor):\n # input_sequence: (n_batch, n_digits_so_far) input_sequence[b,d] \\in {0,n_ary_in}\n # t_representation: (n_batch, n_digits_so_far, n_dim)\n\n device = input_sequence.device\n n_digits_so_far = min(self._n_digits, input_sequence.shape[-1])\n lst_base_weights = [self.base_weight_layers[digit](input_sequence[:,digit]) for digit in range(n_digits_so_far)]\n # t_base_weight: (n_batch, n_digits_so_far, n_ary_out * n_dim)\n t_base_weight = torch.stack(lst_base_weights, dim=1)\n if self._depends_on_previous_digits is None:\n t_weight_ = torch.cumsum(t_base_weight, dim=1)\n # by dividing number of digits, it may avoid nan error.\n # t_denom: (1, n_digits_so_far, 1)\n t_denom = torch.arange(start=1, end=n_digits_so_far+1, device=device).view(1, -1, 1)\n t_weight_ = t_weight_ / t_denom\n else:\n t_weight_ = self._ragged_cumsum(t_base_weight, dim=1, stride=min(self._depends_on_previous_digits, n_digits_so_far))\n if self._bias:\n t_weight_ = t_weight_ + self.bias_weights[:n_digits_so_far, :]\n # t_weight: (n_batch, n_digits_so_far, n_ary_out, n_dim)\n t_weight = t_weight_.view((-1, n_digits_so_far, self._n_ary_out, self._n_dim_emb))\n # t_logits: (n_batch, n_digits_so_far, n_ary_out)\n t_logits = torch.matmul(t_weight, t_representation.unsqueeze(-1)).squeeze(-1)\n\n return t_logits\n\n def summary(self):\n ret = {}\n for attr_name in (\"bias\", \"depends_on_previous_digits\", \"n_ary_in\", \"n_ary_out\"):\n ret[attr_name] = getattr(self, f\"_{attr_name}\")\n return ret\n\n\nclass PositionAwareLogits(torch.nn.Module):\n\n def __init__(self, n_seq_len: int = None, **kwargs):\n\n super().__init__()\n if isinstance(n_seq_len, int):\n lst_layers = [nn.Linear(**kwargs) for _ in range(n_seq_len)]\n self.linear_layers = nn.ModuleList(lst_layers)\n else:\n self.linear_layers = nn.Linear(**kwargs)\n self.n_seq_len = n_seq_len\n\n self.init_weights()\n\n def init_weights(self):\n if isinstance(self.linear_layers, nn.ModuleList):\n for layer in self.linear_layers:\n nn.init.zeros_(layer.weight)\n else:\n nn.init.zeros_(self.linear_layers.weight)\n\n def forward(self, t_representation: torch.Tensor, **kwargs) -> torch.Tensor:\n # t_representation: (n_batch, n_digits_so_far, n_dim)\n assert t_representation.ndim == 3, f\"unexpected dimension size: {t_representation.ndim}\"\n if isinstance(self.linear_layers, nn.ModuleList):\n n_digits = t_representation.shape[1]\n lst_t_logits = [self.linear_layers[digit](t_representation[:,digit,:]) for digit in range(n_digits)]\n t_logits = torch.stack(lst_t_logits, dim=1)\n else:\n t_logits = self.linear_layers.forward(t_representation)\n # t_logits: (n_batch, n_digits_so_far, n_ary)\n return t_logits\n\n def summary(self):\n ret = {\n \"n_seq_len\": self.n_seq_len\n }\n return ret"
] |
[
[
"torch.zeros",
"torch.cat",
"torch.nn.ModuleList",
"torch.arange",
"torch.nn.Embedding",
"torch.nn.Linear",
"torch.matmul",
"torch.split",
"torch.stack",
"torch.nn.init.zeros_",
"torch.cumsum",
"torch.index_select"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
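A minimal sketch (toy shapes, not from the repo above) of the prefix-averaging trick that both `HashCodeAwareLogits` and `AdditiveCodeAwareLogits` in the recorded code apply to per-digit weights: `torch.cumsum` over the digit axis followed by division by the digit count leaves, at position d, the mean of the weights for digits 0..d rather than their sum.

```python
import torch

n_batch, n_digits, n_dim = 2, 4, 8
t_base_weight = torch.randn(n_batch, n_digits, n_dim)

# Running sum over digits, then divide by the digit count so that
# position d holds the mean of the weights for digits 0..d.
t_cumsum = torch.cumsum(t_base_weight, dim=1)
t_denom = torch.arange(1, n_digits + 1).view(1, -1, 1)
t_prefix_mean = t_cumsum / t_denom

# Sanity check against an explicit mean over the first three digits.
assert torch.allclose(t_prefix_mean[:, 2], t_base_weight[:, :3].mean(dim=1))
```

Dividing by the digit count, as the comment in the recorded code notes, also keeps the magnitude of the averaged weights stable as more digits accumulate, which helps avoid numerical blow-ups.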
mikniloes/frcnn-sutdy
|
[
"f9aacb22f0e4306dc5cc54aebc6172e129bc2d93",
"f9aacb22f0e4306dc5cc54aebc6172e129bc2d93"
] |
[
"lib/model/roi/roi_align.py",
"lib/dataset/voc/pascal_voc_org.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom _C import roi_align_forward\nfrom _C import roi_align_backward\n\nclass _ROIAlign(torch.autograd.Function):\n @staticmethod\n def forward(ctx, input, rois, spatial_scale, output_size, sampling_ratio, aligned):\n \"\"\"\n Performs Region of Interest (RoI) Align operator described in Mask R-CNN\n Arguments:\n input (Tensor[N, C, H, W]): input tensor\n rois (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2)\n format where the regions will be taken from. If a single Tensor is passed,\n then the first column should contain the batch index. If a list of Tensors\n is passed, then each Tensor will correspond to the boxes for an element i\n in a batch\n output_size (int or Tuple[int, int]): the size of the output after the cropping\n is performed, as (height, width)\n spatial_scale (float): a scaling factor that maps the input coordinates to\n the box coordinates.\n sampling_ratio (int): number of sampling points in the interpolation grid\n used to compute the output value of each pooled output bin. If > 0,\n then exactly sampling_ratio x sampling_ratio grid points are used. If\n <= 0, then an adaptive number of grid points are used (computed as\n ceil(roi_width / pooled_w), and likewise for height).\n aligned (bool): If False, use the legacy implementation.\n If True, pixel shift it by -0.5 for align more perfectly about two neighboring pixel indices.\n This version in Detectron2\n Returns:\n output (Tensor[K, C, output_size[0], output_size[1]])\n \"\"\"\n result = roi_align_forward(input, rois, spatial_scale, output_size, sampling_ratio, aligned)\n \n ctx.save_for_backward(torch.tensor(input.shape, dtype=torch.int32),\n rois, torch.Tensor([spatial_scale, sampling_ratio, aligned]))\n \n return result\n\n @staticmethod\n def backward(ctx, grad_output):\n input_size, rois, params = ctx.saved_tensors\n spatial_scale = params[0].item()\n sampling_ratio = params[1].int().item()\n aligned = bool(params[2].int().item())\n\n grad_input = roi_align_backward(grad_output, rois, spatial_scale,\n input_size, sampling_ratio, aligned)\n \n return grad_input, None, None, None, None, None\n\n\nroi_align = _ROIAlign.apply\n\n\nclass ROIAlign(nn.Module):\n def __init__(self, spatial_scale, output_size, sampling_ratio, aligned=False):\n super(ROIAlign, self).__init__()\n self.spatial_scale = spatial_scale\n self.output_size = output_size\n self.sampling_ratio = sampling_ratio\n self.aligned = aligned\n\n def forward(self, input, rois):\n return roi_align(input,\n rois,\n self.spatial_scale,\n self.output_size,\n self.sampling_ratio,\n self.aligned)",
"import os\nimport re\nimport pickle\nimport uuid\nimport cv2 as cv\nimport numpy as np\nimport xml.etree.ElementTree as ET\nimport dataset.utils as utils\nfrom dataset.image_dataset import ImageDataset\nfrom dataset.voc.voc_eval import voc_eval\n\n# _CLASSES = ('__background__', # always index 0\n# 'aeroplane', 'bicycle', 'bird', 'boat',\n# 'bottle', 'bus', 'car', 'cat', 'chair',\n# 'cow', 'diningtable', 'dog', 'horse',\n# 'motorbike', 'person', 'pottedplant',\n# 'sheep', 'sofa', 'train', 'tvmonitor')\n\n_CLASSES = ('__background__','tank')\n\nclass PascalVoc(ImageDataset):\n def __init__(self, image_set, year, params, only_classes=False):\n ImageDataset.__init__(self, 'voc_' + year + '_' + image_set, params)\n self._image_set = image_set\n self._year = year\n self._devkit_path = params['devkit_path']\n self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)\n assert os.path.exists(self._data_path), \\\n 'Path to data does not exist: {}'.format(self._data_path)\n self._classes = _CLASSES\n if not only_classes:\n self._class_index = dict(zip(self.classes, range(self.num_classes)))\n self._image_index = self._load_image_index()\n self._image_data = self._load_image_data()\n self._salt = str(uuid.uuid4())\n self._comp_id = 'comp1'\n\n # PASCAL specific config options\n self.config = {'cleanup': True,\n 'use_salt': True,\n 'use_diff': False,\n 'matlab_eval': False}\n\n def image_path_at(self, id):\n# image_path = os.path.join(self._data_path, 'JPEGImages',str(id) + '.jpg')\n # for train\n # virtual data path : '/root/data/virtual_Data/unreal_5000_default/train'\n\n image_path = os.path.join('/root/data/virtual_Data/unreal_5000_default/train', str(id)+'.png')\n print(image_path)\n\n #if str(id).endswith('Object'):\n #print(str(id))\n # image_path = os.path.join('/root/data/virtual_Data/unreal_5000_default/train', str(id) + '.png')\n #else:\n # image_path = os.path.join(self._data_path, 'JPEGImages', str(id)+ '.jpg')\n # for test\n #image_path = os.path.join('/root/data/Validation_ADD_0115/Validation/R_tank_InHouseTanktest_side_RGB_800', str(id) + '.jpg')\n #print(image_path)\n assert os.path.exists(image_path), \\\n 'Image Path does not exist: {}'.format(image_path)\n return image_path\n \n def _load_image_index(self):\n image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main', \n self._image_set + '.txt')\n assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file)\n with open(image_set_file) as f:\n image_index = []\n for id in f.readlines():\n _tmp = re.sub(r'\\s+', ' ', id).strip().split(' ')\n if len(_tmp) == 1:\n image_index.append(_tmp[0])\n elif len(_tmp) > 1:\n if _tmp[1] == '0' or _tmp[1] == '1': image_index.append(_tmp[0])\n else:\n raise ValueError('Unknown string format: %s' % (id))\n\n return image_index\n \n def _load_annotation(self, idx, id):\n img_path = self.image_path_at(id)\n img_size = cv.imread(img_path).shape\n file_name = os.path.join(self._data_path, 'Annotations', id + '.xml')\n# print(f'id : {id}')\n# print(f'filename : {file_name}')\n tree = ET.parse(file_name)\n objects = tree.findall('object')\n objects_count = len(objects)\n \n boxes = np.zeros((objects_count, 4), dtype=np.uint16)\n is_difficult = np.zeros((objects_count), dtype=np.int32)\n is_truncated = np.zeros((objects_count), dtype=np.int32)\n gt_classes = np.zeros((objects_count), dtype=np.int32)\n overlaps = np.zeros((objects_count, self.num_classes), dtype=np.float32)\n areas = np.zeros((objects_count), dtype=np.float32)\n \n for idx, obj in 
enumerate(objects):\n bndbox = obj.find('bndbox')\n # Start coord is 0\n x1 = max(int(bndbox.find('xmin').text.strip()),0)\n y1 = max(int(bndbox.find('ymin').text.strip()),0)\n x2 = max(int(bndbox.find('xmax').text.strip()),0)\n y2 = max(int(bndbox.find('ymax').text.strip()),0)\n boxes[idx, :] = [x1, y1, x2, y2]\n \n difficult = obj.find('difficult')\n difficult = 0 if difficult is None else int(difficult.text)\n is_difficult[idx] = difficult\n \n truncated = obj.find('truncated')\n truncated = 0 if truncated is None else int(truncated.text)\n is_truncated[idx] = truncated\n \n cls = self._class_index[obj.find('name').text.lower().strip()]\n gt_classes[idx] = cls\n overlaps[idx, cls] = 1.0\n areas[idx] = (x2 - x1 + 1) * (y2 - y1 + 1)\n try: \n utils.validate_boxes(boxes, width=img_size[1], height=img_size[0])\n except(AssertionError) as err:\n print(err)\n print(boxes)\n print(file_name)\n return {'index': idx,\n 'id': str(id),\n 'path': img_path,\n 'width': img_size[1],\n 'height': img_size[0],\n 'boxes': boxes, \n 'gt_is_difficult': is_difficult, \n 'gt_is_truncated': is_truncated, \n 'gt_classes': gt_classes, \n 'gt_overlaps': overlaps, \n 'gt_areas': areas,\n 'flipped': False}\n\n def _get_comp_id(self):\n comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']\n else self._comp_id)\n return comp_id\n\n def _get_voc_results_file_template(self):\n # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt\n filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'\n filedir = os.path.join(self._devkit_path, 'results', 'VOC' + self._year, 'Main')\n if not os.path.exists(filedir):\n os.makedirs(filedir)\n path = os.path.join(filedir, filename)\n return path\n\n def _write_voc_results_file(self, all_boxes):\n for cls_ind, cls in enumerate(self._classes):\n if cls == '__background__':\n continue\n print('Writing {} VOC results file'.format(cls))\n filename = self._get_voc_results_file_template().format(cls)\n with open(filename, 'wt') as f:\n for im_ind, index in enumerate(self._image_index):\n dets = all_boxes[cls_ind][im_ind]\n if dets == []:\n continue\n # the VOCdevkit expects 1-based indices\n for k in range(dets.shape[0]):\n f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\\n'.\n format(index, dets[k, -1],\n dets[k, 0] + 1, dets[k, 1] + 1,\n dets[k, 2] + 1, dets[k, 3] + 1))\n\n def _do_python_eval(self, output_dir='output'):\n annopath = os.path.join(\n self._devkit_path,\n 'VOC' + self._year,\n 'Annotations',\n '{}.xml')\n imagesetfile = os.path.join(\n self._devkit_path,\n 'VOC' + self._year,\n 'ImageSets',\n 'Main',\n self._image_set + '.txt')\n cachedir = os.path.join(self._devkit_path, 'annotations_cache')\n aps = []\n # The PASCAL VOC metric changed in 2010\n use_07_metric = True if int(self._year) < 2010 else False\n print('VOC07 metric? 
' + ('Yes' if use_07_metric else 'No'))\n if not os.path.isdir(output_dir):\n os.mkdir(output_dir)\n for _, cls in enumerate(self._classes):\n if cls == '__background__':\n continue\n filename = self._get_voc_results_file_template().format(cls)\n rec, prec, ap = voc_eval(\n filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,\n use_07_metric=use_07_metric)\n aps += [ap]\n print('AP for {} = {:.4f}'.format(cls, ap))\n with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:\n pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)\n print('Mean AP = {:.4f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('Results:')\n for ap in aps:\n print('{:.3f}'.format(ap))\n print('{:.3f}'.format(np.mean(aps)))\n print('~~~~~~~~')\n print('')\n print('--------------------------------------------------------------')\n print('Results computed with the **unofficial** Python eval code.')\n print('Results should be very close to the official MATLAB eval code.')\n print('Recompute with `./tools/reval.py --matlab ...` for your paper.')\n print('-- Thanks, The Management')\n print('--------------------------------------------------------------')\n \n def evaluate_detections(self, all_boxes, output_dir):\n self._write_voc_results_file(all_boxes)\n self._do_python_eval(output_dir)\n if self.config['matlab_eval']:\n #self._do_matlab_eval(output_dir)\n raise NotImplementedError\n if self.config['cleanup']:\n for cls in self._classes:\n if cls == '__background__':\n continue\n filename = self._get_voc_results_file_template().format(cls)\n os.remove(filename)\n \n def competition_mode(self, on):\n if on:\n self.config['use_salt'] = False\n self.config['cleanup'] = False\n else:\n self.config['use_salt'] = True\n self.config['cleanup'] = True\n"
] |
[
[
"torch.Tensor",
"torch.tensor"
],
[
"numpy.zeros",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
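The `_ROIAlign` class above wires compiled kernels into autograd; below is a minimal sketch of the same `torch.autograd.Function` pattern, with a trivial scale op standing in for the `roi_align_forward`/`roi_align_backward` extension calls so the sketch runs without the `_C` module.

```python
import torch

class _Scale(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, factor):
        ctx.factor = factor          # stash non-tensor args on ctx
        return input * factor

    @staticmethod
    def backward(ctx, grad_output):
        # One gradient per forward argument; None for non-tensor args.
        return grad_output * ctx.factor, None

x = torch.ones(3, requires_grad=True)
_Scale.apply(x, 2.0).sum().backward()
print(x.grad)  # tensor([2., 2., 2.])
```

`backward` must return one value per `forward` argument, which is why the recorded implementation returns `grad_input` followed by five `None`s.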
chengingx/fibdrv
|
[
"816f7cab7af1014762a3bbfbef190e922baab276"
] |
[
"AutoGenerate.py"
] |
[
"import os \nimport pandas as pd\nimport math\nfrom tqdm import tqdm\n\ndef process_data(l, pd, name):\n\n print(f\"Processing {name} data...\")\n mean, std = pd.mean(axis = 1), pd.std(axis = 1)\n \n for i in tqdm(range(len(pd.index))):\n sum = 0\n cnt = 0\n for j in pd.iloc[i]:\n if abs(j - mean[i]) < 2 * std[i]:\n sum = sum + j;\n cnt = cnt + 1;\n sum = sum / cnt;\n l.append(sum)\n\nif __name__ == '__main__':\n\n times = 10000\n \n kernel = pd.DataFrame(columns = range(times))\n user = pd.DataFrame(columns = range(times))\n kernel_to_user = pd.DataFrame(columns = range(times))\n\n kernel_cost = []\n user_cost = []\n kernel_to_user_cost = []\n\n print(\"Generating data...\")\n for i in tqdm(range(times)):\n os.system('sudo taskset -c 0 ./client > ./data/' + str(i))\n\n print(\"Reading data...\")\n for i in tqdm(range(times)):\n f = open('./data/' + str(i), 'r')\n content = f.read()\n raw = content.split('\\n')\n raw.remove('')\n \n kernel_tmp = []\n user_tmp = []\n kernel_to_user_tmp = []\n \n for j in range(len(raw)):\n numbers = raw[j].split(' ')\n kernel_tmp.append(int(numbers[1]))\n user_tmp.append(int(numbers[2]))\n kernel_to_user_tmp.append(int(numbers[3]))\n kernel[i] = kernel_tmp\n user[i] = user_tmp\n kernel_to_user[i] = kernel_to_user_tmp\n\n process_data(kernel_cost, kernel, \"kernel\")\n process_data(user_cost, user, \"user\")\n process_data(kernel_to_user_cost, kernel_to_user, \"kernel to user\")\n\n out = []\n for i, j, k, l in zip(range(len(kernel_cost)), kernel_cost, user_cost, kernel_to_user_cost):\n\t out.append('{} {} {} {}'.format(i, j, k, l))\n\n # output the result\n f = open('output_fast.txt', 'w')\n print('file write: {}'.format(f.write('\\n'.join(out))))\n f.close()\n print(\"Finish !\")\n"
] |
[
[
"pandas.mean",
"pandas.std"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
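`process_data` in the `AutoGenerate.py` recorded above rejects samples more than two standard deviations from their row mean and averages the rest; a vectorized pandas sketch of the same statistic (made-up timing data, hypothetical variable names):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(5, 100))  # 5 rows of 100 timing samples

mean = df.mean(axis=1)
std = df.std(axis=1)

# Keep samples within 2 standard deviations of their row mean, then
# average what survives; masked-out values become NaN and mean() skips them.
mask = df.sub(mean, axis=0).abs().lt(2 * std, axis=0)
trimmed_mean = df.where(mask).mean(axis=1)
print(trimmed_mean)
```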
aallaire91/phyre
|
[
"ee882194c12bae5561c25ec65f95a7c0944f8129",
"ee882194c12bae5561c25ec65f95a7c0944f8129"
] |
[
"agents/neural_agent.py",
"agents/nets.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This library contains actual implementation of the DQN agent.\"\"\"\nfrom typing import Optional, Sequence, Tuple\nimport glob\nimport logging\nimport os\nimport time\n\nimport numpy as np\nimport torch\n\nimport nets\nimport phyre\n\nAUCCESS_EVAL_TASKS = 200\nXE_EVAL_SIZE = 10000\n\nTaskIds = Sequence[str]\nNeuralModel = torch.nn.Module\nTrainData = Tuple[torch.Tensor, torch.Tensor, torch.Tensor, phyre.\n ActionSimulator, torch.Tensor]\n\n\ndef create_balanced_eval_set(cache: phyre.SimulationCache, task_ids: TaskIds,\n size: int, tier: str) -> TrainData:\n \"\"\"Prepares balanced eval set to run through a network.\n\n Selects (size // 2) positive (task, action) pairs and (size // 2)\n negative pairs and represents them in a compact formaer\n\n Returns a tuple\n (task_indices, is_solved, selected_actions, simulator, observations).\n\n Tensors task_indices, is_solved, selected_actions, observations, all\n have lengths size and correspond to some (task, action) pair.\n For any i the following is true:\n is_solved[i] is true iff selected_actions[i] solves task\n task_ids[task_indices[i]].\n \"\"\"\n task_ids = tuple(task_ids)\n data = cache.get_sample(task_ids)\n actions = data['actions']\n simulation_statuses = data['simulation_statuses']\n\n flat_statuses = simulation_statuses.reshape(-1)\n [positive_indices\n ] = (flat_statuses == int(phyre.SimulationStatus.SOLVED)).nonzero()\n [negative_indices\n ] = (flat_statuses == int(phyre.SimulationStatus.NOT_SOLVED)).nonzero()\n\n half_size = size // 2\n rng = np.random.RandomState(42)\n positive_indices = rng.choice(positive_indices, half_size)\n negative_indices = rng.choice(negative_indices, half_size)\n\n all_indices = np.concatenate([positive_indices, negative_indices])\n selected_actions = torch.FloatTensor(actions[all_indices % len(actions)])\n is_solved = torch.LongTensor(flat_statuses[all_indices].astype('int')) > 0\n task_indices = torch.LongTensor(all_indices // len(actions))\n\n simulator = phyre.initialize_simulator(task_ids, tier)\n observations = torch.LongTensor(simulator.initial_scenes)\n return task_indices, is_solved, selected_actions, simulator, observations\n\n\ndef compact_simulation_data_to_trainset(action_tier_name: str,\n actions: np.ndarray,\n simulation_statuses: Sequence[int],\n task_ids: TaskIds) -> TrainData:\n \"\"\"Converts result of SimulationCache.get_data() to pytorch tensors.\n\n The format of the output is the same as in create_balanced_eval_set.\n \"\"\"\n invalid = int(phyre.SimulationStatus.INVALID_INPUT)\n solved = int(phyre.SimulationStatus.SOLVED)\n\n task_indices = np.repeat(np.arange(len(task_ids)).reshape((-1, 1)),\n actions.shape[0],\n axis=1).reshape(-1)\n action_indices = np.repeat(np.arange(actions.shape[0]).reshape((1, -1)),\n len(task_ids),\n axis=0).reshape(-1)\n simulation_statuses = simulation_statuses.reshape(-1)\n\n good_statuses = simulation_statuses != invalid\n is_solved = 
torch.LongTensor(\n simulation_statuses[good_statuses].astype('uint8')) == solved\n action_indices = action_indices[good_statuses]\n actions = torch.FloatTensor(actions[action_indices])\n task_indices = torch.LongTensor(task_indices[good_statuses])\n\n simulator = phyre.initialize_simulator(task_ids, action_tier_name)\n observations = torch.LongTensor(simulator.initial_scenes)\n return task_indices, is_solved, actions, simulator, observations\n\n\ndef build_model(network_type: str, **kwargs) -> NeuralModel:\n \"\"\"Builds a DQN network by name.\"\"\"\n if network_type == 'resnet18':\n model = nets.ResNet18FilmAction(\n kwargs['action_space_dim'],\n fusion_place=kwargs['fusion_place'],\n action_hidden_size=kwargs['action_hidden_size'],\n action_layers=kwargs['action_layers'])\n elif network_type == 'simple':\n model = nets.SimpleNetWithAction(kwargs['action_space_dim'])\n else:\n raise ValueError('Unknown network type: %s' % network_type)\n return model\n\n\ndef get_latest_checkpoint(output_dir: str) -> Optional[str]:\n known_checkpoints = sorted(glob.glob(os.path.join(output_dir, 'ckpt.*')))\n if known_checkpoints:\n return known_checkpoints[-1]\n else:\n return None\n\n\ndef load_agent_from_folder(agent_folder: str) -> NeuralModel:\n last_checkpoint = get_latest_checkpoint(agent_folder)\n assert last_checkpoint is not None, agent_folder\n logging.info('Loading a model from: %s', last_checkpoint)\n last_checkpoint = torch.load(last_checkpoint)\n model = build_model(**last_checkpoint['model_kwargs'])\n model.load_state_dict(last_checkpoint['model'])\n model.to(nets.DEVICE)\n return model\n\n\ndef finetune(\n model: NeuralModel,\n data: Sequence[Tuple[int, phyre.SimulationStatus, Sequence[float]]],\n simulator: phyre.ActionSimulator, learning_rate: float,\n num_updates: int) -> None:\n \"\"\"Finetunes a model on a small new batch of data.\n\n Args:\n model: DQN network, e.g., built with build_model().\n data: a list of tuples (task_index, status, action).\n learning_rate: learning rate for Adam.\n num_updates: number updates to do. 
All data is used for every update.\n \"\"\"\n\n data = [x for x in data if not x[1].is_invalid()]\n if not data:\n return\n task_indices, statuses, actions = zip(*data)\n if len(set(task_indices)) == 1:\n observations = np.expand_dims(simulator.initial_scenes[task_indices[0]],\n 0)\n else:\n observations = simulator.initial_scenes[task_indices]\n\n is_solved = torch.tensor(statuses, device=model.device) > 0\n observations = torch.tensor(observations, device=model.device)\n actions = torch.tensor(actions, device=model.device)\n\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n model.train()\n for _ in range(num_updates):\n optimizer.zero_grad()\n model.ce_loss(model(observations, actions), is_solved).backward()\n optimizer.step()\n\n\ndef refine_actions(model, actions, single_observarion, learning_rate,\n num_updates, batch_size, refine_loss):\n observations = torch.tensor(single_observarion,\n device=model.device).unsqueeze(0)\n actions = torch.tensor(actions)\n\n refined_actions = []\n model.eval()\n preprocessed = model.preprocess(observations)\n preprocessed = {k: v.detach() for k, v in preprocessed.items()}\n for start in range(0, len(actions), batch_size):\n action_batch = actions[start:][:batch_size].to(model.device)\n action_batch = torch.nn.Parameter(action_batch)\n optimizer = torch.optim.Adam([action_batch], lr=learning_rate)\n losses = []\n for _ in range(num_updates):\n optimizer.zero_grad()\n logits = model(None, action_batch, preprocessed=preprocessed)\n if refine_loss == 'ce':\n loss = model.ce_loss(logits, actions.new_ones(len(logits)))\n elif refine_loss == 'linear':\n loss = -logits.sum()\n else:\n raise ValueError(f'Unknown loss: {refine_loss}')\n loss.backward()\n losses.append(loss.item())\n optimizer.step()\n action_batch = torch.clamp_(action_batch.data, 0, 1)\n refined_actions.append(action_batch.cpu().numpy())\n refined_actions = np.concatenate(refined_actions, 0).tolist()\n return refined_actions\n\n\ndef train(output_dir,\n action_tier_name,\n task_ids,\n cache,\n train_batch_size,\n learning_rate,\n max_train_actions,\n updates,\n negative_sampling_prob,\n save_checkpoints_every,\n fusion_place,\n network_type,\n balance_classes,\n num_auccess_actions,\n eval_every,\n action_layers,\n action_hidden_size,\n cosine_scheduler,\n dev_tasks_ids=None):\n\n logging.info('Preprocessing train data')\n\n training_data = cache.get_sample(task_ids, max_train_actions)\n task_indices, is_solved, actions, simulator, observations = (\n compact_simulation_data_to_trainset(action_tier_name, **training_data))\n\n logging.info('Creating eval subset from train')\n eval_train = create_balanced_eval_set(cache, simulator.task_ids,\n XE_EVAL_SIZE, action_tier_name)\n if dev_tasks_ids is not None:\n logging.info('Creating eval subset from dev')\n eval_dev = create_balanced_eval_set(cache, dev_tasks_ids, XE_EVAL_SIZE,\n action_tier_name)\n else:\n eval_dev = None\n\n logging.info('Tran set: size=%d, positive_ratio=%.2f%%', len(is_solved),\n is_solved.float().mean().item() * 100)\n\n assert not balance_classes or (negative_sampling_prob == 1), (\n balance_classes, negative_sampling_prob)\n\n device = nets.DEVICE\n model_kwargs = dict(network_type=network_type,\n action_space_dim=simulator.action_space_dim,\n fusion_place=fusion_place,\n action_hidden_size=action_hidden_size,\n action_layers=action_layers)\n model = build_model(**model_kwargs)\n model.train()\n model.to(device)\n logging.info(model)\n\n optimizer = torch.optim.Adam(model.parameters(), 
lr=learning_rate)\n if cosine_scheduler:\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,\n T_max=updates)\n else:\n scheduler = None\n logging.info('Starting actual training for %d updates', updates)\n\n rng = np.random.RandomState(42)\n\n def train_indices_sampler():\n indices = np.arange(len(is_solved))\n if balance_classes:\n solved_mask = is_solved.numpy() > 0\n positive_indices = indices[solved_mask]\n negative_indices = indices[~solved_mask]\n positive_size = train_batch_size // 2\n while True:\n positives = rng.choice(positive_indices, size=positive_size)\n negatives = rng.choice(negative_indices,\n size=train_batch_size - positive_size)\n positive_size = train_batch_size - positive_size\n yield np.concatenate((positives, negatives))\n elif negative_sampling_prob < 1:\n probs = (is_solved.numpy() * (1.0 - negative_sampling_prob) +\n negative_sampling_prob)\n probs /= probs.sum()\n while True:\n yield rng.choice(indices, size=train_batch_size, p=probs)\n else:\n while True:\n yield rng.choice(indices, size=train_batch_size)\n\n last_checkpoint = get_latest_checkpoint(output_dir)\n batch_start = 0\n if last_checkpoint is not None:\n logging.info('Going to load from %s', last_checkpoint)\n last_checkpoint = torch.load(last_checkpoint)\n model.load_state_dict(last_checkpoint['model'])\n optimizer.load_state_dict(last_checkpoint['optim'])\n rng.set_state(last_checkpoint['rng'])\n batch_start = last_checkpoint['done_batches']\n if scheduler is not None:\n scheduler.load_state_dict(last_checkpoint['scheduler'])\n\n def print_eval_stats(batch_id):\n logging.info('Start eval')\n eval_batch_size = train_batch_size * 4\n stats = {}\n stats['batch_id'] = batch_id + 1\n stats['train_loss'] = eval_loss(model, eval_train, eval_batch_size)\n if eval_dev:\n stats['dev_loss'] = eval_loss(model, eval_dev, eval_batch_size)\n if num_auccess_actions > 0:\n logging.info('Start AUCCESS eval')\n stats['train_auccess'] = _eval_and_score_actions(\n cache, model, eval_train[3], num_auccess_actions,\n eval_batch_size, eval_train[4])\n if eval_dev:\n stats['dev_auccess'] = _eval_and_score_actions(\n cache, model, eval_dev[3], num_auccess_actions,\n eval_batch_size, eval_dev[4])\n\n logging.info('__log__: %s', stats)\n\n report_every = 125\n logging.info('Report every %d; eval every %d', report_every, eval_every)\n if save_checkpoints_every > eval_every:\n save_checkpoints_every -= save_checkpoints_every % eval_every\n\n print_eval_stats(0)\n\n losses = []\n last_time = time.time()\n observations = observations.to(device)\n actions = actions.pin_memory()\n is_solved = is_solved.pin_memory()\n for batch_id, batch_indices in enumerate(train_indices_sampler(),\n start=batch_start):\n if batch_id >= updates:\n break\n if scheduler is not None:\n scheduler.step()\n model.train()\n batch_task_indices = task_indices[batch_indices]\n batch_observations = observations[batch_task_indices]\n batch_actions = actions[batch_indices].to(device, non_blocking=True)\n batch_is_solved = is_solved[batch_indices].to(device, non_blocking=True)\n\n optimizer.zero_grad()\n loss = model.ce_loss(model(batch_observations, batch_actions),\n batch_is_solved)\n loss.backward()\n optimizer.step()\n losses.append(loss.mean().item())\n if save_checkpoints_every > 0:\n if (batch_id + 1) % save_checkpoints_every == 0:\n fpath = os.path.join(output_dir, 'ckpt.%08d' % (batch_id + 1))\n logging.info('Saving: %s', fpath)\n torch.save(\n dict(\n model_kwargs=model_kwargs,\n model=model.state_dict(),\n optim=optimizer.state_dict(),\n 
done_batches=batch_id + 1,\n rng=rng.get_state(),\n scheduler=scheduler and scheduler.state_dict(),\n ), fpath)\n if (batch_id + 1) % eval_every == 0:\n print_eval_stats(batch_id)\n if (batch_id + 1) % report_every == 0:\n speed = report_every / (time.time() - last_time)\n last_time = time.time()\n logging.debug(\n 'Iter: %s, examples: %d, mean loss: %f, speed: %.1f batch/sec,'\n ' lr: %f', batch_id + 1, (batch_id + 1) * train_batch_size,\n np.mean(losses[-report_every:]), speed, get_lr(optimizer))\n return model.cpu()\n\n\ndef get_lr(optimizer):\n for param_group in optimizer.param_groups:\n return param_group['lr']\n\n\ndef eval_loss(model, data, batch_size):\n task_indices, is_solved, actions, _, observations = data\n losses = []\n observations = observations.to(model.device)\n with torch.no_grad():\n model.eval()\n for i in range(0, len(task_indices), batch_size):\n batch_indices = task_indices[i:i + batch_size]\n batch_task_indices = task_indices[batch_indices]\n batch_observations = observations[batch_task_indices]\n batch_actions = actions[batch_indices]\n batch_is_solved = is_solved[batch_indices]\n loss = model.ce_loss(model(batch_observations, batch_actions),\n batch_is_solved)\n losses.append(loss.item() * len(batch_indices))\n return sum(losses) / len(task_indices)\n\n\ndef eval_actions(model, actions, batch_size, observations):\n scores = []\n with torch.no_grad():\n model.eval()\n preprocessed = model.preprocess(\n torch.LongTensor(observations).unsqueeze(0))\n for batch_start in range(0, len(actions), batch_size):\n batch_end = min(len(actions), batch_start + batch_size)\n batch_actions = torch.FloatTensor(actions[batch_start:batch_end])\n batch_scores = model(None, batch_actions, preprocessed=preprocessed)\n assert len(batch_scores) == len(batch_actions), (\n batch_actions.shape, batch_scores.shape)\n scores.append(batch_scores.cpu().numpy())\n return np.concatenate(scores)\n\n\ndef _eval_and_score_actions(cache, model, simulator, num_actions, batch_size,\n observations):\n actions = cache.action_array[:num_actions]\n indices = np.random.RandomState(1).permutation(\n len(observations))[:AUCCESS_EVAL_TASKS]\n evaluator = phyre.Evaluator(\n [simulator.task_ids[index] for index in indices])\n for i, task_index in enumerate(indices):\n scores = eval_actions(model, actions, batch_size,\n observations[task_index]).tolist()\n\n _, sorted_actions = zip(\n *sorted(zip(scores, actions), key=lambda x: (-x[0], tuple(x[1]))))\n for action in sorted_actions:\n if (evaluator.get_attempts_for_task(i) >= phyre.MAX_TEST_ATTEMPTS):\n break\n status = simulator.simulate_action(task_index,\n action,\n need_images=False).status\n evaluator.maybe_log_attempt(i, status)\n return evaluator.get_aucess()\n",
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport torch\nimport torch.nn as nn\nimport torchvision\n\nimport phyre\n\nUSE_CUDA = torch.cuda.is_available()\nDEVICE = torch.device('cuda:0' if USE_CUDA else 'cpu')\n\n\nclass ActionNetwork(nn.Module):\n\n def __init__(self, action_size, output_size, hidden_size=256, num_layers=1):\n super().__init__()\n self.layers = nn.ModuleList([nn.Linear(action_size, hidden_size)])\n for _ in range(1, num_layers):\n self.layers.append(nn.Linear(hidden_size, hidden_size))\n self.output = nn.Linear(hidden_size, output_size)\n\n def forward(self, tensor):\n for layer in self.layers:\n tensor = nn.functional.relu(layer(tensor), inplace=True)\n return self.output(tensor)\n\n\nclass FilmActionNetwork(nn.Module):\n\n def __init__(self, action_size, output_size, **kwargs):\n super().__init__()\n self.net = ActionNetwork(action_size, output_size * 2, **kwargs)\n\n def forward(self, actions, image):\n beta, gamma = torch.chunk(self.net(actions).unsqueeze(-1).unsqueeze(-1),\n chunks=2,\n dim=1)\n return image * beta + gamma\n\n\nclass SimpleNetWithAction(nn.Module):\n\n def __init__(self, action_size, action_network_kwargs=None):\n super().__init__()\n action_network_kwargs = action_network_kwargs or {}\n self.stem = nn.Sequential(\n nn.Conv2d(phyre.NUM_COLORS, 3, kernel_size=1, bias=False),\n nn.BatchNorm2d(3),\n nn.ReLU(inplace=True),\n nn.Conv2d(3, 64, kernel_size=7, stride=4, padding=3, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 64, kernel_size=5, stride=2, padding=2, bias=False),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n nn.Conv2d(64, 128, kernel_size=5, stride=2, padding=2, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=5, stride=2, padding=2, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n nn.Conv2d(128, 128, kernel_size=5, stride=2, padding=2, bias=False),\n nn.BatchNorm2d(128),\n nn.ReLU(inplace=True),\n )\n self.action_net = ActionNetwork(action_size, 128,\n **action_network_kwargs)\n\n @property\n def device(self):\n if hasattr(self, 'parameters') and next(self.parameters()).is_cuda:\n return 'cuda'\n else:\n return 'cpu'\n\n def preprocess(self, observations):\n device = self.device\n image = _image_colors_to_onehot(\n observations.to(dtype=torch.long, device=device))\n return dict(features=self.stem(image).squeeze(-1).squeeze(-1))\n\n def forward(self, observations, actions, preprocessed=None):\n if preprocessed is None:\n preprocessed = self.preprocess(observations)\n return self._forward(actions, **preprocessed)\n\n def _forward(self, actions, features):\n actions = self.action_net(actions.to(features.device))\n return (actions * features).sum(-1) / 
(actions.shape[-1]**0.5)\n\n def ce_loss(self, decisions, targets):\n targets = torch.ByteTensor(targets).float().to(decisions.device)\n return nn.functional.binary_cross_entropy_with_logits(\n decisions, targets)\n\n\ndef _get_fusution_points(fusion_place_spec, max_points):\n if fusion_place_spec == 'all':\n return tuple(range(max_points))\n elif fusion_place_spec == 'none':\n return tuple()\n else:\n return tuple(int(fusion_place_spec),)\n\n\nclass ResNet18FilmAction(nn.Module):\n\n def __init__(self,\n action_size,\n action_layers=1,\n action_hidden_size=256,\n fusion_place='last'):\n super().__init__()\n net = torchvision.models.resnet18(pretrained=False)\n conv1 = nn.Conv2d(phyre.NUM_COLORS,\n 64,\n kernel_size=7,\n stride=2,\n padding=3,\n bias=False)\n self.register_buffer('embed_weights', torch.eye(phyre.NUM_COLORS))\n self.stem = nn.Sequential(conv1, net.bn1, net.relu, net.maxpool)\n self.stages = nn.ModuleList(\n [net.layer1, net.layer2, net.layer3, net.layer4])\n\n def build_film(output_size):\n return FilmActionNetwork(action_size,\n output_size,\n hidden_size=action_hidden_size,\n num_layers=action_layers)\n\n assert fusion_place in ('first', 'last', 'all', 'none', 'last_single')\n\n self.last_network = None\n if fusion_place == 'all':\n self.action_networks = nn.ModuleList(\n [build_film(size) for size in (64, 64, 128, 256)])\n elif fusion_place == 'last':\n # Save module as attribute.\n self._action_network = build_film(256)\n self.action_networks = [None, None, None, self._action_network]\n elif fusion_place == 'first':\n # Save module as attribute.\n self._action_network = build_film(64)\n self.action_networks = [self._action_network, None, None, None]\n elif fusion_place == 'last_single':\n # Save module as attribute.\n self.last_network = build_film(512)\n self.action_networks = [None, None, None, None]\n elif fusion_place == 'none':\n self.action_networks = [None, None, None, None]\n else:\n raise Exception('Unknown fusion place: %s' % fusion_place)\n self.reason = nn.Linear(512, 1)\n\n @property\n def device(self):\n if hasattr(self, 'parameters') and next(self.parameters()).is_cuda:\n return 'cuda'\n else:\n return 'cpu'\n\n def preprocess(self, observations):\n image = self._image_colors_to_onehot(observations)\n features = self.stem(image)\n for stage, act_layer in zip(self.stages, self.action_networks):\n if act_layer is not None:\n break\n features = stage(features)\n else:\n features = nn.functional.adaptive_max_pool2d(features, 1)\n return dict(features=features)\n\n def forward(self, observations, actions, preprocessed=None):\n if preprocessed is None:\n preprocessed = self.preprocess(observations)\n return self._forward(actions, **preprocessed)\n\n def _forward(self, actions, features):\n actions = actions.to(features.device)\n skip_compute = True\n for stage, film_layer in zip(self.stages, self.action_networks):\n if film_layer is not None:\n skip_compute = False\n features = film_layer(actions, features)\n if skip_compute:\n continue\n features = stage(features)\n if not skip_compute:\n features = nn.functional.adaptive_max_pool2d(features, 1)\n if self.last_network is not None:\n features = self.last_network(actions, features)\n features = features.flatten(1)\n if features.shape[0] == 1 and actions.shape[0] != 1:\n # Haven't had a chance to use actions. 
So will match batch size as\n # in actions manually.\n features = features.expand(actions.shape[0], -1)\n return self.reason(features).squeeze(-1)\n\n def ce_loss(self, decisions, targets):\n targets = targets.to(dtype=torch.float, device=decisions.device)\n return nn.functional.binary_cross_entropy_with_logits(\n decisions, targets)\n\n def _image_colors_to_onehot(self, indices):\n onehot = torch.nn.functional.embedding(\n indices.to(dtype=torch.long, device=self.embed_weights.device),\n self.embed_weights)\n onehot = onehot.permute(0, 3, 1, 2).contiguous()\n return onehot\n\n\ndef _image_colors_to_onehot(indices):\n onehot = torch.nn.functional.embedding(\n indices, torch.eye(phyre.NUM_COLORS, device=indices.device))\n onehot = onehot.pertmute(0, 3, 1, 2).contiguous()\n return onehot\n"
] |
[
[
"torch.optim.Adam",
"torch.LongTensor",
"numpy.expand_dims",
"torch.nn.Parameter",
"torch.clamp_",
"torch.optim.lr_scheduler.CosineAnnealingLR",
"torch.load",
"numpy.arange",
"torch.tensor",
"numpy.concatenate",
"torch.FloatTensor",
"torch.no_grad",
"numpy.mean",
"numpy.random.RandomState"
],
[
"torch.nn.Sequential",
"torch.ByteTensor",
"torch.nn.functional.binary_cross_entropy_with_logits",
"torch.nn.Conv2d",
"torch.nn.ModuleList",
"torch.eye",
"torch.nn.Linear",
"torch.nn.functional.adaptive_max_pool2d",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.device",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
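The `FilmActionNetwork` recorded in `nets.py` above conditions image features on the action via FiLM: an MLP on the action emits per-channel `beta`/`gamma` that are split with `torch.chunk` and broadcast over the spatial dimensions. A minimal runnable sketch with toy sizes, where a single `nn.Linear` stands in for `ActionNetwork`:

```python
import torch
from torch import nn

action_size, channels = 3, 64
net = nn.Linear(action_size, channels * 2)  # stands in for ActionNetwork

actions = torch.randn(8, action_size)
image = torch.randn(8, channels, 32, 32)

# Split the action embedding into per-channel beta/gamma of shape
# (batch, channels, 1, 1), then modulate the feature map.
beta, gamma = torch.chunk(net(actions).unsqueeze(-1).unsqueeze(-1),
                          chunks=2, dim=1)
fused = image * beta + gamma  # broadcasts over H and W
print(fused.shape)  # torch.Size([8, 64, 32, 32])
```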
hugoladret/bw_invariance
|
[
"ebcd8fb30fa9464e9a4412b16ac435b9a512d3a9",
"ebcd8fb30fa9464e9a4412b16ac435b9a512d3a9"
] |
[
"python/bw_powerlaw.py",
"python/stim_figures.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 20 10:18:04 2020\n\n@author: Hugo\n\"\"\"\n\n\nimport stim\nimport plots\nimport LIF\nimport numpy as np \nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\nimport matplotlib\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\n\n# Parameters\n## Simulation parameters \nT = 50 # total simtime ; ms\ndt = 0.01 #timestep ; ms\nn_repeat = 2 # number of time we rerun the whole stimulation set, used for trial-to-trial variance\n\n## LIF parameters, note that we scale everything later to speed up computations\n## You probably don't want to touch this\nRm = 1 # resistance (kOhm)\nCm = 10 # capacitance (uF)\ntau_m = Rm*Cm # time constant (msec)\nrefrac_time = 1. # refractory period (msec)\nVth = 1. # spike threshold (V)\n\n## Stimulation parameters\nn_pars = 3 #number of parameters, either contrast or bandwidth\ncontrasts = np.linspace(1., 8., n_pars) #stimulation contrast, max = 5 is a good idea\nbandwidths = np.linspace(.2, .8, n_pars) # stimulation bandwidth, it's sigma of gaussian\n\n## Finn parameters\nk = 3.5 # power law scale \na = -.5 # power law exponent\nloc = .8 # noise normal law center\nscale = .5 # noise normal law var\n\n## Bandwidth parameters\nk_bw = 3.5 # other neurons' power law scale\na_bw = -.5 # multiplier of bw on other neurons\n\n## Plotting parameters\nlabels = bandwidths #rescale for actual stim values\ncolors = plt.cm.inferno(np.linspace(.9, .2, len(bandwidths))) #tc colormap\n\n# Initialization\ntot_steps = int(T/dt) # length of the time vector \ntime = np.linspace(0, T+dt, tot_steps) # time vector\n\n\n# Stimulations\ninput_tcs = [] \nmod_tcs = []\npwlaws = [] \nfor i, bw in enumerate(bandwidths) :\n inp = stim.generate_stim(mu = 0., sig = bw, max_amp = np.max(contrasts))\n \n # Compute the right part\n new_pwlaw = stim.power_law(k = np.max(inp), \n x = np.linspace(1, 3, len(inp)//2),\n a = -3.5*np.exp(bw))\n mult = inp[len(inp)//2:]-(1/new_pwlaw)\n mult[mult<0] = 0\n \n # Compute the left part \n mult_left = mult[::-1]\n \n mod_tcs.append(np.concatenate((mult_left, mult)))\n pwlaws.append(new_pwlaw)\n input_tcs.append(inp)\n\n\n# Simulation\nout_vms, out_spikes = [], []\nfor inp in tqdm(mod_tcs, 'Simulating') :\n vm, spikes, noise = LIF.simulate(input_tc = inp,\n time = time, tot_steps = tot_steps, n_repeat = n_repeat,\n dt = dt, \n Rm = Rm, Cm = Cm, tau_m = tau_m, refrac_time = refrac_time,\n Vth = Vth,\n loc = loc, scale = scale)\n out_vms.append(vm)\n out_spikes.append(spikes)\n \nout_vms = np.asarray(out_vms) # shape stims, ori, repeats, timesteps\nout_spikes = np.asarray(out_spikes) # shape stims, ori, repeats\n\n\n# Stimulation without pwlaw \nn_out_vms, n_out_spikes = [], []\nfor inp in tqdm(input_tcs, 'Simulating') :\n vm, spikes, noise = LIF.simulate(input_tc = inp,\n time = time, tot_steps = tot_steps, n_repeat = n_repeat,\n dt = dt, \n Rm = Rm, Cm = Cm, tau_m = tau_m, refrac_time = refrac_time,\n Vth = Vth,\n loc = loc, scale = scale)\n n_out_vms.append(vm)\n n_out_spikes.append(spikes)\n \nn_out_vms = np.asarray(n_out_vms) # shape stims, ori, repeats, timesteps\nn_out_spikes = np.asarray(n_out_spikes) # shape stims, ori, repeats\n\n# Plotting \n# plot_stim = False\n# if plot_stim :\n# fig, ax = plt.subplots(figsize = (8,6))\n# for i in range(n_pars) :\n# plots.plot_stimulation(ax, input_tc = input_tcs[i],\n# lab = labels[i], col = colors[i])\n# plots.plot_stimulation(ax, input_tc = mod_tcs[i],\n# lab = labels[i], col = colors[i])\n \nplot_spike = True \nhwhhs = 
[]\nif plot_spike:\n fig, ax = plt.subplots(figsize = (8,6))\n for i in range(n_pars) :\n hwhh = plots.plot_spike_tc(ax = ax, \n all_spiketimes = out_spikes[i,:,:],\n lab = labels[i], col = colors[i])\n _ = plots.plot_spike_tc(ax = ax,\n all_spiketimes = n_out_spikes[i,:,:],\n lab = labels[i], col = colors[i],\n ls = '--')\n #ax.plot()\n hwhhs.append(hwhh)\n ax.legend(ncol = 1, fontsize = 14, frameon = True, title = r'B$_\\theta$')\n ax.set_xticks([-3, -1.5, 0, 1.5, 3])\n ax.set_xticklabels(['-90', '-45', r'$\\theta_{0}$', '+45', '+90'])\n ax.tick_params(axis='both', labelsize=14)\n ax.set_xlabel('Stimulation orientation (°)', fontsize = 18)\n fig.savefig('./figs/fig2d.pdf' , format = 'pdf', dpi = 100, bbox_inches = 'tight', transparent = True)",
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 7 17:38:20 2020\n\n@author: Admin\nThis is to generate the illustration of stimulations\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport MotionClouds as mc\n\nimport matplotlib\nmatplotlib.rcParams['pdf.fonttype'] = 42\nmatplotlib.rcParams['ps.fonttype'] = 42\n\nB_thetas = np.linspace(np.pi/2, 0.05, 3)/ 2.5\ncolors = plt.cm.inferno(np.linspace(.8, .2, len(B_thetas)))\n\n# Spatial frequency, in cpd\nsf = 0.0616279 # see stimulation notebooks\nsf_0 = sf\nB_sf = sf_0\n\n\ndef generate_cloud(theta, b_theta, phase,\n N_X, N_Y, seed, contrast=1.,\n transition=False):\n \n \n fx, fy, ft = mc.get_grids(N_X, N_Y, 1)\n disk = mc.frequency_radius(fx, fy, ft) < .5\n\n if b_theta == 0 : \n mc_i = mc.envelope_gabor(fx, fy, ft,\n V_X=0., V_Y=0., B_V=0.,\n sf_0=sf_0, B_sf=B_sf,\n theta=0, B_theta=b_theta)\n mc_i = np.rot90(mc_i)\n else :\n mc_i = mc.envelope_gabor(fx, fy, ft,\n V_X=0., V_Y=0., B_V=0.,\n sf_0=sf_0, B_sf=B_sf,\n theta=theta, B_theta=b_theta)\n \n im_ = np.zeros((N_X, N_Y, 1))\n im_ += mc.rectif(mc.random_cloud(mc_i, seed=seed),\n contrast=2)\n im_ += -.5\n return im_[:,:,0]\n\ndef generate_gratings(n_sins, imsize, div):\n sinwave = np.sin(np.linspace(0, np.pi * n_sins, imsize))\n grating = np.tile(sinwave, (imsize, 1))\n \n return grating/div\n \n\n# Generate the MotionClouds\nfig, axs = plt.subplots(figsize = (16,8), ncols = len(B_thetas), nrows = 1,\n gridspec_kw = {'wspace':0.01, 'hspace':0.05})\n\ntheta = np.pi/2\nN_X, N_Y = 512, 512\n\nfor ibt in range(0,len(B_thetas)) :\n ax = axs[ibt]\n img = generate_cloud(theta = theta, b_theta = B_thetas[ibt], phase = 0,\n N_X = N_X, N_Y = N_Y, seed = 42, contrast=1.,\n transition=False)\n im = ax.imshow(img, cmap = 'gray', interpolation = 'bilinear')\n im.set_clim(-1,1)\n ax.set_yticks([])\n ax.set_xticks([])\n ax.set_aspect('equal')\n\n for edge in ['top', 'bottom', 'left', 'right'] :\n ax.spines[edge].set_color(colors[ibt])\n ax.spines[edge].set_linewidth(2) \n \nfig.savefig('./figs/fig2z.pdf' , format = 'pdf', dpi = 100, bbox_inches = 'tight', transparent = True)\n\n\n# Generate the MotionClouds distributions (fig1e)\nfrom scipy.special import i0 as I0\n\ndef vm(theta, amp, theta0, Btheta):\n return amp * np.exp((np.cos(2*(theta-theta0))-1) / 4 / Btheta**2)\n\nfig, ax = plt.subplots(figsize = (8,6))\n\n# These aren't exactly the values of Bthetas we used, but \n# they are on the same illustrative range (can't do 0 in a real VM)\nB_thetas = np.linspace(np.pi/2, 0.115 , 3) / 2.5\nlab_bt = np.linspace(np.pi/2, 0. 
, 8) / 2.5\n\nlabels = [r'$\\frac{\\pi}{5}$',\n r'$\\frac{\\pi}{2.5}$',\n '0']\nfor i, bt in enumerate(B_thetas) :\n xs = np.linspace(0, np.pi, 15000)\n vonmises = vm(theta = xs,\n theta0 = np.pi/2,\n Btheta = bt,\n amp = 1)\n ax.plot(np.linspace(0, np.pi, 15000),\n vonmises,\n color = colors[i],\n label = labels[i])\n\nax.set_xticks([0, np.pi/4, np.pi/2, 3*np.pi/4, np.pi])\nax.set_xticklabels(['-90', '-45', r'$\\theta_{0}$', '+45', '+90'])\nax.tick_params(axis='both', labelsize=12)\n\nax.set_xlabel('Stimulation orientation (°)', fontsize = 14)\nax.set_ylabel('Distribution energy (u.a.)', fontsize = 14)\n\nax.spines['right'].set_visible(False)\nax.spines['top'].set_visible(False)\nax.set_ylim(-.05, 1.1)\nfig.legend(ncol = 1, fontsize = 14, frameon = True, title = r'B$_\\theta$')\nfig.tight_layout()\nfig.savefig('./figs/fig1f.pdf' , format = 'pdf', dpi = 100, bbox_inches = 'tight', transparent = True)\n\n\n\n\n\n# Generate the gratings (fig1a)\nfig, axs = plt.subplots(figsize = (16,8), ncols = len(B_thetas), nrows = 1,\n gridspec_kw = {'wspace':0.01, 'hspace':0.05})\ncontrasts = [1, 2, 3]\nfor ibt in range(0,len(B_thetas)) :\n ax = axs[ibt]\n grat = generate_gratings(n_sins = 20, imsize = 500, div = contrasts[ibt])\n im = ax.imshow(grat, cmap = 'gray', interpolation = 'bilinear')\n \n im.set_clim(-1,1)\n ax.set_yticks([])\n ax.set_xticks([])\n ax.set_aspect('equal')\n \nfig.savefig('./figs/fig1a.pdf' , format = 'pdf', dpi = 100, bbox_inches = 'tight', transparent = True)"
] |
[
[
"numpy.linspace",
"numpy.asarray",
"matplotlib.pyplot.subplots",
"numpy.concatenate",
"numpy.max",
"numpy.exp"
],
[
"numpy.rot90",
"numpy.linspace",
"numpy.tile",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
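A runnable sketch (illustrative parameter values) of the von Mises tuning curve that the `stim_figures.py` recorded above evaluates for each orientation bandwidth:

```python
import numpy as np

def vm(theta, amp, theta0, b_theta):
    # Peak at theta0; b_theta sets the tuning bandwidth.
    return amp * np.exp((np.cos(2 * (theta - theta0)) - 1) / 4 / b_theta**2)

thetas = np.linspace(0, np.pi, 7)
print(vm(thetas, amp=1.0, theta0=np.pi / 2, b_theta=np.pi / 5))
```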
612twilight/CogDL-TensorFlow
|
[
"f850bae3f7fc2cf98501623b0b2b291ff68c9097"
] |
[
"cogdl/models/emb/grarep.py"
] |
[
"import numpy as np\nimport networkx as nx\nimport scipy.sparse as sp\nfrom sklearn import preprocessing\nfrom .. import register_model\nfrom ..base_model import BaseModel\n\n\n@register_model(\"grarep\")\nclass GraRep(BaseModel):\n @staticmethod\n def add_args(parser):\n \"\"\"Add model-specific arguments to the parser.\"\"\"\n # fmt: off\n parser.add_argument('--step', type=int, default=5,\n help='Number of matrix step in GraRep. Default is 5.')\n # fmt: on\n\n @classmethod\n def build_model_from_args(cls, args):\n return cls(args.hidden_size, args.step)\n\n def __init__(self, dimension, step):\n super(GraRep, self).__init__()\n self.dimension = dimension\n self.step = step\n\n def train(self, G):\n self.G = G\n self.num_node = G.number_of_nodes()\n A = np.asarray(nx.adjacency_matrix(self.G).todense(), dtype=float)\n A = preprocessing.normalize(A, \"l1\")\n\n log_beta = np.log(1.0 / self.num_node)\n A_list = [A]\n T_list = [sum(A).tolist()]\n temp = A\n # calculate A^1, A^2, ... , A^step, respectively\n for i in range(self.step - 1):\n temp = temp.dot(A)\n A_list.append(A)\n T_list.append(sum(temp).tolist())\n\n final_emb = np.zeros((self.num_node, 1))\n for k in range(self.step):\n for j in range(A.shape[1]):\n A_list[k][:, j] = (\n np.log(A_list[k][:, j] / T_list[k][j] + 1e-20) - log_beta\n )\n for i in range(A.shape[0]):\n A_list[k][i, j] = max(A_list[k][i, j], 0)\n # concatenate all k-step representations\n if k == 0:\n dimension = self.dimension - int(self.dimension / self.step) * (\n self.step - 1\n )\n final_emb = self._get_embedding(A_list[k], dimension)\n else:\n W = self._get_embedding(A_list[k], self.dimension / self.step)\n final_emb = np.hstack((final_emb, W))\n\n self.embeddings = final_emb\n return self.embeddings\n\n def _get_embedding(self, matrix, dimension):\n # get embedding from svd and process normalization for ut\n ut, s, _ = sp.linalg.svds(matrix, int(dimension))\n emb_matrix = ut * np.sqrt(s)\n emb_matrix = preprocessing.normalize(emb_matrix, \"l2\")\n return emb_matrix\n"
] |
[
[
"numpy.hstack",
"numpy.log",
"numpy.sqrt",
"sklearn.preprocessing.normalize",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
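The `_get_embedding` step of the `GraRep` code above reduces each k-step matrix with a truncated SVD and L2-normalizes the rows; a minimal sketch with a random non-negative matrix standing in for the log-transition matrix:

```python
import numpy as np
from scipy.sparse.linalg import svds
from sklearn import preprocessing

matrix = np.abs(np.random.randn(50, 50))  # stands in for the log-transition matrix
ut, s, _ = svds(matrix, k=8)              # top-8 singular triplets
emb = preprocessing.normalize(ut * np.sqrt(s), "l2")
print(emb.shape)  # (50, 8)
```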
discover-volve/welly
|
[
"056c1ad6f89363a4c520865af9dbbfb089c6bed2"
] |
[
"welly/curve.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nDefines log curves.\n\n:copyright: 2016 Agile Geoscience\n:license: Apache 2.0\n\"\"\"\nfrom __future__ import division\n\nimport numpy as np\nfrom scipy.interpolate import interp1d\n\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom matplotlib.patches import PathPatch\nimport warnings\n\n\nfrom . import utils\n\n\nclass CurveError(Exception):\n \"\"\"\n Generic error class.\n \"\"\"\n pass\n\n\nclass Curve(np.ndarray):\n \"\"\"\n A fancy ndarray. Gives some utility functions, plotting, etc, for curve\n data.\n \"\"\"\n def __new__(cls, data, basis=None, params=None):\n \"\"\"\n I am just following the numpy guide for subclassing ndarray...\n \"\"\"\n obj = np.asarray(data).view(cls).copy()\n\n params = params or {}\n\n for k, v in params.items():\n setattr(obj, k, v)\n\n if basis is not None:\n if basis[0] > basis[1]:\n basis = np.flipud(basis)\n setattr(obj, 'start', basis[0])\n setattr(obj, 'step', basis[1]-basis[0])\n\n return obj\n\n def __array_finalize__(self, obj):\n \"\"\"\n I am just following the numpy guide for subclassing ndarray...\n \"\"\"\n if obj is None:\n return\n\n if obj.size == 1:\n return float(obj)\n\n self.start = getattr(obj, 'start', 0)\n self.step = getattr(obj, 'step', 1)\n self.mnemonic = getattr(obj, 'mnemonic', None)\n self.units = getattr(obj, 'units', None)\n self.run = getattr(obj, 'run', 0)\n self.null = getattr(obj, 'null', -999.25)\n self.service_company = getattr(obj, 'service_company', None)\n self.date = getattr(obj, 'date', None)\n self.code = getattr(obj, 'code', None)\n self.basis_units = getattr(obj, 'basis_units', None)\n\n def __getitem__(self, items):\n \"\"\"\n Update the basis when a Curve is sliced.\n \"\"\"\n newarr = self.copy()\n if isinstance(items, slice):\n if (items.start is not None) and (items.start > 0):\n newarr.start = newarr.basis.copy()[items.start]\n if items.step is not None:\n newarr.step = newarr.step * items.step\n\n return np.ndarray.__getitem__(newarr, items)\n\n def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result\n\n def _repr_html_(self):\n \"\"\"\n Jupyter Notebook magic repr function.\n \"\"\"\n if self.size < 10:\n return np.ndarray.__repr__(self)\n attribs = self.__dict__.copy()\n\n # Header.\n row1 = '<tr><th style=\"text-align:center;\" colspan=\"2\">{} [{{}}]</th></tr>'\n rows = row1.format(attribs.pop('mnemonic'))\n rows = rows.format(attribs.pop('units', '–'))\n row2 = '<tr><td style=\"text-align:center;\" colspan=\"2\">{:.4f} : {:.4f} : {:.4f}</td></tr>'\n rows += row2.format(attribs.pop('start'), self.stop, attribs.pop('step'))\n\n # Curve attributes.\n s = '<tr><td><strong>{k}</strong></td><td>{v}</td></tr>'\n for k, v in attribs.items():\n rows += s.format(k=k, v=v)\n\n # Curve stats.\n rows += '<tr><th style=\"border-top: 2px solid #000; text-align:center;\" colspan=\"2\"><strong>Stats</strong></th></tr>'\n stats = self.get_stats()\n s = '<tr><td><strong>samples (NaNs)</strong></td><td>{samples} ({nulls})</td></tr>'\n s += '<tr><td><strong><sub>min</sub> mean <sup>max</sup></strong></td>'\n s += '<td><sub>{min:.2f}</sub> {mean:.3f} <sup>{max:.2f}</sup></td></tr>'\n rows += s.format(**stats)\n\n # Curve preview.\n s = '<tr><th style=\"border-top: 2px solid #000;\">Depth</th><th style=\"border-top: 2px solid #000;\">Value</th></tr>'\n rows += s.format(self.start, self[0])\n s = '<tr><td>{:.4f}</td><td>{:.4f}</td></tr>'\n for depth, value in zip(self.basis[:3], self[:3]):\n rows += 
s.format(depth, value)\n rows += '<tr><td>⋮</td><td>⋮</td></tr>'\n for depth, value in zip(self.basis[-3:], self[-3:]):\n rows += s.format(depth, value)\n\n # Footer.\n # ...\n\n # End.\n html = '<table>{}</table>'.format(rows)\n return html\n\n @property\n def values(self):\n return np.array(self)\n\n @property\n def stop(self):\n \"\"\"\n The stop depth. Computed on the fly from the start,\n the step, and the length of the curve.\n \"\"\"\n return self.start + (self.shape[0] - 1) * self.step\n\n @property\n def basis(self):\n \"\"\"\n The depth or time basis of the curve's points. Computed\n on the fly from the start, stop and step.\n\n Returns\n ndarray. The array, the same length as the curve.\n \"\"\"\n return np.linspace(self.start, self.stop, self.shape[0], endpoint=True)\n\n def describe(self):\n \"\"\"\n Return basic statistics about the curve.\n \"\"\"\n stats = {}\n stats['samples'] = self.shape[0]\n stats['nulls'] = self[np.isnan(self)].shape[0]\n stats['mean'] = float(np.nanmean(self.real))\n stats['min'] = float(np.nanmin(self.real))\n stats['max'] = float(np.nanmax(self.real))\n return stats\n\n get_stats = describe\n\n @classmethod\n def from_lasio_curve(cls, curve,\n depth=None,\n basis=None,\n start=None,\n stop=None,\n step=0.1524,\n run=-1,\n null=-999.25,\n service_company=None,\n date=None,\n basis_units=None):\n \"\"\"\n Makes a curve object from a lasio curve object and either a depth\n basis or start and step information.\n\n Args:\n curve (ndarray)\n depth (ndarray)\n basis (ndarray)\n start (float)\n stop (float)\n step (float): default: 0.1524\n run (int): default: -1\n null (float): default: -999.25\n service_company (str): Optional.\n date (str): Optional.\n basis_units (str): the units of the basis.\n\n Returns:\n Curve. An instance of the class.\n \"\"\"\n data = curve.data\n unit = curve.unit\n\n # See if we have uneven sampling.\n if depth is not None:\n d = np.diff(depth)\n if not np.allclose(d - np.mean(d), np.zeros_like(d)):\n # Sampling is uneven.\n m = \"Irregular sampling in depth is not supported. \"\n m += \"Interpolating to regular basis.\"\n warnings.warn(m)\n step = np.nanmedian(d)\n start, stop = depth[0], depth[-1]+0.00001 # adjustment\n basis = np.arange(start, stop, step)\n data = np.interp(basis, depth, data)\n else:\n step = np.nanmedian(d)\n start = depth[0]\n\n # Carry on with easier situations.\n if start is None:\n if basis is not None:\n start = basis[0]\n step = basis[1] - basis[0]\n else:\n raise CurveError(\"You must provide a basis or a start depth.\")\n\n if step == 0:\n if stop is None:\n raise CurveError(\"You must provide a step or a stop depth.\")\n else:\n step = (stop - start) / (curve.data.shape[0] - 1)\n\n # Interpolate into this.\n\n params = {}\n params['mnemonic'] = curve.mnemonic\n params['description'] = curve.descr\n params['start'] = start\n params['step'] = step\n params['units'] = unit\n params['run'] = run\n params['null'] = null\n params['service_company'] = service_company\n params['date'] = date\n params['code'] = curve.API_code\n params['basis_units'] = basis_units\n\n return cls(data, params=params)\n\n def get_alias(self, alias):\n \"\"\"\n Given a mnemonic, get the alias name(s) it falls under. 
If there aren't\n any, you get an empty list.\n \"\"\"\n alias = alias or {}\n return [k for k, v in alias.items() if self.mnemonic in v]\n\n def plot_2d(self, ax=None,\n width=None,\n aspect=60,\n cmap=None,\n curve=False,\n ticks=(1, 10),\n return_fig=False,\n **kwargs,\n ):\n \"\"\"\n Plot a 2D curve.\n\n Args:\n ax (ax): A matplotlib axis.\n width (int): The width of the image.\n aspect (int): The aspect ratio (not quantitative at all).\n cmap (str): The colourmap to use.\n curve (bool): Whether to plot the curve as well.\n ticks (tuple): The tick interval on the y-axis.\n return_fig (bool): whether to return the matplotlib figure.\n Default False.\n\n Returns:\n ax. If you passed in an ax, otherwise None.\n \"\"\"\n # Set up the figure.\n if ax is None:\n fig = plt.figure(figsize=(2, 10))\n ax = fig.add_subplot(111)\n return_ax = False\n else:\n return_ax = True\n\n # Set up the data.\n cmap = cmap or 'viridis'\n default = int(self.shape[0] / aspect)\n if self.ndim == 1:\n a = np.expand_dims(self, axis=1)\n a = np.repeat(a, width or default, axis=1)\n elif self.ndim == 2:\n a = self[:, :width] if width < self.shape[1] else self\n elif self.ndim == 3:\n if 2 < self.shape[-1] < 5:\n # Interpret as RGB or RGBA.\n a = utils.normalize(np.copy(self))\n cmap = None # Actually doesn't matter.\n else:\n # Take first slice.\n a = self[:, :width, 0] if width < self.shape[1] else self[..., 0]\n else:\n raise NotImplementedError(\"Can only handle up to 3 dimensions.\")\n\n # At this point, a is either a 2D array, or a 2D (rgb) array.\n extent = [0, width or default, self.stop, self.start]\n im = ax.imshow(a, cmap=cmap, extent=extent)\n\n if curve:\n # Draw the path.\n # TODO: add default kwargs?\n paths = ax.fill_betweenx(self.basis, self, self.min(),\n facecolor='none',\n **kwargs,\n )\n\n # Make the 'fill' mask and clip the background image with it.\n patch = PathPatch(paths._paths[0], visible=False)\n ax.add_artist(patch)\n im.set_clip_path(patch)\n\n ax.set_xticks([])\n\n # Rely on interval order.\n lower, upper = self.stop, self.start\n rng = abs(upper - lower)\n\n ax.set_ylim([lower, upper])\n\n # Make sure ticks is a tuple.\n try:\n ticks = tuple(ticks)\n except TypeError:\n ticks = (1, ticks)\n\n # Avoid MAXTICKS error.\n while rng/ticks[0] > 250:\n mi, ma = 10*ticks[0], ticks[1]\n if ma <= mi:\n ma = 10 * mi\n ticks = (mi, ma)\n\n # Carry on plotting...\n minorLocator = mpl.ticker.MultipleLocator(ticks[0])\n ax.yaxis.set_minor_locator(minorLocator)\n\n majorLocator = mpl.ticker.MultipleLocator(ticks[1])\n majorFormatter = mpl.ticker.FormatStrFormatter('%d')\n ax.yaxis.set_major_locator(majorLocator)\n ax.yaxis.set_major_formatter(majorFormatter)\n\n ax.yaxis.set_ticks_position('left')\n ax.get_yaxis().set_tick_params(which='both', direction='out')\n\n if return_ax:\n return ax\n elif return_fig:\n return fig\n else:\n return None\n\n def plot(self, ax=None, legend=None, return_fig=False, **kwargs):\n \"\"\"\n Plot a curve.\n\n Args:\n ax (ax): A matplotlib axis.\n legend (striplog.legend): A legend. Optional.\n return_fig (bool): whether to return the matplotlib figure.\n Default False.\n kwargs: Arguments for ``ax.set()``\n\n Returns:\n ax. 
If you passed in an ax, otherwise None.\n \"\"\"\n if ax is None:\n fig = plt.figure(figsize=(2, 10))\n ax = fig.add_subplot(111)\n return_ax = False\n else:\n return_ax = True\n\n d = None\n if legend is not None:\n try:\n d = legend.get_decor(self)\n except:\n pass\n\n if d is not None:\n kwargs['color'] = d.colour\n kwargs['lw'] = getattr(d, 'lineweight', None) or getattr(d, 'lw', 1)\n kwargs['ls'] = getattr(d, 'linestyle', None) or getattr(d, 'ls', '-')\n\n # Attempt to get axis parameters from decor.\n axkwargs = {}\n\n xlim = getattr(d, 'xlim', None)\n if xlim is not None:\n axkwargs['xlim'] = list(map(float, xlim.split(',')))\n\n xticks = getattr(d, 'xticks', None)\n if xticks is not None:\n axkwargs['xticks'] = list(map(float, xticks.split(',')))\n\n xscale = getattr(d, 'xscale', None)\n if xscale is not None:\n axkwargs['xscale'] = xscale\n\n ax.set(**axkwargs)\n\n ax.plot(self, self.basis, **kwargs)\n ax.set_title(self.mnemonic) # no longer needed\n ax.set_xlabel(self.units)\n\n if False: # labeltop of axes?\n ax.xaxis.tick_top()\n\n if True: # rotate x-tick labels\n labels = ax.get_xticklabels()\n for label in labels:\n label.set_rotation(90)\n\n ax.set_ylim([self.stop, self.start])\n ax.grid('on', color='k', alpha=0.33, lw=0.33, linestyle='-')\n\n if return_ax:\n return ax\n elif return_fig:\n return fig\n else:\n return None\n\n def extrapolate(self):\n \"\"\"\n From ``bruges``\n\n Extrapolate up and down an array from the first and last non-NaN samples.\n\n E.g. Continue the first and last non-NaN values of a log up and down.\n \"\"\"\n return utils.extrapolate(self)\n\n def top_and_tail(self):\n pass\n\n def interpolate(self):\n \"\"\"\n Interpolate across any missing zones.\n\n TODO\n Allow spline interpolation.\n \"\"\"\n nans, x = utils.nan_idx(self)\n self[nans] = np.interp(x(nans), x(~nans), self[~nans])\n return self\n\n def interpolate_where(self, condition):\n \"\"\"\n Remove then interpolate across\n \"\"\"\n raise NotImplementedError()\n self[self < 0] = np.nan\n return self.interpolate() \n\n def to_basis_like(self, basis):\n \"\"\"\n Make a new curve in a new basis, given an existing one. Wraps\n ``to_basis()``.\n\n Pass in a curve or the basis of a curve.\n\n Args:\n basis (ndarray): A basis, but can also be a Curve instance.\n\n Returns:\n Curve. The current instance in the new basis.\n \"\"\"\n try: # To treat as a curve.\n curve = basis\n basis = curve.basis\n undefined = curve.null\n except:\n undefined = None\n\n return self.to_basis(basis=basis,\n undefined=undefined)\n\n def to_basis(self, basis=None,\n start=None,\n stop=None,\n step=None,\n undefined=None):\n \"\"\"\n Make a new curve in a new basis, given a basis, or a new start, step,\n and/or stop. You only need to set the parameters you want to change.\n If the new extents go beyond the current extents, the curve is padded\n with the ``undefined`` parameter.\n\n Args:\n basis (ndarray)\n start (float)\n stop (float)\n step (float)\n undefined (float)\n\n Returns:\n Curve. 
The current instance in the new basis.\n \"\"\"\n if basis is None:\n if start is None:\n new_start = self.start\n else:\n new_start = start\n new_step = step or self.step\n new_stop = stop or self.stop\n # new_adj_stop = new_stop + new_step/100 # To guarantee inclusion.\n # basis = np.arange(new_start, new_adj_stop, new_step)\n steps = 1 + (new_stop - new_start) / new_step\n basis = np.linspace(new_start, new_stop, int(steps), endpoint=True)\n else:\n new_start = basis[0]\n new_step = basis[1] - basis[0]\n\n if undefined is None:\n undefined = np.nan\n else:\n undefined = undefined\n\n interp = interp1d(self.basis, self,\n bounds_error=False,\n fill_value=undefined)\n\n data = interp(basis)\n\n params = self.__dict__.copy()\n params['step'] = float(new_step)\n params['start'] = float(new_start)\n\n return Curve(data, params=params)\n\n def _read_at(self, d,\n interpolation='linear',\n index=False,\n return_basis=False):\n \"\"\"\n Private function. Implements read_at() for a single depth.\n\n Args:\n d (float)\n interpolation (str)\n index(bool)\n return_basis (bool)\n\n Returns:\n float\n \"\"\"\n method = {'linear': utils.linear,\n 'none': None}\n\n i, d = utils.find_previous(self.basis,\n d,\n index=True,\n return_distance=True)\n\n if index:\n return i\n else:\n return method[interpolation](self[i], self[i+1], d)\n\n def read_at(self, d, **kwargs):\n \"\"\"\n Read the log at a specific depth or an array of depths.\n\n Args:\n d (float or array-like)\n interpolation (str)\n index(bool)\n return_basis (bool)\n\n Returns:\n float or ndarray.\n \"\"\"\n try:\n return np.array([self._read_at(depth, **kwargs) for depth in d])\n except:\n return self._read_at(d, **kwargs)\n\n def quality(self, tests, alias=None):\n \"\"\"\n Run a series of tests and return the corresponding results.\n\n Args:\n tests (list): a list of functions.\n alias (dict): a dictionary mapping mnemonics to lists of mnemonics.\n\n Returns:\n list. The results. Stick to booleans (True = pass) or ints.\n \"\"\"\n # Gather the test s.\n # First, anything called 'all', 'All', or 'ALL'.\n # Second, anything with the name of the curve we're in now.\n # Third, anything that the alias list has for this curve.\n # (This requires a reverse look-up so it's a bit messy.)\n this_tests =\\\n tests.get('each', [])+tests.get('Each', [])+tests.get('EACH', [])\\\n + tests.get(self.mnemonic, [])\\\n + utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)])\n this_tests = filter(None, this_tests)\n\n # If we explicitly set zero tests for a particular key, then this\n # overrides the 'all' and 'alias' tests.\n if not tests.get(self.mnemonic, 1):\n this_tests = []\n\n return {test.__name__: test(self) for test in this_tests}\n\n def qflag(self, tests, alias=None):\n \"\"\"\n Run a test and return the corresponding results on a sample-by-sample\n basis.\n\n Args:\n tests (list): a list of functions.\n alias (dict): a dictionary mapping mnemonics to lists of mnemonics.\n\n Returns:\n list. The results. 
Stick to booleans (True = pass) or ints.\n \"\"\"\n # Gather the tests.\n # First, anything called 'all', 'All', or 'ALL'.\n # Second, anything with the name of the curve we're in now.\n # Third, anything that the alias list has for this curve.\n # (This requires a reverse look-up so it's a bit messy.)\n this_tests =\\\n tests.get('each', [])+tests.get('Each', [])+tests.get('EACH', [])\\\n + tests.get(self.mnemonic, [])\\\n + utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)])\n this_tests = filter(None, this_tests)\n\n return {test.__name__: test(self) for test in this_tests}\n\n def qflags(self, tests, alias=None):\n \"\"\"\n Run a series of tests and return the corresponding results.\n\n Args:\n tests (list): a list of functions.\n alias (dict): a dictionary mapping mnemonics to lists of mnemonics.\n\n Returns:\n list. The results. Stick to booleans (True = pass) or ints.\n \"\"\"\n # Gather the tests.\n # First, anything called 'all', 'All', or 'ALL'.\n # Second, anything with the name of the curve we're in now.\n # Third, anything that the alias list has for this curve.\n # (This requires a reverse look-up so it's a bit messy.)\n this_tests =\\\n tests.get('each', [])+tests.get('Each', [])+tests.get('EACH', [])\\\n + tests.get(self.mnemonic, [])\\\n + utils.flatten_list([tests.get(a) for a in self.get_alias(alias=alias)])\n this_tests = filter(None, this_tests)\n\n return {test.__name__: test(self) for test in this_tests}\n\n def quality_score(self, tests, alias=None):\n \"\"\"\n Run a series of tests and return the normalized score.\n 1.0: Passed all tests.\n (0-1): Passed a fraction of tests.\n 0.0: Passed no tests.\n -1.0: Took no tests.\n\n Args:\n tests (list): a list of functions.\n alias (dict): a dictionary mapping mnemonics to lists of mnemonics.\n\n Returns:\n float. The fraction of tests passed, or -1 for 'took no tests'.\n \"\"\"\n results = self.quality(tests, alias=alias).values()\n if results:\n return sum(results) / len(results)\n return -1\n\n def block(self,\n cutoffs=None,\n values=None,\n n_bins=0,\n right=False,\n function=None):\n \"\"\"\n Block a log based on number of bins, or on cutoffs.\n\n Args:\n cutoffs (array)\n values (array): the values to map to. Defaults to [0, 1, 2,...]\n n_bins (int)\n right (bool)\n function (function): transform the log if you want.\n\n Returns:\n Curve.\n \"\"\"\n # We'll return a copy.\n params = self.__dict__.copy()\n\n if (values is not None) and (cutoffs is None):\n cutoffs = values[1:]\n\n if (cutoffs is None) and (n_bins == 0):\n cutoffs = np.mean(self)\n\n if (n_bins != 0) and (cutoffs is None):\n mi, ma = np.amin(self), np.amax(self)\n cutoffs = np.linspace(mi, ma, n_bins+1)\n cutoffs = cutoffs[:-1]\n\n try: # To use cutoff as a list.\n data = np.digitize(self, cutoffs, right)\n except ValueError: # It's just a number.\n data = np.digitize(self, [cutoffs], right)\n\n if (function is None) and (values is None):\n return Curve(data, params=params)\n\n data = data.astype(float)\n\n # Set the function for reducing.\n f = function or utils.null\n\n # Find the tops of the 'zones'.\n tops, vals = utils.find_edges(data)\n\n # End of array trick... adding this should remove the\n # need for the marked lines below. 
But it doesn't.\n # np.append(tops, None)\n # np.append(vals, None)\n\n if values is None:\n # Transform each segment in turn, then deal with the last segment.\n for top, base in zip(tops[:-1], tops[1:]):\n data[top:base] = f(np.copy(self[top:base]))\n data[base:] = f(np.copy(self[base:])) # See above\n else:\n for top, base, val in zip(tops[:-1], tops[1:], vals[:-1]):\n data[top:base] = values[int(val)]\n data[base:] = values[int(vals[-1])] # See above\n\n return Curve(data, params=params)\n\n def _rolling_window(self, window_length, func1d, step=1, return_rolled=False):\n \"\"\"\n Private function. Smoother for other smoothing/conditioning functions.\n\n Args:\n window_length (int): the window length.\n func1d (function): a function that takes a 1D array and returns a\n scalar.\n step (int): if you want to skip samples in the shifted versions.\n Don't use this for smoothing, you will get strange results.\n\n Returns:\n ndarray: the resulting array.\n \"\"\"\n # Force odd.\n if window_length % 2 == 0:\n window_length += 1\n\n shape = self.shape[:-1] + (self.shape[-1], window_length)\n strides = self.strides + (step*self.strides[-1],)\n data = np.nan_to_num(self)\n data = np.pad(data, int(step*window_length//2), mode='edge')\n rolled = np.lib.stride_tricks.as_strided(data,\n shape=shape,\n strides=strides)\n result = np.apply_along_axis(func1d, -1, rolled)\n result[np.isnan(self)] = np.nan\n\n if return_rolled:\n return result, rolled\n else:\n return result\n\n def despike(self, window_length=33, samples=True, z=2):\n \"\"\"\n Args:\n window (int): window length in samples. Default 33 (or 5 m for\n most curves sampled at 0.1524 m intervals).\n samples (bool): window length is in samples. Use False for a window\n length given in metres.\n z (float): Z score\n\n Returns:\n Curve.\n \"\"\"\n window_length //= 1 if samples else self.step\n z *= np.nanstd(self) # Transform to curve's units\n curve_sm = self._rolling_window(window_length, np.median)\n spikes = np.where(np.nan_to_num(self - curve_sm) > z)[0]\n spukes = np.where(np.nan_to_num(curve_sm - self) > z)[0]\n out = np.copy(self)\n params = self.__dict__.copy()\n out[spikes] = curve_sm[spikes] + z\n out[spukes] = curve_sm[spukes] - z\n return Curve(out, params=params)\n\n def apply(self, window_length, samples=True, func1d=None):\n \"\"\"\n Runs any kind of function over a window.\n\n Args:\n window_length (int): the window length. Required.\n samples (bool): window length is in samples. Use False for a window\n length given in metres.\n func1d (function): a function that takes a 1D array and returns a\n scalar. Default: ``np.mean()``.\n\n Returns:\n Curve.\n \"\"\"\n window_length /= 1 if samples else self.step\n if func1d is None:\n func1d = np.mean\n params = self.__dict__.copy()\n out = self._rolling_window(int(window_length), func1d)\n return Curve(out, params=params)\n\n smooth = apply\n\n def plot_kde(self,\n ax=None,\n amax=None,\n amin=None,\n label=None,\n return_fig=False):\n \"\"\"\n Plot a KDE for the curve. Very nice summary of KDEs:\n https://jakevdp.github.io/blog/2013/12/01/kernel-density-estimation/\n\n Args:\n ax (axis): Optional matplotlib (MPL) axis to plot into. Returned.\n amax (float): Optional max value to permit.\n amin (float): Optional min value to permit.\n label (string): What to put on the y-axis. 
Defaults to curve name.\n return_fig (bool): If you want to return the MPL figure object.\n\n Returns:\n None, axis, figure: depending on what you ask for.\n \"\"\"\n from scipy.stats import gaussian_kde\n\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n return_ax = False\n else:\n return_ax = True\n\n a = self[~np.isnan(self)]\n\n # Find values for common axis to exclude outliers.\n if amax is None:\n amax = np.percentile(a, 99)\n if amin is None:\n amin = np.percentile(a, 1)\n\n x = a[np.abs(a - 0.5 * (amax + amin)) < 0.5 * (amax - amin)]\n x_grid = np.linspace(amin, amax, 100)\n\n kde = gaussian_kde(x)\n std_a = kde.evaluate(x_grid)\n\n img = np.array([std_a]) / np.max([std_a])\n extent = [amin, amax, 0, 1]\n ax.imshow(img, aspect='auto', cmap='viridis', extent=extent)\n ax.set_yticklabels([])\n ax.set_ylabel(label or self.mnemonic)\n\n if return_ax:\n return ax\n elif return_fig:\n return fig\n else:\n return None\n"
] |
[
[
"numpy.nanmax",
"matplotlib.ticker.MultipleLocator",
"numpy.expand_dims",
"numpy.nanmedian",
"numpy.amax",
"numpy.linspace",
"numpy.asarray",
"numpy.nanmin",
"numpy.flipud",
"numpy.nan_to_num",
"numpy.lib.stride_tricks.as_strided",
"numpy.max",
"scipy.stats.gaussian_kde",
"numpy.mean",
"numpy.nanmean",
"numpy.zeros_like",
"numpy.nanstd",
"numpy.digitize",
"matplotlib.patches.PathPatch",
"numpy.arange",
"numpy.copy",
"scipy.interpolate.interp1d",
"numpy.apply_along_axis",
"numpy.diff",
"numpy.interp",
"matplotlib.ticker.FormatStrFormatter",
"numpy.repeat",
"matplotlib.pyplot.figure",
"numpy.amin",
"numpy.isnan",
"numpy.ndarray.__getitem__",
"numpy.array",
"numpy.abs",
"numpy.percentile",
"numpy.ndarray.__repr__"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
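A minimal usage sketch for the `Curve` class in the code field above (illustrative only, not part of the original record). It assumes the module is installed inside its package, importable here as `welly` since the file header matches Agile Geoscience's welly library, and that the sibling `utils` module supplies the helpers the class references (`find_previous`, `linear`, `flatten_list`, and so on). The mnemonic, depths, and data are fabricated.

    import numpy as np
    from welly import Curve  # Assumed import path; the module itself uses relative imports.

    # Fabricated gamma-ray-like log on a 0.1524 m basis starting at 1000 m.
    data = 75 + 15 * np.sin(np.linspace(0, 20, 500))
    gr = Curve(data, params={'mnemonic': 'GR',
                             'units': 'API',
                             'start': 1000.0,
                             'step': 0.1524})

    # The basis is computed on the fly from start, step, and length.
    print(gr.start, gr.stop, gr.step)

    # Resample onto a coarser basis; samples beyond the original extents
    # are filled with NaN (the `undefined` default).
    gr_coarse = gr.to_basis(step=0.5)

    # Block into two classes around 75 API, and despike at 2 standard deviations.
    blocked = gr.block(cutoffs=[75])
    clean = gr.despike(window_length=33, z=2)

    # Linearly interpolated read at an arbitrary depth.
    print(gr.read_at(1012.3))

    # Quality tests are plain functions, gathered under 'each' or the mnemonic.
    def no_nans(c):
        return not np.isnan(c).any()

    print(gr.quality({'each': [no_nans]}))  # {'no_nans': True}

Because `Curve` subclasses `np.ndarray`, ordinary numpy operations (slicing, arithmetic, `np.nanmean`, and so on) work on it directly; the subclass only adds the depth bookkeeping plus the plotting and QC helpers.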