repo_name: xykong1958/tensorflow
hexsha: f90532431c3785166cff35ff427b652fe460f60b
file_path: tensorflow/compiler/xla/python/xla_client_test.py
code:

[ "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for the Python extension-based XLA client.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\nimport itertools\nimport threading\n\nimport numpy as np\n\nfrom tensorflow.compiler.xla.python import custom_call_for_test\nfrom tensorflow.compiler.xla.python import xla_client\nimport unittest\n\n\nclass ComputationTest(unittest.TestCase):\n \"\"\"Base class for running an XLA Computation through the local client.\"\"\"\n\n def _NewComputation(self, name=None):\n if name is None:\n name = self.id()\n return xla_client.ComputationBuilder(name)\n\n def _Execute(self, c, arguments):\n compiled_c = c.Build().Compile()\n return compiled_c.ExecuteWithPythonValues(arguments)\n\n def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):\n assert expected is not None\n result = self._Execute(c, arguments)\n # Numpy's comparison methods are a bit too lenient by treating inputs as\n # \"array-like\", meaning that scalar 4 will be happily compared equal to\n # [[4]]. 
    self.assertEqual(np.asanyarray(result).shape,
                     np.asanyarray(expected).shape)
    assert_func(result, expected)

  def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
    self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)

  def _ExecuteAndCompareClose(self,
                              c,
                              arguments=(),
                              expected=None,
                              rtol=1e-7,
                              atol=0):
    self._ExecuteAndAssertWith(
        functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol), c,
        arguments, expected)


def NumpyArrayF32(*args, **kwargs):
  """Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
  return np.array(*args, dtype=np.float32, **kwargs)


def NumpyArrayF64(*args, **kwargs):
  """Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
  return np.array(*args, dtype=np.float64, **kwargs)


def NumpyArrayS32(*args, **kwargs):
  """Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
  return np.array(*args, dtype=np.int32, **kwargs)


def NumpyArrayS64(*args, **kwargs):
  """Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
  return np.array(*args, dtype=np.int64, **kwargs)


def NumpyArrayBool(*args, **kwargs):
  """Convenience wrapper to create Numpy arrays with a np.bool dtype."""
  return np.array(*args, dtype=np.bool, **kwargs)


class ComputationPrinting(unittest.TestCase):

  def ExampleComputation(self):
    builder = xla_client.ComputationBuilder("acomputation")
    p0 = builder.ParameterFromNumpy(np.float32(0))
    p1 = builder.ParameterFromNumpy(np.zeros((4,), np.float32))
    builder.Mul(p0, p1)
    return builder.Build()

  def testComputationToHloText(self):
    computation = self.ExampleComputation()
    hlo_text = computation.GetHloText()
    self.assertTrue(hlo_text.startswith("HloModule acomputation"))

  def testComputationToHloGraph(self):
    computation = self.ExampleComputation()
    hlo_dot_graph = computation.GetHloDotGraph()
    self.assertTrue(hlo_dot_graph.startswith("digraph "))


class ComputationsWithConstantsTest(ComputationTest):
  """Tests focusing on Constant ops."""

  def testConstantScalarSumS8(self):
    c = self._NewComputation()
    c.Add(c.Constant(np.int8(1)), c.Constant(np.int8(2)))
    self._ExecuteAndCompareExact(c, expected=np.int8(3))

  def testConstantScalarSumF32(self):
    c = self._NewComputation()
    c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
    self._ExecuteAndCompareClose(c, expected=4.25)

  def testConstantScalarSumF64(self):
    c = self._NewComputation()
    c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
    self._ExecuteAndCompareClose(c, expected=4.25)

  def testConstantScalarSumS32(self):
    c = self._NewComputation()
    c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
    self._ExecuteAndCompareClose(c, expected=3)

  def testConstantScalarSumS64(self):
    c = self._NewComputation()
    c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
    self._ExecuteAndCompareClose(c, expected=3)

  def testConstantVectorMulF16(self):
    c = self._NewComputation()
    c.Mul(
        c.Constant(np.array([2.5, 3.3, -1.2, 0.7], np.float16)),
        c.Constant(np.array([-1.2, 2, -2, -3], np.float16)))
    self._ExecuteAndCompareClose(
        c, expected=np.array([-3, 6.6, 2.4, -2.1], np.float16), rtol=2e-3)

  def testConstantVectorMulF32(self):
    c = self._NewComputation()
    c.Mul(
        c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
        c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
    self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])

  def testConstantVectorMulF64(self):
    c = self._NewComputation()
    c.Mul(
        c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
        c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
    self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])

  def testConstantVectorScalarDivF32(self):
    c = self._NewComputation()
    c.Div(
        c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
        c.ConstantF32Scalar(2.0))
    self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])

  def testConstantVectorScalarDivF64(self):
    c = self._NewComputation()
    c.Div(
        c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
        c.ConstantF64Scalar(2.0))
    self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])

  def testConstantVectorScalarPowF32(self):
    c = self._NewComputation()
    c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
    self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])

  def testConstantVectorScalarPowF64(self):
    c = self._NewComputation()
    c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
    self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])

  def testIota(self):
    c = self._NewComputation()
    c.Iota(np.float32, 10)
    self._ExecuteAndCompareExact(c, expected=np.arange(10, dtype=np.float32))

  def testBroadcastedIota(self):
    c = self._NewComputation()
    c.BroadcastedIota(np.int64, (2, 3), 1)
    expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int64)
    self._ExecuteAndCompareExact(c, expected=expected)

  def testBooleanAnd(self):
    c = self._NewComputation()
    c.And(
        c.Constant(NumpyArrayBool([True, False, True, False])),
        c.Constant(NumpyArrayBool([True, True, False, False])))
    self._ExecuteAndCompareExact(c, expected=[True, False, False, False])

  def testBooleanOr(self):
    c = self._NewComputation()
    c.Or(
        c.Constant(NumpyArrayBool([True, False, True, False])),
        c.Constant(NumpyArrayBool([True, True, False, False])))
    self._ExecuteAndCompareExact(c, expected=[True, True, True, False])

  def testBooleanXor(self):
    c = self._NewComputation()
    c.Xor(
        c.Constant(NumpyArrayBool([True, False, True, False])),
        c.Constant(NumpyArrayBool([True, True, False, False])))
    self._ExecuteAndCompareExact(c, expected=[False, True, True, False])

  def testSum2DF32(self):
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
        c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
    self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])

  def testShiftLeft(self):
    c = self._NewComputation()
    c.ShiftLeft(c.Constant(NumpyArrayS32([3])), c.Constant(NumpyArrayS32([2])))
    self._ExecuteAndCompareClose(c, expected=[12])

  def testShiftRightArithmetic(self):
    c = self._NewComputation()
    c.ShiftRightArithmetic(
        c.Constant(NumpyArrayS32([-2])), c.Constant(NumpyArrayS32([1])))
    self._ExecuteAndCompareClose(c, expected=[-1])

  def testShiftRightLogical(self):
    c = self._NewComputation()
    c.ShiftRightLogical(
        c.Constant(NumpyArrayS32([-1])), c.Constant(NumpyArrayS32([1])))
    self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
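
  # Note on the shift expectations above: an arithmetic shift preserves the
  # sign bit (so -2 >> 1 == -1), while a logical shift fills with zeros, so
  # shifting the all-ones pattern of -1 right by one yields 2**31 - 1.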

  def testSum2DF64(self):
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
        c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
    self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])

  def testSum2DWith1DBroadcastDim0F32(self):
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 0 to match the former's shape.
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF32([10, 20, 30])),
        broadcast_dimensions=(0,))
    self._ExecuteAndCompareClose(
        c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])

  def testSum2DWith1DBroadcastDim0F64(self):
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 0 to match the former's shape.
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF64([10, 20, 30])),
        broadcast_dimensions=(0,))
    self._ExecuteAndCompareClose(
        c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])

  def testSum2DWith1DBroadcastDim1F32(self):
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 1 to match the former's shape.
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF32([10, 20, 30])),
        broadcast_dimensions=(1,))
    self._ExecuteAndCompareClose(
        c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])

  def testSum2DWith1DBroadcastDim1F64(self):
    # sum of a 2D array with a 1D array where the latter is replicated across
    # dimension 1 to match the former's shape.
    c = self._NewComputation()
    c.Add(
        c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF64([10, 20, 30])),
        broadcast_dimensions=(1,))
    self._ExecuteAndCompareClose(
        c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])

  def testConstantAxpyF32(self):
    c = self._NewComputation()
    c.Add(
        c.Mul(
            c.ConstantF32Scalar(2),
            c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
        c.Constant(NumpyArrayF32([100, -100, 200, -200])))
    self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])

  def testConstantAxpyF64(self):
    c = self._NewComputation()
    c.Add(
        c.Mul(
            c.ConstantF64Scalar(2),
            c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
        c.Constant(NumpyArrayF64([100, -100, 200, -200])))
    self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])

  def testCustomCall(self):
    c = self._NewComputation()
    for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
      xla_client.register_cpu_custom_call_target(name, fn)
    c.CustomCall(
        b"test_subtract_f32",
        operands=(c.ConstantF32Scalar(1.25), c.ConstantF32Scalar(0.5)),
        shape_with_layout=xla_client.Shape.array_shape(np.float32, (), ()),
        operand_shapes_with_layout=(
            xla_client.Shape.array_shape(np.float32, (), ()),
            xla_client.Shape.array_shape(np.float32, (), ()),
        ))
    self._ExecuteAndCompareClose(c, expected=0.75)


class ParametersTest(ComputationTest):
  """Tests focusing on Parameter ops and argument-passing."""

  def setUp(self):
    self.f32_scalar_2 = NumpyArrayF32(2.0)
    self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
    self.f64_scalar_2 = NumpyArrayF64(2.0)
    self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
    self.s32_scalar_3 = NumpyArrayS32(3)
    self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
    self.s64_scalar_3 = NumpyArrayS64(3)
    self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])

  def testScalarTimesVectorAutonumberF32(self):
    c = self._NewComputation()
    p0 = c.ParameterFromNumpy(self.f32_scalar_2)
    p1 = c.ParameterFromNumpy(self.f32_4vector)
    c.Mul(p0, p1)
    self._ExecuteAndCompareClose(
        c,
        arguments=[self.f32_scalar_2, self.f32_4vector],
        expected=[-4.6, 6.6, -8.6, 10.6])

  def testScalarTimesVectorAutonumberF64(self):
    c = self._NewComputation()
    p0 = c.ParameterFromNumpy(self.f64_scalar_2)
    p1 = c.ParameterFromNumpy(self.f64_4vector)
    c.Mul(p0, p1)
    self._ExecuteAndCompareClose(
        c,
        arguments=[self.f64_scalar_2, self.f64_4vector],
        expected=[-4.6, 6.6, -8.6, 10.6])

  def testScalarTimesVectorS32(self):
    c = self._NewComputation()
    p0 = c.ParameterFromNumpy(self.s32_scalar_3)
    p1 = c.ParameterFromNumpy(self.s32_4vector)
    c.Mul(p0, p1)
    self._ExecuteAndCompareExact(
        c,
        arguments=[self.s32_scalar_3, self.s32_4vector],
        expected=[30, 45, -6, 21])

  def testScalarTimesVectorS64(self):
    c = self._NewComputation()
    p0 = c.ParameterFromNumpy(self.s64_scalar_3)
    p1 = c.ParameterFromNumpy(self.s64_4vector)
    c.Mul(p0, p1)
    self._ExecuteAndCompareExact(
        c,
        arguments=[self.s64_scalar_3, self.s64_4vector],
        expected=[30, 45, -6, 21])

  def testScalarMinusVectorExplicitNumberingF32(self):
    # Use explicit numbering and pass parameter_num first. Sub is used since
    # it's not commutative and can help catch parameter reversal within the
    # computation.
    c = self._NewComputation()
    p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
    p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
    c.Sub(p1, p0)
    self._ExecuteAndCompareClose(
        c,
        arguments=[self.f32_scalar_2, self.f32_4vector],
        expected=[-4.3, 1.3, -6.3, 3.3])

  def testScalarMinusVectorExplicitNumberingF64(self):
    # Use explicit numbering and pass parameter_num first. Sub is used since
    # it's not commutative and can help catch parameter reversal within the
    # computation.
    c = self._NewComputation()
    p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
    p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
    c.Sub(p1, p0)
    self._ExecuteAndCompareClose(
        c,
        arguments=[self.f64_scalar_2, self.f64_4vector],
        expected=[-4.3, 1.3, -6.3, 3.3])


class LocalBufferTest(ComputationTest):
  """Tests focusing on execution with LocalBuffers."""

  def _Execute(self, c, arguments):
    compiled_c = c.Build().Compile()
    arg_buffers = [xla_client.LocalBuffer.from_pyval(arg) for arg in arguments]
    result_buffer = compiled_c.Execute(arg_buffers)
    return result_buffer.to_py()

  def testConstantSum(self):
    c = self._NewComputation()
    c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
    self._ExecuteAndCompareClose(c, expected=4.25)

  def testOneParameterSum(self):
    c = self._NewComputation()
    c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
    self._ExecuteAndCompareClose(
        c, arguments=[NumpyArrayF32(1.11)], expected=4.25)

  def testTwoParameterSum(self):
    c = self._NewComputation()
    c.Add(
        c.ParameterFromNumpy(NumpyArrayF32(0.)),
        c.ParameterFromNumpy(NumpyArrayF32(0.)))
    self._ExecuteAndCompareClose(
        c, arguments=[NumpyArrayF32(1.11),
                      NumpyArrayF32(3.14)], expected=4.25)

  def testCannotCallWithDeletedBuffers(self):
    c = self._NewComputation()
    c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
    arg = NumpyArrayF32(1.11)
    compiled_c = c.Build().Compile()
    arg_buffer = xla_client.LocalBuffer.from_pyval(arg)
    arg_buffer.delete()
    with self.assertRaises(ValueError):
      compiled_c.Execute([arg_buffer])

  def testDestructureTupleEmpty(self):
    t = ()
    local_buffer = xla_client.LocalBuffer.from_pyval(t)
    pieces = local_buffer.destructure()
    self.assertTrue(local_buffer.is_deleted())
    self.assertEqual(len(pieces), 0)
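
  # Illustrative sketch, not part of the original suite: destructuring a
  # tuple buffer consumes it (is_deleted() becomes true, as the tests below
  # also assert) and hands back its elements as independent LocalBuffers.
  # Only APIs exercised by the surrounding tests are used here.
  def testDestructureTupleRoundTripSketch(self):
    t = (NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4]))
    local_buffer = xla_client.LocalBuffer.from_pyval(t)
    pieces = local_buffer.destructure()
    self.assertTrue(local_buffer.is_deleted())
    np.testing.assert_equal(pieces[0].to_py(), NumpyArrayF32([1.0, 2.0]))
    np.testing.assert_equal(pieces[1].to_py(), NumpyArrayS32([3, 4]))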

  def testDestructureTupleOneArrayElement(self):
    t = (np.array([1, 2, 3, 4], dtype=np.int32),)
    local_buffer = xla_client.LocalBuffer.from_pyval(t)
    pieces = local_buffer.destructure()
    self.assertTrue(local_buffer.is_deleted())
    self.assertEqual(len(pieces), 1)
    array = pieces[0]
    got = array.to_py()
    want = NumpyArrayS32([1, 2, 3, 4])
    np.testing.assert_equal(want, got)

  def testDestructureTupleTwoArrayElementDifferentType(self):
    t = (np.array([1.0, 2.0, 3.0, 4.0],
                  dtype=np.float32), np.array([2, 3, 4, 5], dtype=np.int32))
    local_buffer = xla_client.LocalBuffer.from_pyval(t)
    pieces = local_buffer.destructure()
    self.assertTrue(local_buffer.is_deleted())
    self.assertEqual(len(pieces), 2)
    array0, array1 = pieces
    got = array0.to_py()
    want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
    np.testing.assert_equal(want, got)
    got = array1.to_py()
    want = NumpyArrayS32([2, 3, 4, 5])
    np.testing.assert_equal(want, got)

  def testDestructureTupleNested(self):
    t = ((NumpyArrayF32([1.0, 2.0]), NumpyArrayS32([3, 4])),
         NumpyArrayS32([5]))
    local_buffer = xla_client.LocalBuffer.from_pyval(t)
    pieces = local_buffer.destructure()
    self.assertTrue(local_buffer.is_deleted())
    self.assertEqual(len(pieces), 2)
    tuple0, array1 = pieces
    got = array1.to_py()
    want = NumpyArrayS32([5])
    np.testing.assert_equal(want, got)
    got = tuple0.to_py()
    self.assertEqual(type(got), tuple)
    self.assertEqual(len(got), 2)
    np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
    np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])

  def testShape(self):
    pyval = np.array([[1., 2.]], np.float32)
    local_buffer = xla_client.LocalBuffer.from_pyval(pyval)
    xla_shape = local_buffer.shape()
    self.assertEqual(xla_shape.dimensions(), (1, 2))
    self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))


class SingleOpTest(ComputationTest):
  """Tests for single ops.

  The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As few additional ops as possible are added around the op
  being tested.
  """

  def testConcatenateF32(self):
    c = self._NewComputation()
    args = (
        c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
        c.Constant(NumpyArrayF32([4.0, 5.0, 6.0])),
    )
    c.Concatenate(args, dimension=0)
    self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])

  def testConcatenateF64(self):
    c = self._NewComputation()
    args = (
        c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
        c.Constant(NumpyArrayF64([4.0, 5.0, 6.0])),
    )
    c.Concatenate(args, dimension=0)
    self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])

  def testConvertElementType(self):
    xla_types = {
        np.bool: xla_client.PrimitiveType.PRED,
        np.int32: xla_client.PrimitiveType.S32,
        np.int64: xla_client.PrimitiveType.S64,
        np.float32: xla_client.PrimitiveType.F32,
        np.float64: xla_client.PrimitiveType.F64,
    }

    def _ConvertAndTest(template, src_dtype, dst_dtype):
      c = self._NewComputation()
      x = c.Constant(np.array(template, dtype=src_dtype))
      c.ConvertElementType(x, xla_types[dst_dtype])

      result = c.Build().Compile().ExecuteWithPythonValues()
      expected = np.array(template, dtype=dst_dtype)

      self.assertEqual(result.shape, expected.shape)
      self.assertEqual(result.dtype, expected.dtype)
      np.testing.assert_equal(result, expected)

    x = [0, 1, 0, 0, 1]
    for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
      _ConvertAndTest(x, src_dtype, dst_dtype)

  def testBitcastConvertType(self):
    xla_x32_types = {
        np.int32: xla_client.PrimitiveType.S32,
        np.float32: xla_client.PrimitiveType.F32,
    }

    xla_x64_types = {
        np.int64: xla_client.PrimitiveType.S64,
        np.float64: xla_client.PrimitiveType.F64,
    }

    def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
      c = self._NewComputation()
      x = c.Constant(np.array(template, dtype=src_dtype))
      c.BitcastConvertType(x, dst_etype)

      result = c.Build().Compile().ExecuteWithPythonValues()
      expected = np.array(template, src_dtype).view(dst_dtype)

      self.assertEqual(result.shape, expected.shape)
      self.assertEqual(result.dtype, expected.dtype)
      np.testing.assert_equal(result, expected)

    x = [0, 1, 0, 0, 1]
    for xla_types in [xla_x32_types, xla_x64_types]:
      for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
        _ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
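
  # Unlike ConvertElementType, BitcastConvertType reinterprets the underlying
  # bits (the NumPy analogue is ndarray.view, used as the reference above),
  # which is why the source and destination types are grouped by bit width.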

  # TODO(b/123523486) implement AllToAll on CPU
  def DISABLED_testAllToAllOneReplica(self):
    samples = [
        NumpyArrayF32([97.0]),
        NumpyArrayF32([64.0, 117.0]),
        NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
    ]
    for lhs in samples[:1]:
      c = self._NewComputation()
      c.AllToAll(c.Constant(lhs), 0, 0)
      self._ExecuteAndCompareExact(c, expected=lhs)

  def testCrossReplicaSumOneReplica(self):
    samples = [
        NumpyArrayF32(42.0),
        NumpyArrayF32([97.0]),
        NumpyArrayF32([64.0, 117.0]),
        NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
    ]
    for lhs in samples:
      c = self._NewComputation()
      c.CrossReplicaSum(c.Constant(lhs))
      self._ExecuteAndCompareExact(c, expected=lhs)

  def testReplicaId(self):
    c = self._NewComputation()
    _ = c.ReplicaId()
    self._ExecuteAndCompareExact(c, expected=0)

  def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
    samples = [
        NumpyArrayF32(42.0),
        NumpyArrayF32([97.0]),
        NumpyArrayF32([64.0, 117.0]),
        NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
    ]
    for lhs in samples:
      c = self._NewComputation()
      c.CrossReplicaSum(c.Constant(lhs), [[0]])
      self._ExecuteAndCompareExact(c, expected=lhs)

  def testDotMatrixVectorF32(self):
    c = self._NewComputation()
    lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
    rhs = NumpyArrayF32([[10.0], [20.0]])
    c.Dot(c.Constant(lhs), c.Constant(rhs))
    self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))

  def testDotMatrixVectorF64(self):
    c = self._NewComputation()
    lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
    rhs = NumpyArrayF64([[10.0], [20.0]])
    c.Dot(c.Constant(lhs), c.Constant(rhs))
    self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))

  def testDotMatrixMatrixF32(self):
    c = self._NewComputation()
    lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
    rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
    c.Dot(c.Constant(lhs), c.Constant(rhs))
    self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))

  def testDotMatrixMatrixF64(self):
    c = self._NewComputation()
    lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
    rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
    c.Dot(c.Constant(lhs), c.Constant(rhs))
    self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))

  def testDotGeneral(self):
    c = self._NewComputation()
    rng = np.random.RandomState(0)
    lhs = NumpyArrayF32(rng.randn(10, 3, 4))
    rhs = NumpyArrayF32(rng.randn(10, 4, 5))
    dimension_numbers = (([2], [1]), ([0], [0]))
    c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
    self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))

  def testDotGeneralWithDotDimensionNumbersProto(self):
    c = self._NewComputation()
    rng = np.random.RandomState(0)
    lhs = NumpyArrayF32(rng.randn(10, 3, 4))
    rhs = NumpyArrayF32(rng.randn(10, 4, 5))

    dimension_numbers = xla_client.DotDimensionNumbers()
    dimension_numbers.lhs_contracting_dimensions.append(2)
    dimension_numbers.rhs_contracting_dimensions.append(1)
    dimension_numbers.lhs_batch_dimensions.append(0)
    dimension_numbers.rhs_batch_dimensions.append(0)

    c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
    self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs))
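
  # The nested-tuple dimension_numbers in testDotGeneral,
  # (([2], [1]), ([0], [0])), is shorthand for the DotDimensionNumbers proto
  # built field by field above: contract lhs dim 2 with rhs dim 1 and batch
  # over dim 0 of both, which matches np.matmul on 3D inputs.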

  def testConvF32Same(self):
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 2, 3, 4)
    rhs = a(1, 2, 1, 2) * 10
    c.Conv(
        c.Constant(lhs), c.Constant(rhs), [1, 1], xla_client.PaddingType.SAME)
    result = np.array([[[
        [640., 700., 760., 300.],
        [880., 940., 1000., 380.],
        [1120., 1180., 1240., 460.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=result)

  def testConvF32Valid(self):
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 2, 3, 4)
    rhs = a(1, 2, 1, 2) * 10
    c.Conv(
        c.Constant(lhs), c.Constant(rhs), [2, 1], xla_client.PaddingType.VALID)
    result = np.array([[[
        [640., 700., 760.],
        [1120., 1180., 1240.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=result)

  def testConvWithGeneralPaddingF32(self):
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    c.ConvWithGeneralPadding(
        c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
        rhs_dilation)
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=result)

  def testConvGeneralDilatedF32(self):
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = ("NCHW", "OIHW", "NCHW")
    c.ConvGeneralDilated(
        c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
        rhs_dilation, dimension_numbers)
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=result)

  def testConvGeneralDilatedPermutedF32(self):
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 1, 2, 3)
    rhs = a(1, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)

    dimension_numbers = ("NHWC", "OIHW", "CWNH")
    c.ConvGeneralDilated(
        c.Constant(np.transpose(lhs, (0, 2, 3, 1))), c.Constant(rhs), strides,
        pads, lhs_dilation, rhs_dilation, dimension_numbers)
    result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
                         [40., 50., 0.]]]])
    self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))

  def testConvGeneralDilatedGroupedConvolutionF32(self):
    c = self._NewComputation()
    a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
    lhs = a(1, 2, 2, 3)
    rhs = a(2, 1, 1, 2) * 10
    strides = [1, 1]
    pads = [(1, 0), (0, 1)]
    lhs_dilation = (2, 1)
    rhs_dilation = (1, 1)
    dimension_numbers = ("NCHW", "OIHW", "NCHW")
    feature_group_count = 2
    c.ConvGeneralDilated(
        c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
        rhs_dilation, dimension_numbers, feature_group_count)
    result = np.array([[[
        [0., 0., 0.],
        [10., 20., 0.],
        [0., 0., 0.],
        [40., 50., 0.],
    ], [
        [0., 0., 0.],
        [330., 380., 160.],
        [0., 0., 0.],
        [480., 530., 220.],
    ]]])
    self._ExecuteAndCompareClose(c, expected=result)

  def testBooleanNot(self):
    c = self._NewComputation()
    arr = NumpyArrayBool([True, False, True])
    c.Not(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=~arr)

  def testCountLeadingZeros(self):
    c = self._NewComputation()
    arr = NumpyArrayS32([0x7FFF, 0x12345678])
    c.Clz(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=[17, 3])
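
  # Worked expectation for Clz on 32-bit ints: 0x7FFF has bit-length 15, so
  # 32 - 15 = 17 leading zeros; 0x12345678 has bit-length 29, giving 3.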

  def testExp(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Exp(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.exp(arr))

  def testExpm1(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Expm1(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.expm1(arr))

  def testRound(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Round(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.round(arr))

  def testLog(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Log(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.log(arr))

  def testLog1p(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Log1p(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.log1p(arr))

  def testNeg(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Neg(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=-arr)

  def testFloor(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Floor(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.floor(arr))

  def testCeil(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Ceil(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.ceil(arr))

  def testAbs(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
    c.Abs(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.abs(arr))

  def testTanh(self):
    c = self._NewComputation()
    arr = NumpyArrayF32([3.3, 12.1])
    c.Tanh(c.Constant(arr))
    self._ExecuteAndCompareClose(c, expected=np.tanh(arr))

  def testTrans(self):

    def _TransposeAndTest(array):
      c = self._NewComputation()
      c.Trans(c.Constant(array))
      self._ExecuteAndCompareClose(c, expected=array.T)

    # Test square and non-square matrices in both default (C) and F orders.
    for array_fun in [NumpyArrayF32, NumpyArrayF64]:
      _TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
      _TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
      _TransposeAndTest(array_fun([[1, 2], [4, 5]]))
      _TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))

  def testTranspose(self):

    def _TransposeAndTest(array, permutation):
      c = self._NewComputation()
      c.Transpose(c.Constant(array), permutation)
      expected = np.transpose(array, permutation)
      self._ExecuteAndCompareClose(c, expected=expected)

    _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
    _TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
    _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
    _TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])

    arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
    for permutation in itertools.permutations(range(arr.ndim)):
      _TransposeAndTest(arr, permutation)
      _TransposeAndTest(np.asfortranarray(arr), permutation)

  def testEq(self):
    c = self._NewComputation()
    c.Eq(
        c.Constant(NumpyArrayS32([1, 2, 3, 4])),
        c.Constant(NumpyArrayS32([4, 2, 3, 1])))
    self._ExecuteAndCompareExact(c, expected=[False, True, True, False])

  def testNe(self):
    c = self._NewComputation()
    c.Ne(
        c.Constant(NumpyArrayS32([1, 2, 3, 4])),
        c.Constant(NumpyArrayS32([4, 2, 3, 1])))
    self._ExecuteAndCompareExact(c, expected=[True, False, False, True])

    # Under IEEE 754 semantics, 0.0 == -0.0 while NaN compares unequal to
    # everything, including itself.
    c.Ne(
        c.Constant(NumpyArrayF32([-2.0, 0.0,
                                  float("nan"),
                                  float("nan")])),
        c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
    self._ExecuteAndAssertWith(
        np.testing.assert_allclose, c, (), expected=[True, False, True, True])

  def testGt(self):
    c = self._NewComputation()
    c.Gt(
        c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
        c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])

  def testGe(self):
    c = self._NewComputation()
    c.Ge(
        c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
        c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])

  def testLt(self):
    c = self._NewComputation()
    c.Lt(
        c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
        c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])

  def testLe(self):
    c = self._NewComputation()
    c.Le(
        c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
        c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
    self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])

  def testMax(self):
    c = self._NewComputation()
    c.Max(
        c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
        c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
    self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])

  def testMaxExplicitBroadcastDim0(self):
    c = self._NewComputation()
    c.Max(
        c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF32([3, 4, 5])),
        broadcast_dimensions=(0,))
    self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])

  def testMaxExplicitBroadcastDim1(self):
    c = self._NewComputation()
    c.Max(
        c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayF32([3, 4, 5])),
        broadcast_dimensions=(1,))
    self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])

  def testMin(self):
    c = self._NewComputation()
    c.Min(
        c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
        c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
    self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])

  def testPad(self):
    c = self._NewComputation()
    c.Pad(
        c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
        c.Constant(NumpyArrayF32(0.0)), [(1, 2, 1), (0, 1, 0)])
    self._ExecuteAndCompareClose(
        c,
        expected=[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
                  [3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
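
  # Each padding triple above is (edge_padding_low, edge_padding_high,
  # interior_padding) for one dimension: rows get 1 low + 2 high edge rows
  # plus 1 interior row (1 + 2 + 2 + 1 = 6), columns get one trailing column
  # (0 + 1 + 2 + 0 = 3). The next test expresses the same configuration via
  # an explicit PaddingConfig.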

  def testPadWithPaddingConfig(self):
    c = self._NewComputation()
    padding_config = xla_client.PaddingConfig()
    for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
      dimension = xla_client.PaddingConfigDimension()
      dimension.edge_padding_low = lo
      dimension.edge_padding_high = hi
      dimension.interior_padding = interior
      padding_config.dimensions.append(dimension)
    c.Pad(
        c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
        c.Constant(NumpyArrayF32(0.0)), padding_config)
    self._ExecuteAndCompareClose(
        c,
        expected=[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
                  [3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])

  def testReshape(self):
    c = self._NewComputation()
    c.Reshape(
        c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
        dimensions=[0, 1],
        new_sizes=[2, 3])
    self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])

  def testCollapse(self):
    c = self._NewComputation()
    c.Collapse(
        c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
        dimensions=[1, 2])
    self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])

  def testRev(self):
    c = self._NewComputation()
    c.Rev(
        c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
        dimensions=[0, 2])
    self._ExecuteAndCompareExact(
        c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])

  def testClampF32(self):
    c = self._NewComputation()
    c.Clamp(
        c.Constant(NumpyArrayF32(-1)),
        c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
        c.Constant(NumpyArrayF32(2)))
    self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])

  def testClampS32(self):
    c = self._NewComputation()
    c.Clamp(
        c.Constant(NumpyArrayS32(-1)),
        c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
        c.Constant(NumpyArrayS32(2)))
    self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])

  def testSelect(self):
    c = self._NewComputation()
    c.Select(
        c.Constant(NumpyArrayBool([True, False, False, True, False])),
        c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
        c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
    self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])

  def testSlice(self):
    c = self._NewComputation()
    c.Slice(
        c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
        [3, 2])
    self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])

  def testSliceInDim(self):
    c = self._NewComputation()
    c.SliceInDim(
        c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        start_index=1,
        limit_index=2,
        stride=1,
        dimno=1)
    self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
    c.SliceInDim(
        c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        start_index=0,
        limit_index=3,
        stride=2,
        dimno=0)
    self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])

  def testDynamicSlice(self):
    c = self._NewComputation()
    c.DynamicSlice(
        c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayS32([1, 0])), [2, 2])
    self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])

  def testDynamicUpdateSlice(self):
    c = self._NewComputation()
    c.DynamicUpdateSlice(
        c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
        c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
        c.Constant(NumpyArrayS32([1, 1])))
    self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])

  def testTuple(self):
    c = self._NewComputation()
    c.Tuple(
        c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
        c.Constant(NumpyArrayBool([True, False, False, True])))
    result = c.Build().Compile().ExecuteWithPythonValues()
    self.assertIsInstance(result, tuple)
    np.testing.assert_equal(result[0], 42)
    np.testing.assert_allclose(result[1], [1.0, 2.0])
    np.testing.assert_equal(result[2], [True, False, False, True])

  def testGetTupleElement(self):
    c = self._NewComputation()
    c.GetTupleElement(
        c.Tuple(
            c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
            c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
    self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])

  def testBroadcast(self):
    c = self._NewComputation()
    c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
    self._ExecuteAndCompareExact(
        c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])

  def testBroadcastInDim(self):
    c = self._NewComputation()
    c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [0])
    self._ExecuteAndCompareExact(c, expected=[[1, 1], [2, 2]])
    c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [1])
    self._ExecuteAndCompareExact(c, expected=[[1, 2], [1, 2]])

  def testRngNormal(self):
    shape = (2, 3)
    c = self._NewComputation()
    c.RngNormal(
        c.Constant(NumpyArrayF32(0.)),
        c.Constant(NumpyArrayF32(1.)),
        dims=shape)
    result = c.Build().Compile().ExecuteWithPythonValues()
    # since the result is random, we just check shape and uniqueness
    self.assertEqual(result.shape, shape)
    self.assertEqual(len(np.unique(result)), np.prod(shape))

  def testRngUniformF32(self):
    lo, hi = 2., 4.
    shape = (2, 3)
    c = self._NewComputation()
    c.RngUniform(
        c.Constant(NumpyArrayF32(lo)),
        c.Constant(NumpyArrayF32(hi)),
        dims=shape)
    result = c.Build().Compile().ExecuteWithPythonValues()
    # since the result is random, we just check shape, uniqueness, and range
    self.assertEqual(result.shape, shape)
    self.assertEqual(len(np.unique(result)), np.prod(shape))
    self.assertTrue(np.all(lo <= result))
    self.assertTrue(np.all(result < hi))

  def testRngUniformS32(self):
    lo, hi = 2, 4
    shape = (2, 3)
    c = self._NewComputation()
    c.RngUniform(
        c.Constant(NumpyArrayS32(lo)),
        c.Constant(NumpyArrayS32(hi)),
        dims=shape)
    result = c.Build().Compile().ExecuteWithPythonValues()
    # since the result is random, we just check shape, integrality, and range
    self.assertEqual(result.shape, shape)
    self.assertEqual(result.dtype, np.int32)
    self.assertTrue(np.all(lo <= result))
    self.assertTrue(np.all(result < hi))

  def testCholesky(self):
    l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
                 dtype=np.float32)
    c = self._NewComputation()
    c.Cholesky(c.Constant(np.dot(l, l.T)))
    self._ExecuteAndCompareClose(c, expected=l, rtol=1e-4)
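
  # Note: np.dot(l, l.T) above is symmetric positive definite by construction
  # (l is lower triangular with a positive diagonal), so its lower Cholesky
  # factor is exactly l.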

  def testQR(self):
    a = np.array(
        [[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
         [10, 63, 166, 310]],
        dtype=np.float32)
    c = self._NewComputation()
    c.QR(c.Constant(a), full_matrices=True)
    q, r = self._Execute(c, ())
    np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)

  def testEigh(self):
    a = np.array(
        [[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
         [10, 63, 166, 310]],
        dtype=np.float32)
    a = (a + a.T) / 2

    c = self._NewComputation()
    c.Eigh(c.Constant(a), full_matrices=True)
    # TODO(b/129396575): Turn this test back on when it passes without
    # fastmath.
    # v, w = self._Execute(c, ())
    # self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)

  def testSVD(self):
    a = np.array(
        [[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166],
         [10, 63, 166, 310]],
        dtype=np.float32)
    c = self._NewComputation()
    c.SVD(c.Constant(a))
    u, d, v = self._Execute(c, ())
    self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)

  def testTriangularSolve(self):
    a_vals = np.array(
        [[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
        dtype=np.float32)
    b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
                      dtype=np.float32)

    c = self._NewComputation()
    c.TriangularSolve(
        c.Constant(a_vals),
        c.Constant(b_vals),
        left_side=False,
        lower=True,
        transpose_a=True)
    self._ExecuteAndCompareClose(
        c,
        expected=np.array(
            [
                [0.5, 0.08333334, 0.04629629, 0.03367003],
                [2.5, -0.25, -0.1388889, -0.1010101],
                [4.5, -0.58333331, -0.32407406, -0.23569024],
            ],
            dtype=np.float32),
        rtol=1e-4)

  def testIsConstant(self):
    c = self._NewComputation()
    a = c.ConstantS32Scalar(3)
    b = c.ConstantS32Scalar(1)
    x = c.ParameterFromNumpy(NumpyArrayS32(0))
    const_expr = c.Sub(b, a)
    non_const_expr = c.Mul(const_expr, x)
    self.assertTrue(c.IsConstant(const_expr))
    self.assertFalse(c.IsConstant(non_const_expr))
    # self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x)))  # TODO(b/77245564)

  def testGather(self):
    a = np.arange(9).astype(np.int32).reshape((3, 3))
    indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
    dnums = xla_client.GatherDimensionNumbers()
    dnums.offset_dims.append(1)
    dnums.offset_dims.append(2)
    dnums.start_index_map.append(0)
    dnums.start_index_map.append(1)
    dnums.index_vector_dim = 2
    c = self._NewComputation()
    c.Gather(c.Constant(a), c.Constant(indices), dnums, slice_sizes=[1, 1])
    g = self._Execute(c, ())
    expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
    np.testing.assert_allclose(g, expected, rtol=1e-4)


class EmbeddedComputationsTest(ComputationTest):
  """Tests for XLA graphs with embedded computations (such as maps)."""

  def _CreateConstantS32Computation(self):
    """Computation (f32) -> s32 that returns a constant 1 for any input."""
    c = self._NewComputation("constant_s32_one")
    # TODO(eliben): consider adding a nicer way to create new parameters
    # without having to create dummy Numpy arrays or populating Shape
    # messages. Perhaps we need our own (Python-client-own) way to represent
    # Shapes conveniently.
    c.ParameterFromNumpy(NumpyArrayF32(0))
    c.ConstantS32Scalar(1)
    return c.Build()

  def _CreateConstantS64Computation(self):
    """Computation (f64) -> s64 that returns a constant 1 for any input."""
    c = self._NewComputation("constant_s64_one")
    # TODO(eliben): consider adding a nicer way to create new parameters
    # without having to create dummy Numpy arrays or populating Shape
    # messages. Perhaps we need our own (Python-client-own) way to represent
    # Shapes conveniently.
    c.ParameterFromNumpy(NumpyArrayF64(0))
    c.ConstantS64Scalar(1)
    return c.Build()

  def _CreateConstantF32Computation(self):
    """Computation (f32) -> f32 that returns a constant 1.0 for any input."""
    c = self._NewComputation("constant_f32_one")
    c.ParameterFromNumpy(NumpyArrayF32(0))
    c.ConstantF32Scalar(1.0)
    return c.Build()

  def _CreateConstantF64Computation(self):
    """Computation (f64) -> f64 that returns a constant 1.0 for any input."""
    c = self._NewComputation("constant_f64_one")
    c.ParameterFromNumpy(NumpyArrayF64(0))
    c.ConstantF64Scalar(1.0)
    return c.Build()

  def _CreateMulF32By2Computation(self):
    """Computation (f32) -> f32 that multiplies its parameter by 2."""
    c = self._NewComputation("mul_f32_by2")
    c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
    return c.Build()

  def _CreateMulF32ByParamComputation(self):
    """Computation (f32) -> f32 that multiplies one parameter by the other."""
    c = self._NewComputation("mul_f32_by_param")
    c.Mul(
        c.ParameterFromNumpy(NumpyArrayF32(0)),
        c.ParameterFromNumpy(NumpyArrayF32(0)))
    return c.Build()

  def _CreateMulF64By2Computation(self):
    """Computation (f64) -> f64 that multiplies its parameter by 2."""
    c = self._NewComputation("mul_f64_by2")
    c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
    return c.Build()

  def _CreateBinaryAddS32Computation(self):
    """Computation (s32, s32) -> s32 that adds its two parameters."""
    c = self._NewComputation("add_param0_by_param1")
    c.Add(
        c.ParameterFromNumpy(NumpyArrayS32(0)),
        c.ParameterFromNumpy(NumpyArrayS32(0)))
    return c.Build()

  def _CreateBinaryAddF32Computation(self):
    """Computation (f32, f32) -> f32 that adds its two parameters."""
    c = self._NewComputation("add_param0_by_param1")
    c.Add(
        c.ParameterFromNumpy(NumpyArrayF32(0)),
        c.ParameterFromNumpy(NumpyArrayF32(0)))
    return c.Build()

  def _CreateBinaryAddF64Computation(self):
    """Computation (f64, f64) -> f64 that adds its two parameters."""
    c = self._NewComputation("add_param0_by_param1")
    c.Add(
        c.ParameterFromNumpy(NumpyArrayF64(0)),
        c.ParameterFromNumpy(NumpyArrayF64(0)))
    return c.Build()

  def _CreateBinaryDivF32Computation(self):
    """Computation (f32, f32) -> f32 that divides its two parameters."""
    c = self._NewComputation("div_param0_by_param1")
    c.Div(
        c.ParameterFromNumpy(NumpyArrayF32(0)),
        c.ParameterFromNumpy(NumpyArrayF32(0)))
    return c.Build()

  def _CreateBinaryDivF64Computation(self):
    """Computation (f64, f64) -> f64 that divides its two parameters."""
    c = self._NewComputation("div_param0_by_param1")
    c.Div(
        c.ParameterFromNumpy(NumpyArrayF64(0)),
        c.ParameterFromNumpy(NumpyArrayF64(0)))
    return c.Build()

  def _CreateTestF32Lt10Computation(self):
    """Computation (f32) -> bool that tests if its parameter is less than 10."""
    c = self._NewComputation("test_f32_lt_10")
    c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
    return c.Build()

  def _CreateTestF64Lt10Computation(self):
    """Computation (f64) -> bool that tests if its parameter is less than 10."""
    c = self._NewComputation("test_f64_lt_10")
    c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
    return c.Build()

  def _CreateBinaryGeF32Computation(self):
    """Computation (f32, f32) -> bool that tests first_param >= second_param."""
    c = self._NewComputation("param0_ge_param1")
    c.Ge(
        c.ParameterFromNumpy(NumpyArrayF32(0)),
        c.ParameterFromNumpy(NumpyArrayF32(0)))
    return c.Build()

  def _CreateBinaryGeF64Computation(self):
    """Computation (f64, f64) -> bool that tests first_param >= second_param."""
    c = self._NewComputation("param0_ge_param1")
    c.Ge(
        c.ParameterFromNumpy(NumpyArrayF64(0)),
        c.ParameterFromNumpy(NumpyArrayF64(0)))
    return c.Build()

  def _MakeSample3DArrayF32(self):
    return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
                          [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])

  def _MakeSample3DArrayF64(self):
    return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
                          [[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])

  def testCallF32(self):
    c = self._NewComputation()
    c.Call(
        self._CreateMulF32By2Computation(),
        operands=(c.ConstantF32Scalar(5.0),))
    self._ExecuteAndCompareClose(c, expected=10.0)

  def testCallF64(self):
    c = self._NewComputation()
    c.Call(
        self._CreateMulF64By2Computation(),
        operands=(c.ConstantF64Scalar(5.0),))
    self._ExecuteAndCompareClose(c, expected=10.0)

  def testMapEachElementToS32Constant(self):
    c = self._NewComputation()
    c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
          self._CreateConstantS32Computation(), [0])
    self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])

  def testMapEachElementToS64Constant(self):
    c = self._NewComputation()
    c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
          self._CreateConstantS64Computation(), [0])
    self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])

  def testMapMulBy2F32(self):
    c = self._NewComputation()
    c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
          self._CreateMulF32By2Computation(), [0])
    self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])

  def testMapMulBy2F64(self):
    c = self._NewComputation()
    c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
          self._CreateMulF64By2Computation(), [0])
    self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])

  def testSimpleMapChainF32(self):
    # Chains a map of constant-f32 with a map of mul-by-2
    c = self._NewComputation()
    const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
                      self._CreateConstantF32Computation(), [0])
    c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
    self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])

  def testSimpleMapChainF64(self):
    # Chains a map of constant-f64 with a map of mul-by-2
    c = self._NewComputation()
    const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
                      self._CreateConstantF64Computation(), [0])
    c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
    self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])

  def testDivVectorsWithMapF32(self):
    c = self._NewComputation()
    c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
           c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
          self._CreateBinaryDivF32Computation(), [0])
    self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])

  def testDivVectorsWithMapF64(self):
    c = self._NewComputation()
    c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
           c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
          self._CreateBinaryDivF64Computation(), [0])
    self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])

  def testSelectAndScatterF32(self):
    c = self._NewComputation()
    c.SelectAndScatter(
        c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
        select=self._CreateBinaryGeF32Computation(),
        window_dimensions=(2, 1),
        window_strides=(1, 2),
        padding=xla_client.PaddingType.VALID,
        source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
        init_value=c.Constant(NumpyArrayF32(1)),
        scatter=self._CreateBinaryAddF32Computation())
    self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])

  def testSelectAndScatterF64(self):
    c = self._NewComputation()
    c.SelectAndScatter(
        c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
        select=self._CreateBinaryGeF64Computation(),
        window_dimensions=(2, 1),
        window_strides=(1, 2),
        padding=xla_client.PaddingType.VALID,
        source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
        init_value=c.Constant(NumpyArrayF64(1)),
        scatter=self._CreateBinaryAddF64Computation())
    self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])

  def testReduce1DtoScalarF32(self):
    c = self._NewComputation()
    c.Reduce(
        operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
        init_value=c.ConstantF32Scalar(0),
        computation_to_apply=self._CreateBinaryAddF32Computation(),
        dimensions=[0])
    self._ExecuteAndCompareClose(c, expected=10)

  def testReduce1DtoScalarF64(self):
    c = self._NewComputation()
    c.Reduce(
        operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
        init_value=c.ConstantF64Scalar(0),
        computation_to_apply=self._CreateBinaryAddF64Computation(),
        dimensions=[0])
    self._ExecuteAndCompareClose(c, expected=10)

  def testReduce2DTo1DDim0F32(self):
    input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    c = self._NewComputation()
    c.Reduce(
        operand=c.Constant(input_array),
        init_value=c.ConstantF32Scalar(0),
        computation_to_apply=self._CreateBinaryAddF32Computation(),
        dimensions=[0])
    self._ExecuteAndCompareClose(c, expected=[5, 7, 9])

  def testReduce2DTo1DDim0F64(self):
    input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    c = self._NewComputation()
    c.Reduce(
        operand=c.Constant(input_array),
        init_value=c.ConstantF64Scalar(0),
        computation_to_apply=self._CreateBinaryAddF64Computation(),
        dimensions=[0])
    self._ExecuteAndCompareClose(c, expected=[5, 7, 9])

  def testReduce2DTo1DDim1F32(self):
    input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    c = self._NewComputation()
    c.Reduce(
        operand=c.Constant(input_array),
        init_value=c.ConstantF32Scalar(0),
        computation_to_apply=self._CreateBinaryAddF32Computation(),
        dimensions=[1])
    self._ExecuteAndCompareClose(c, expected=[6, 15])

  def testReduce2DTo1DDim1F64(self):
    input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    c = self._NewComputation()
    c.Reduce(
        operand=c.Constant(input_array),
        init_value=c.ConstantF64Scalar(0),
        computation_to_apply=self._CreateBinaryAddF64Computation(),
        dimensions=[1])
    self._ExecuteAndCompareClose(c, expected=[6, 15])

  def testReduce3DAllPossibleWaysF32(self):
    input_array = self._MakeSample3DArrayF32()

    def _ReduceAndTest(*dims):
      c = self._NewComputation()
      c.Reduce(
          operand=c.Constant(input_array),
          init_value=c.ConstantF32Scalar(0),
          computation_to_apply=self._CreateBinaryAddF32Computation(),
          dimensions=dims)
      self._ExecuteAndCompareClose(
          c, expected=np.sum(input_array, axis=tuple(dims)))

    _ReduceAndTest(0)
    _ReduceAndTest(0, 1)
    _ReduceAndTest(0, 2)
    _ReduceAndTest(1, 2)
    _ReduceAndTest(0, 1, 2)

  def testReduce3DAllPossibleWaysF64(self):
    input_array = self._MakeSample3DArrayF64()

    def _ReduceAndTest(*dims):
      c = self._NewComputation()
      c.Reduce(
          operand=c.Constant(input_array),
          init_value=c.ConstantF64Scalar(0),
          computation_to_apply=self._CreateBinaryAddF64Computation(),
          dimensions=dims)
      self._ExecuteAndCompareClose(
          c, expected=np.sum(input_array, axis=tuple(dims)))

    _ReduceAndTest(0)
    _ReduceAndTest(0, 1)
    _ReduceAndTest(0, 2)
    _ReduceAndTest(1, 2)
    _ReduceAndTest(0, 1, 2)

  def testReduceWindowValidUnitStridesF32(self):
    input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    c = self._NewComputation()
    c.ReduceWindow(
        operand=c.Constant(input_array),
        init_value=c.ConstantF32Scalar(0),
        computation_to_apply=self._CreateBinaryAddF32Computation(),
        window_dimensions=(2, 1),
        window_strides=(1, 1),
        padding=xla_client.PaddingType.VALID)
    self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])

  def testReduceWindowSameUnitStridesF32(self):
    input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    c = self._NewComputation()
    c.ReduceWindow(
        operand=c.Constant(input_array),
        init_value=c.ConstantF32Scalar(0),
        computation_to_apply=self._CreateBinaryAddF32Computation(),
        window_dimensions=(2, 1),
        window_strides=(1, 1),
        padding=xla_client.PaddingType.SAME)
    self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])

  def testReduceWindowValidGeneralStridesF32(self):
    input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    c = self._NewComputation()
    c.ReduceWindow(
        operand=c.Constant(input_array),
        init_value=c.ConstantF32Scalar(0),
        computation_to_apply=self._CreateBinaryAddF32Computation(),
        window_dimensions=(2, 1),
        window_strides=(1, 2),
        padding=xla_client.PaddingType.VALID)
    self._ExecuteAndCompareClose(c, expected=[[5., 9.]])

  def testReduceWindowValidUnitStridesF64(self):
    input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    c = self._NewComputation()
    c.ReduceWindow(
        operand=c.Constant(input_array),
        init_value=c.ConstantF64Scalar(0),
        computation_to_apply=self._CreateBinaryAddF64Computation(),
        window_dimensions=(2, 1),
        window_strides=(1, 1),
        padding=xla_client.PaddingType.VALID)
    self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])

  def testReduceWindowSameUnitStridesF64(self):
    input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    c = self._NewComputation()
    c.ReduceWindow(
        operand=c.Constant(input_array),
        init_value=c.ConstantF64Scalar(0),
        computation_to_apply=self._CreateBinaryAddF64Computation(),
        window_dimensions=(2, 1),
        window_strides=(1, 1),
        padding=xla_client.PaddingType.SAME)
    self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
2),\n padding=xla_client.PaddingType.VALID)\n self._ExecuteAndCompareClose(c, expected=[[5., 9.]])\n\n def testWhileF32(self):\n cond = self._CreateTestF32Lt10Computation()\n body = self._CreateMulF32By2Computation()\n c = self._NewComputation()\n init = c.ConstantF32Scalar(1.)\n c.While(cond, body, init)\n self._ExecuteAndCompareClose(c, expected=16.)\n\n def testWhileF64(self):\n cond = self._CreateTestF64Lt10Computation()\n body = self._CreateMulF64By2Computation()\n c = self._NewComputation()\n init = c.ConstantF64Scalar(1.)\n c.While(cond, body, init)\n self._ExecuteAndCompareClose(c, expected=16.)\n\n def testConditionalTrue(self):\n c = self._NewComputation()\n pred = c.ConstantPredScalar(True)\n true_operand = c.ConstantF32Scalar(3.)\n true_computation = self._CreateMulF32By2Computation()\n false_operand = c.ConstantF32Scalar(2.)\n false_computation = self._CreateConstantF32Computation()\n c.Conditional(pred, true_operand, true_computation, false_operand,\n false_computation)\n self._ExecuteAndCompareClose(c, expected=6.)\n\n def testConditionalFalse(self):\n c = self._NewComputation()\n pred = c.ConstantPredScalar(False)\n true_operand = c.ConstantF32Scalar(3.)\n true_computation = self._CreateMulF32By2Computation()\n false_operand = c.ConstantF32Scalar(2.)\n false_computation = self._CreateConstantF32Computation()\n c.Conditional(pred, true_operand, true_computation, false_operand,\n false_computation)\n self._ExecuteAndCompareClose(c, expected=1.)\n\n def testInfeedS32Values(self):\n to_infeed = NumpyArrayS32([1, 2, 3, 4])\n c = self._NewComputation()\n c.Infeed(xla_client.Shape.from_pyval(to_infeed[0]))\n compiled_c = c.Build().Compile()\n for item in to_infeed:\n xla_client.transfer_to_infeed(item)\n\n for item in to_infeed:\n result = compiled_c.ExecuteWithPythonValues()\n self.assertEqual(result, item)\n\n def testInfeedThenOutfeedS32(self):\n to_round_trip = NumpyArrayS32([1, 2, 3, 4])\n c = self._NewComputation()\n x = c.Infeed(xla_client.Shape.from_pyval(to_round_trip[0]))\n c.Outfeed(x)\n\n compiled_c = c.Build().Compile()\n\n for want in to_round_trip:\n execution = threading.Thread(target=compiled_c.Execute)\n execution.start()\n xla_client.transfer_to_infeed(want)\n got = xla_client.transfer_from_outfeed(\n xla_client.Shape.from_pyval(to_round_trip[0]))\n execution.join()\n self.assertEqual(want, got)\n\n def testScatter(self):\n a = np.arange(9).astype(np.int32).reshape((3, 3))\n scatter_indices = np.array([0, 2], dtype=np.int32)\n updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)\n\n dnums = xla_client.ScatterDimensionNumbers()\n dnums.update_window_dims.append(1)\n dnums.inserted_window_dims.append(0)\n dnums.scatter_dims_to_operand_dims.append(0)\n dnums.index_vector_dim = 1\n\n c = self._NewComputation()\n c.Scatter(\n c.Constant(a), c.Constant(scatter_indices), c.Constant(updates),\n self._CreateBinaryAddS32Computation(), dnums)\n expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]], dtype=np.int32)\n self._ExecuteAndCompareClose(c, expected=expected)\n\n\nclass ErrorTest(ComputationTest):\n\n def setUp(self):\n self.f32_scalar_2 = NumpyArrayF32(2.0)\n self.s32_scalar_2 = NumpyArrayS32(2)\n\n def testCompileWithWrongElementTypeInLayout(self):\n c = self._NewComputation()\n c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())\n c.ParameterFromNumpy(self.s32_scalar_2)\n c.ClearOpMetadata()\n\n options = xla_client.CompileOptions()\n options.argument_layouts = [xla_client.Shape.array_shape(np.float32, [])]\n\n def TestFun():\n return 
c.Build().Compile(compile_options=options)\n\n self.assertRaisesRegexp(\n RuntimeError, r\".*Invalid argument shape.*\"\n r\"expected s32\\[\\], got f32\\[\\].*\", TestFun)\n\n def testInvokeWithWrongElementType(self):\n c = self._NewComputation()\n c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())\n c.ParameterFromNumpy(self.s32_scalar_2)\n c.ClearOpMetadata()\n\n def TestFun():\n return c.Build().Compile().ExecuteWithPythonValues([self.f32_scalar_2])\n\n self.assertRaisesRegexp(\n RuntimeError, r\"Invalid argument: Argument does not match.*\"\n r\"want s32\\[\\], got f32\\[\\].*\", TestFun)\n\n\nclass ComputationRootTest(ComputationTest):\n \"\"\"Tests related to setting the root of the computation.\"\"\"\n\n def testComputationRootDifferentFromLastOp(self):\n c = self._NewComputation()\n x = c.ParameterFromNumpy(NumpyArrayF32(2.0))\n result = c.Add(x, c.ConstantF32Scalar(3.14))\n extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable\n\n arg = NumpyArrayF32(1.0)\n compiled_c = c.Build(result).Compile()\n ans = compiled_c.ExecuteWithPythonValues([arg])\n np.testing.assert_allclose(ans, 4.14)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.int8", "numpy.dot", "numpy.asfortranarray", "numpy.exp", "tensorflow.compiler.xla.python.xla_client.GatherDimensionNumbers", "numpy.expm1", "tensorflow.compiler.xla.python.xla_client.Shape.array_shape", "numpy.dtype", "tensorflow.compiler.xla.python.xla_client.Shape.from_pyval", "tensorflow.compiler.xla.python.xla_client.ScatterDimensionNumbers", "numpy.log", "numpy.log1p", "tensorflow.compiler.xla.python.custom_call_for_test.cpu_custom_call_targets.items", "numpy.tanh", "numpy.prod", "numpy.transpose", "numpy.arange", "tensorflow.compiler.xla.python.xla_client.LocalBuffer.from_pyval", "tensorflow.compiler.xla.python.xla_client.CurrentSourceInfoMetadata", "tensorflow.compiler.xla.python.xla_client.register_cpu_custom_call_target", "numpy.array", "numpy.matmul", "numpy.zeros", "numpy.testing.assert_equal", "numpy.round", "tensorflow.compiler.xla.python.xla_client.PaddingConfigDimension", "numpy.float32", "tensorflow.compiler.xla.python.xla_client.PaddingConfig", "tensorflow.compiler.xla.python.xla_client.transfer_to_infeed", "numpy.floor", "numpy.ceil", "numpy.random.RandomState", "tensorflow.compiler.xla.python.xla_client.CompileOptions", "numpy.abs", "numpy.all", "tensorflow.compiler.xla.python.xla_client.DotDimensionNumbers", "tensorflow.compiler.xla.python.xla_client.ComputationBuilder", "numpy.asanyarray", "numpy.unique" ] ]
bbchond/user-activity-generator
[ "d8b75b10a194f9526b5553d750196188600d9816" ]
[ "src/classifier/knn.py" ]
[ "from sklearn.neighbors import KNeighborsClassifier\n\nknn = KNeighborsClassifier(n_neighbors=3)\n" ]
[ [ "sklearn.neighbors.KNeighborsClassifier" ] ]
DNGros/R-U-A-Robot
[ "f2b9331f21dd0d2a237a9ed968c2b609c4ad979d" ]
[ "baselines/googleassistant/google_assistant_run.py" ]
[ "from pathlib import Path\nimport unicodedata, re, subprocess\nfrom tqdm import tqdm\nimport numpy as np\nimport random\nimport time\nimport pandas as pd\n\ncur_file = Path(__file__).parent.absolute()\n\n\n# Adapted from https://github.com/googlesamples/assistant-sdk-python/blob/ce76c508fdf076678/\n# google-assistant-sdk/googlesamples/assistant/grpc/textinput.py\n# Copyright (C) 2017 Google Inc. Avail under apache 2.0 license\n\nimport os\nimport logging\nimport json\n\nimport click\nimport google.auth.transport.grpc\nimport google.auth.transport.requests\nimport google.oauth2.credentials\n\nfrom google.assistant.embedded.v1alpha2 import (\n embedded_assistant_pb2,\n embedded_assistant_pb2_grpc\n)\n\ntry:\n from . import (\n assistant_helpers,\n browser_helpers,\n )\nexcept (SystemError, ImportError):\n import assistant_helpers\n import browser_helpers\n\n\nASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'\nDEFAULT_GRPC_DEADLINE = 60 * 3 + 5\nPLAYING = embedded_assistant_pb2.ScreenOutConfig.PLAYING\n\n\ndef slugify(value, allow_unicode=False):\n # https://stackoverflow.com/questions/295135/turn-a-string-into-a-valid-filename\n \"\"\"\n Taken from https://github.com/django/django/blob/master/django/utils/text.py\n Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated\n dashes to single dashes. Remove characters that aren't alphanumerics,\n underscores, or hyphens. Convert to lowercase. Also strip leading and\n trailing whitespace, dashes, and underscores.\n \"\"\"\n value = str(value)\n if allow_unicode:\n value = unicodedata.normalize('NFKC', value)\n else:\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')\n value = re.sub(r'[^\\w\\s-]', '', value.lower())\n return re.sub(r'[-\\s]+', '-', value).strip('-_')\n\n\nclass SampleTextAssistant(object):\n \"\"\"Sample Assistant that supports text based conversations.\n\n Args:\n language_code: language for the conversation.\n device_model_id: identifier of the device model.\n device_id: identifier of the registered device instance.\n display: enable visual display of assistant response.\n channel: authorized gRPC channel for connection to the\n Google Assistant API.\n deadline_sec: gRPC deadline in seconds for Google Assistant API call.\n \"\"\"\n\n def __init__(self, language_code, device_model_id, device_id,\n display, channel, deadline_sec):\n self.language_code = language_code\n self.device_model_id = device_model_id\n self.device_id = device_id\n self.conversation_state = None\n # Force reset of first conversation.\n self.is_new_conversation = True\n self.display = display\n self.assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(\n channel\n )\n self.deadline = deadline_sec\n\n def __enter__(self):\n return self\n\n def __exit__(self, etype, e, traceback):\n if e:\n return False\n\n def assist(self, text_query):\n \"\"\"Send a text request to the Assistant and playback the response.\n \"\"\"\n def iter_assist_requests():\n config = embedded_assistant_pb2.AssistConfig(\n audio_out_config=embedded_assistant_pb2.AudioOutConfig(\n encoding='LINEAR16',\n sample_rate_hertz=16000,\n volume_percentage=0,\n ),\n dialog_state_in=embedded_assistant_pb2.DialogStateIn(\n language_code=self.language_code,\n conversation_state=self.conversation_state,\n is_new_conversation=self.is_new_conversation,\n ),\n device_config=embedded_assistant_pb2.DeviceConfig(\n device_id=self.device_id,\n device_model_id=self.device_model_id,\n ),\n text_query=text_query,\n )\n # Continue current 
conversation with later requests.\n #self.is_new_conversation = False\n self.is_new_conversation = True\n\n if self.display:\n config.screen_out_config.screen_mode = PLAYING\n req = embedded_assistant_pb2.AssistRequest(config=config)\n assistant_helpers.log_assist_request_without_audio(req)\n yield req\n\n text_response = None\n html_response = None\n all_resp_objs = []\n audio_bytes = []\n for resp in self.assistant.Assist(iter_assist_requests(),\n self.deadline):\n assistant_helpers.log_assist_response_without_audio(resp)\n if resp.screen_out.data:\n html_response = resp.screen_out.data\n if resp.dialog_state_out.conversation_state:\n conversation_state = resp.dialog_state_out.conversation_state\n self.conversation_state = conversation_state\n if resp.dialog_state_out.supplemental_display_text:\n text_response = resp.dialog_state_out.supplemental_display_text\n if resp.audio_out.audio_data:\n audio_bytes.append(resp.audio_out.audio_data)\n all_resp_objs.append(resp)\n return text_response, html_response, resp, all_resp_objs, b\"\".join(audio_bytes)\n\n\n#@click.command()\n#@click.option('--api-endpoint', default=ASSISTANT_API_ENDPOINT,\n# metavar='<api endpoint>', show_default=True,\n# help='Address of Google Assistant API service.')\n#@click.option('--credentials',\n# metavar='<credentials>', show_default=True,\n# default=os.path.join(click.get_app_dir('google-oauthlib-tool'),\n# 'credentials.json'),\n# help='Path to read OAuth2 credentials.')\n#@click.option('--device-model-id',\n# metavar='<device model id>',\n# required=True,\n# help=(('Unique device model identifier, '\n# 'if not specifed, it is read from --device-config')))\n#@click.option('--device-id',\n# metavar='<device id>',\n# required=True,\n# help=(('Unique registered device instance identifier, '\n# 'if not specified, it is read from --device-config, '\n# 'if no device_config found: a new device is registered '\n# 'using a unique id and a new device config is saved')))\n#@click.option('--lang', show_default=True,\n# metavar='<language code>',\n# default='en-US',\n# help='Language code of the Assistant')\n#@click.option('--display', is_flag=True, default=False,\n# help='Enable visual display of Assistant responses in HTML.')\n#@click.option('--verbose', '-v', is_flag=True, default=False,\n# help='Verbose logging.')\n#@click.option('--grpc-deadline', default=DEFAULT_GRPC_DEADLINE,\n# metavar='<grpc deadline>', show_default=True,\n# help='gRPC deadline in seconds')\ndef main(device_model_id, device_id,\n api_endpoint=ASSISTANT_API_ENDPOINT,\n credentials=os.path.join(click.get_app_dir('google-oauthlib-tool'), 'credentials.json'),\n lang='en-US', display=False, verbose=False,\n grpc_deadline=DEFAULT_GRPC_DEADLINE,\n *args,\n **kwargs\n):\n # Setup logging.\n logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)\n\n # Load OAuth 2.0 credentials.\n try:\n with open(credentials, 'r') as f:\n credentials = google.oauth2.credentials.Credentials(token=None,\n **json.load(f))\n http_request = google.auth.transport.requests.Request()\n credentials.refresh(http_request)\n except Exception as e:\n logging.error('Error loading credentials: %s', e)\n logging.error('Run google-oauthlib-tool to initialize '\n 'new OAuth 2.0 credentials.')\n return\n\n def query_assistant(text, display: bool):\n nonlocal credentials\n # Create an authorized gRPC channel.\n grpc_channel = google.auth.transport.grpc.secure_authorized_channel(\n credentials, http_request, api_endpoint)\n logging.info('Connecting to %s', api_endpoint)\n\n with 
SampleTextAssistant(lang, device_model_id, device_id, display,\n                                 grpc_channel, grpc_deadline) as assistant:\n            response_text, response_html, full_resp, all_resp_objs, audio_bytes = \\\n                assistant.assist(text_query=text)\n            #if response_text:\n            #    click.echo('<@assistant> %s' % response_text)\n            #else:\n            #    click.echo('<No Response Text>')\n            return response_text, response_html, full_resp, all_resp_objs, audio_bytes\n\n    #df = pd.read_csv(cur_file / \"../../datatoy/labels/needqueries.csv\")\n    df = pd.read_csv(cur_file / \"../../datatoy/outputs/needqueries.csv\")\n    col = \"Google Assistant Response\"\n    if col not in df:\n        df[col] = \"\"\n    for index, row in list(df.iterrows()):\n        print(\"Index\", index)\n        text = row.text\n        if not pd.isnull(row[col]):\n            print(\"Skipping because already data\")\n            continue\n        #text = \"are are you a digital assistant?\"\n        if str(row.ImpliedExtraContext) != \"0\":\n            continue\n        #text = \"am i speaking to a live person\"\n        row = row.copy()\n        #print(\"index:\", index)\n        response_text, response_html, full_resp, all_resp_objs, audio_bytes = query_assistant(\n            text,\n            display=False\n        )\n        if display and response_html:\n            system_browser = browser_helpers.system_browser\n            system_browser.display(response_html)\n        #print(full_resp)\n        #print(type(full_resp))\n        #print(dir(full_resp))\n        #print(all_resp_objs)\n        audio_path = (cur_file / \"audioresults\")\n        audio_path.mkdir(exist_ok=True)\n        audio_file = (audio_path / f\"{index}.{slugify(text)}.raw\")\n        audio_file.write_bytes(audio_bytes)\n        def play_audio():\n            subprocess.run([\n                *(\"play -t raw -r 16k -e signed -b 16 -c 1\").split(),\n                str(audio_file),\n                *(\"trim 0 00:10\").split(),\n            ])\n\n        use_text = response_text if response_text is not None else \"<NONE>\"\n        if response_text is None:\n            while True:\n                print(\"Query:\", text)\n                print(\"TEXT\", response_text)\n                print(\"Use Text:\", use_text)\n                play_audio()\n                prompt = click.prompt(\"(P)lay Again, (C)ontinue, (E)dit:\")\n                if prompt == \"C\":\n                    break\n                if prompt == \"P\":\n                    play_audio()\n                if prompt == \"E\":\n                    use_text = \"<MANUAL>: \" + click.prompt(\"New Text:\")\n                    break\n        else:\n            print(\"Query:\", text)\n            print(\"TEXT\", response_text)\n            print(\"Use Text:\", use_text)\n            print(\"Add sleep.\")\n            time.sleep(random.randrange(1, 10))\n\n        df.loc[index, col] = use_text\n        df.to_csv(cur_file / \"../../datatoy/outputs/needqueries.csv\", index=False)\n        #print(full_resp)\n\n\nif __name__ == \"__main__\":\n    config = json.loads((cur_file / \"google_config.json\").read_text())\n    main(\n        device_model_id=config['device-model-id'],\n        device_id=config[\"device-id\"],\n        verbose=False\n    )" ]
[ [ "pandas.isnull", "pandas.read_csv" ] ]
tbrlpld/wagtail-ab-testing
[ "ab12cc164ebd8bc97a30a475252d014d9c79971a" ]
[ "wagtail_ab_testing/models.py" ]
[ "import random\n\nfrom datetime import datetime, timedelta, timezone as tz\n\nimport scipy.stats\nimport numpy as np\nfrom django.conf import settings\nfrom django.core.validators import MinValueValidator\nfrom django.db import connection, models, transaction\nfrom django.db.models import Q, Sum\nfrom django.dispatch import receiver\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _, gettext_lazy as __\nfrom wagtail.core.signals import page_unpublished\n\nfrom .events import get_event_types\n\n\nclass AbTestManager(models.Manager):\n def get_current_for_page(self, page):\n return self.get_queryset().filter(page=page).exclude(status__in=[AbTest.STATUS_CANCELLED, AbTest.STATUS_COMPLETED]).first()\n\n\nclass AbTest(models.Model):\n \"\"\"\n Represents an A/B test that has been set up by the user.\n\n The live page content is used as the control, the revision pointed to in\n the `.variant_revision` field contains the changes that are being tested.\n \"\"\"\n\n STATUS_DRAFT = 'draft'\n STATUS_RUNNING = 'running'\n STATUS_PAUSED = 'paused'\n STATUS_CANCELLED = 'cancelled'\n # These two sound similar, but there's a difference:\n # 'Finished' means that we've reached the sample size and testing has stopped\n # but the user still needs to decide whether to publish the variant version\n # or revert back to the control.\n # Once they've decided and that action has taken place, the test status is\n # updated to 'Completed'.\n STATUS_FINISHED = 'finished'\n STATUS_COMPLETED = 'completed'\n\n STATUS_CHOICES = [\n (STATUS_DRAFT, __('Draft')),\n (STATUS_RUNNING, __('Running')),\n (STATUS_PAUSED, __('Paused')),\n (STATUS_CANCELLED, __('Cancelled')),\n (STATUS_FINISHED, __('Finished')),\n (STATUS_COMPLETED, __('Completed')),\n ]\n\n VERSION_CONTROL = 'control'\n VERSION_VARIANT = 'variant'\n\n VERSION_CHOICES = [\n (VERSION_CONTROL, __('Control')),\n (VERSION_VARIANT, __('Variant')),\n ]\n\n COMPLETION_ACTION_DO_NOTHING = 'do-nothing'\n COMPLETION_ACTION_REVERT = 'revert'\n COMPLETION_ACTION_PUBLISH = 'publish'\n\n COMPLETION_ACTION_CHOICES = [\n (COMPLETION_ACTION_DO_NOTHING, \"Do nothing\"),\n (COMPLETION_ACTION_REVERT, \"Revert\"),\n (COMPLETION_ACTION_PUBLISH, \"Publish\"),\n ]\n\n page = models.ForeignKey('wagtailcore.Page', on_delete=models.CASCADE, related_name='ab_tests')\n name = models.CharField(max_length=255)\n hypothesis = models.TextField(blank=True)\n variant_revision = models.ForeignKey('wagtailcore.PageRevision', on_delete=models.CASCADE, related_name='+')\n goal_event = models.CharField(max_length=255)\n goal_page = models.ForeignKey('wagtailcore.Page', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')\n sample_size = models.PositiveIntegerField(validators=[MinValueValidator(1)])\n created_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.SET_NULL, null=True, blank=True, related_name='+')\n status = models.CharField(max_length=20, choices=STATUS_CHOICES, default=STATUS_DRAFT)\n winning_version = models.CharField(max_length=9, null=True, choices=VERSION_CHOICES)\n first_started_at = models.DateTimeField(null=True)\n\n # Because an admin can pause/resume tests, we need to make sure we record the amount of time it has been running\n previous_run_duration = models.DurationField(default=timedelta(0))\n current_run_started_at = models.DateTimeField(null=True)\n\n objects = AbTestManager()\n\n def get_goal_event_display(self):\n \"\"\"\n Returns the display name of the goal event.\n \"\"\"\n 
for event_type_slug, event_type in get_event_types().items():\n            if event_type_slug == self.goal_event:\n                return event_type.name\n\n        return self.goal_event\n\n    def start(self):\n        \"\"\"\n        Starts/unpauses the test.\n        \"\"\"\n        if self.status in [self.STATUS_DRAFT, self.STATUS_PAUSED]:\n            self.current_run_started_at = timezone.now()\n\n            if self.status == self.STATUS_DRAFT:\n                self.first_started_at = self.current_run_started_at\n\n            self.status = self.STATUS_RUNNING\n\n            self.save(update_fields=['status', 'current_run_started_at', 'first_started_at'])\n\n    def pause(self):\n        \"\"\"\n        Pauses the test.\n        \"\"\"\n        if self.status == self.STATUS_RUNNING:\n            self.status = self.STATUS_PAUSED\n\n            if self.current_run_started_at is not None:\n                self.previous_run_duration += timezone.now() - self.current_run_started_at\n                self.current_run_started_at = None\n\n            self.save(update_fields=['status', 'previous_run_duration', 'current_run_started_at'])\n\n    def get_results_url(self):\n        \"\"\"\n        Returns the URL to the page where the user can see the results.\n\n        While the test is running, this is the URL of the edit view.\n        Afterwards, we need to send them to a separate view as the\n        page editor returns to normal.\n        \"\"\"\n        if self.status in [AbTest.STATUS_COMPLETED, AbTest.STATUS_CANCELLED]:\n            return reverse('wagtail_ab_testing:results', args=[self.page_id, self.id])\n\n        else:\n            return reverse('wagtailadmin_pages:edit', args=[self.page_id])\n\n    def total_running_duration(self):\n        \"\"\"\n        Returns the total duration that this test has been running.\n        \"\"\"\n        duration = self.previous_run_duration\n\n        if self.status == self.STATUS_RUNNING:\n            duration += timezone.now() - self.current_run_started_at\n\n        return duration\n\n    def cancel(self):\n        \"\"\"\n        Cancels the test.\n        \"\"\"\n        self.status = self.STATUS_CANCELLED\n\n        self.save(update_fields=['status'])\n\n    def finish(self):\n        \"\"\"\n        Finishes the testing.\n\n        Note that this doesn't 'complete' the test: a finished test means\n        that testing is no longer happening. The test is not complete until\n        the user decides on the outcome of the test (keep the control or\n        publish the variant). This decision is set using the .complete()\n        method.\n        \"\"\"\n        self.status = self.STATUS_FINISHED\n        self.winning_version = self.check_for_winner()\n\n        self.save(update_fields=['status', 'winning_version'])\n\n    @transaction.atomic\n    def complete(self, action, user=None):\n        \"\"\"\n        Completes the test and carries out the specified action.\n\n        Actions can be:\n        - AbTest.COMPLETION_ACTION_DO_NOTHING - This just completes\n          the test but does nothing to the page. The control will\n          remain the published version and the variant will be\n          in draft.\n        - AbTest.COMPLETION_ACTION_REVERT - This completes the test\n          and also creates a new revision to revert the content back\n          to what it was in the control while the test was taking\n          place.\n        - AbTest.COMPLETION_ACTION_PUBLISH - This completes the test\n          and also publishes the variant revision.\n        \"\"\"\n        self.status = self.STATUS_COMPLETED\n        self.save(update_fields=['status'])\n\n        if action == AbTest.COMPLETION_ACTION_DO_NOTHING:\n            pass\n\n        elif action == AbTest.COMPLETION_ACTION_REVERT:\n            # Create a new revision with the content of the live page and publish it\n            self.page.save_revision(user=user, log_action='wagtail.revert').publish(user=user)\n\n        elif action == AbTest.COMPLETION_ACTION_PUBLISH:\n            self.variant_revision.publish(user=user)\n\n    def add_participant(self, version=None):\n        \"\"\"\n        Inserts a new participant into the log. 
Returns the version that they should be shown.\n \"\"\"\n # Get current numbers of participants for each version\n stats = self.hourly_logs.aggregate(\n control_participants=Sum('participants', filter=Q(version=self.VERSION_CONTROL)),\n variant_participants=Sum('participants', filter=Q(version=self.VERSION_VARIANT)),\n )\n control_participants = stats['control_participants'] or 0\n variant_participants = stats['variant_participants'] or 0\n\n # Create an equal number of participants for each version\n if version is None:\n if variant_participants > control_participants:\n version = self.VERSION_CONTROL\n\n elif variant_participants < control_participants:\n version = self.VERSION_VARIANT\n\n else:\n version = random.choice([\n self.VERSION_CONTROL,\n self.VERSION_VARIANT,\n ])\n\n # Add new participant to statistics model\n AbTestHourlyLog._increment_stats(self, version, 1, 0)\n\n # If we have now reached the required sample size, end the test\n # Note: we don't care too much that the last few participants won't\n # get a chance to turn into conversions. It's unlikely to make a\n # significant difference to the results.\n # Note: Adding 1 to account for the new participant\n if control_participants + variant_participants + 1 >= self.sample_size:\n self.finish()\n\n return version\n\n def log_conversion(self, version, *, time=None):\n \"\"\"\n Logs when a participant completed the goal.\n\n Note: It's up to the caller to make sure that this doesn't get called more than once\n per participant.\n \"\"\"\n AbTestHourlyLog._increment_stats(self, version, 0, 1, time=time)\n\n def check_for_winner(self):\n \"\"\"\n Performs a Chi-Squared test to check if there is a clear winner.\n\n Returns VERSION_CONTROL or VERSION_VARIANT if there is one. Otherwise, it returns None.\n\n For more information on what the Chi-Squared test does, see:\n https://www.evanmiller.org/ab-testing/chi-squared.html\n https://towardsdatascience.com/a-b-testing-with-chi-squared-test-to-maximize-conversions-and-ctrs-6599271a2c31\n \"\"\"\n # Fetch stats from database\n stats = self.hourly_logs.aggregate(\n control_participants=Sum('participants', filter=Q(version=self.VERSION_CONTROL)),\n control_conversions=Sum('conversions', filter=Q(version=self.VERSION_CONTROL)),\n variant_participants=Sum('participants', filter=Q(version=self.VERSION_VARIANT)),\n variant_conversions=Sum('conversions', filter=Q(version=self.VERSION_VARIANT)),\n )\n control_participants = stats['control_participants'] or 0\n control_conversions = stats['control_conversions'] or 0\n variant_participants = stats['variant_participants'] or 0\n variant_conversions = stats['variant_conversions'] or 0\n\n if not control_conversions and not variant_conversions:\n return\n\n if control_conversions > control_participants or variant_conversions > variant_participants:\n # Something's up. 
I'm sure it's already clear in the UI what's going on, so let's not crash\n            return\n\n        # Create a numpy array with values to pass in to Chi-Squared test\n        control_failures = control_participants - control_conversions\n        variant_failures = variant_participants - variant_conversions\n\n        if control_failures == 0 and variant_failures == 0:\n            # Prevent this error: \"The internally computed table of expected frequencies has a zero element at (0, 1).\"\n            return\n\n        T = np.array([[control_conversions, control_failures], [variant_conversions, variant_failures]])\n\n        # Perform Chi-Squared test\n        p = scipy.stats.chi2_contingency(T, correction=False)[1]\n\n        # Check if there is a clear winner\n        required_confidence_level = 0.95  # 95%\n        if 1 - p > required_confidence_level:\n            # There is a clear winner!\n            # Return the one with the highest success rate\n            if (control_conversions / control_participants) > (variant_conversions / variant_participants):\n                return self.VERSION_CONTROL\n            else:\n                return self.VERSION_VARIANT\n\n    def get_status_description(self):\n        \"\"\"\n        Returns a string that describes the status in more detail.\n        \"\"\"\n        status = self.get_status_display()\n\n        if self.status == AbTest.STATUS_RUNNING:\n            participants = self.hourly_logs.aggregate(participants=Sum('participants'))['participants'] or 0\n            completeness_percentage = int((participants * 100) / self.sample_size)\n            return status + f\" ({completeness_percentage}%)\"\n\n        elif self.status in [AbTest.STATUS_FINISHED, AbTest.STATUS_COMPLETED]:\n            if self.winning_version == AbTest.VERSION_CONTROL:\n                return status + \" (\" + _(\"Control won\") + \")\"\n\n            elif self.winning_version == AbTest.VERSION_VARIANT:\n                return status + \" (\" + _(\"Variant won\") + \")\"\n\n            else:\n                return status + \" (\" + _(\"No clear winner\") + \")\"\n\n        else:\n            return status\n\n\nclass AbTestHourlyLog(models.Model):\n    ab_test = models.ForeignKey(AbTest, on_delete=models.CASCADE, related_name='hourly_logs')\n    version = models.CharField(max_length=9, choices=AbTest.VERSION_CHOICES)\n    date = models.DateField()\n    # UTC hour. 
Values range from 0 to 23\n hour = models.PositiveSmallIntegerField()\n\n # New participants added in this hour\n participants = models.PositiveIntegerField(default=0)\n\n # New or existing participants that converted in this hour\n conversions = models.PositiveIntegerField(default=0)\n\n @classmethod\n def _increment_stats(cls, ab_test, version, participants, conversions, *, time=None):\n \"\"\"\n Increments the participants/conversions statistics for the given ab_test/version.\n\n This will create a new AbTestHourlyLog record if one doesn't exist for the current hour.\n \"\"\"\n time = time.astimezone(tz.utc) if time else datetime.utcnow()\n date = time.date()\n hour = time.hour\n\n if connection.vendor == 'postgresql':\n # Use fast, atomic UPSERT query on PostgreSQL\n with connection.cursor() as cursor:\n table_name = connection.ops.quote_name(cls._meta.db_table)\n query = \"\"\"\n INSERT INTO %s (ab_test_id, version, date, hour, participants, conversions)\n VALUES (%%s, %%s, %%s, %%s, %%s, %%s)\n ON CONFLICT (ab_test_id, version, date, hour)\n DO UPDATE SET participants = %s.participants + %%s, conversions = %s.conversions + %%s;\n \"\"\" % (table_name, table_name, table_name)\n\n cursor.execute(query, [\n ab_test.id,\n version,\n date,\n hour,\n participants,\n conversions,\n participants,\n conversions\n ])\n else:\n # Fall back to running two queries (with small potential for race conditions if things run slowly)\n hourly_log, created = cls.objects.get_or_create(\n ab_test=ab_test,\n version=version,\n date=date,\n hour=hour,\n defaults={\n 'participants': participants,\n 'conversions': conversions,\n }\n )\n\n if not created:\n hourly_log.participants += participants\n hourly_log.conversions += conversions\n hourly_log.save(update_fields=['participants', 'conversions'])\n\n class Meta:\n ordering = ['ab_test', 'version', 'date', 'hour']\n unique_together = [\n ('ab_test', 'version', 'date', 'hour'),\n ]\n\n\n@receiver(page_unpublished)\ndef cancel_on_page_unpublish(instance, **kwargs):\n for ab_test in AbTest.objects.filter(page=instance, status__in=[AbTest.STATUS_DRAFT, AbTest.STATUS_RUNNING, AbTest.STATUS_PAUSED]):\n ab_test.cancel()\n\n for ab_test in AbTest.objects.filter(page=instance, status=AbTest.STATUS_FINISHED):\n ab_test.complete(AbTest.COMPLETION_ACTION_DO_NOTHING)\n" ]
[ [ "numpy.array" ] ]
shenyunhang/JTSM
[ "40cd5ce67d46852402c5bc752960c0e8922993f0" ]
[ "projects/WSL/tools/visualize_json_results.py" ]
[ "#!/usr/bin/env python\n# Copyright (c) Facebook, Inc. and its affiliates.\n\nimport argparse\nimport json\nimport numpy as np\nimport os\nfrom collections import defaultdict\nimport cv2\nimport tqdm\n\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom detectron2.structures import Boxes, BoxMode, Instances\nfrom detectron2.utils.file_io import PathManager\nfrom detectron2.utils.logger import setup_logger\nfrom detectron2.utils.visualizer import Visualizer\n\nimport wsl.data.datasets\n\n\ndef create_instances(predictions, image_size):\n ret = Instances(image_size)\n\n score = np.asarray([x[\"score\"] for x in predictions])\n chosen = (score > args.conf_threshold).nonzero()[0]\n score = score[chosen]\n bbox = np.asarray([predictions[i][\"bbox\"] for i in chosen]).reshape(-1, 4)\n bbox = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)\n\n labels = np.asarray([dataset_id_map(predictions[i][\"category_id\"]) for i in chosen])\n\n ret.scores = score\n ret.pred_boxes = Boxes(bbox)\n ret.pred_classes = labels\n\n try:\n ret.pred_masks = [predictions[i][\"segmentation\"] for i in chosen]\n except KeyError:\n pass\n return ret\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"A script that visualizes the json predictions from COCO or LVIS dataset.\"\n )\n parser.add_argument(\"--input\", required=True, help=\"JSON file produced by the model\")\n parser.add_argument(\"--output\", required=True, help=\"output directory\")\n parser.add_argument(\"--dataset\", help=\"name of the dataset\", default=\"coco_2017_val\")\n parser.add_argument(\"--conf-threshold\", default=0.5, type=float, help=\"confidence threshold\")\n args = parser.parse_args()\n\n logger = setup_logger()\n\n with PathManager.open(args.input, \"r\") as f:\n predictions = json.load(f)\n\n pred_by_image = defaultdict(list)\n for p in predictions:\n pred_by_image[p[\"image_id\"]].append(p)\n\n dicts = list(DatasetCatalog.get(args.dataset))\n metadata = MetadataCatalog.get(args.dataset)\n if hasattr(metadata, \"thing_dataset_id_to_contiguous_id\"):\n\n def dataset_id_map(ds_id):\n return metadata.thing_dataset_id_to_contiguous_id[ds_id]\n\n elif \"lvis\" in args.dataset:\n # LVIS results are in the same format as COCO results, but have a different\n # mapping from dataset category id to contiguous category id in [0, #categories - 1]\n def dataset_id_map(ds_id):\n return ds_id - 1\n\n else:\n raise ValueError(\"Unsupported dataset: {}\".format(args.dataset))\n\n os.makedirs(args.output, exist_ok=True)\n\n for dic in tqdm.tqdm(dicts):\n img = cv2.imread(dic[\"file_name\"], cv2.IMREAD_COLOR)[:, :, ::-1]\n basename = os.path.basename(dic[\"file_name\"])\n\n predictions = create_instances(pred_by_image[dic[\"image_id\"]], img.shape[:2])\n vis = Visualizer(img, metadata)\n vis_pred = vis.draw_instance_predictions(predictions).get_image()\n\n vis = Visualizer(img, metadata)\n vis_gt = vis.draw_dataset_dict(dic).get_image()\n\n concat = np.concatenate((vis_pred, vis_gt), axis=1)\n cv2.imwrite(os.path.join(args.output, basename), concat[:, :, ::-1])\n" ]
[ [ "numpy.concatenate", "numpy.asarray" ] ]
Jonas-Meier/FrustratinglySimpleFsDet
[ "c17af714b0a67e2ba0bfdb079659db48de836fd6" ]
[ "fsdet/modeling/roi_heads/roi_heads.py" ]
[ "\"\"\"Implement ROI_heads.\"\"\"\nimport copy\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nimport logging\nfrom detectron2.data import MetadataCatalog\nfrom detectron2.layers import ShapeSpec\nfrom detectron2.modeling.backbone.resnet import BottleneckBlock, make_stage\nfrom detectron2.modeling.box_regression import Box2BoxTransform\nfrom detectron2.modeling.matcher import Matcher\nfrom detectron2.modeling.poolers import ROIPooler\nfrom detectron2.modeling.proposal_generator.proposal_utils import add_ground_truth_to_proposals\nfrom detectron2.modeling.sampling import subsample_labels\nfrom detectron2.structures import Boxes, Instances, pairwise_iou\nfrom detectron2.utils.events import get_event_storage\nfrom detectron2.utils.registry import Registry\nfrom typing import Dict, List\n\nfrom .box_head import build_box_head\nfrom .fast_rcnn import ROI_HEADS_OUTPUT_REGISTRY, FastRCNNOutputLayers, FastRCNNOutputs\n\nROI_HEADS_REGISTRY = Registry(\"ROI_HEADS\")\nROI_HEADS_REGISTRY.__doc__ = \"\"\"\nRegistry for ROI heads in a generalized R-CNN model.\nROIHeads take feature maps and region proposals, and\nperform per-region computation.\n\nThe registered object will be called with `obj(cfg, input_shape)`.\nThe call is expected to return an :class:`ROIHeads`.\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\n\ndef build_roi_heads(cfg, input_shape):\n \"\"\"\n Build ROIHeads defined by `cfg.MODEL.ROI_HEADS.NAME`.\n \"\"\"\n name = cfg.MODEL.ROI_HEADS.NAME\n return ROI_HEADS_REGISTRY.get(name)(cfg, input_shape)\n\n\ndef select_foreground_proposals(proposals, bg_label):\n \"\"\"\n Given a list of N Instances (for N images), each containing a `gt_classes` field,\n return a list of Instances that contain only instances with `gt_classes != -1 &&\n gt_classes != bg_label`.\n\n Args:\n proposals (list[Instances]): A list of N Instances, where N is the number of\n images in the batch.\n bg_label: label index of background class.\n\n Returns:\n list[Instances]: N Instances, each contains only the selected foreground instances.\n list[Tensor]: N boolean vector, correspond to the selection mask of\n each Instances object. 
True for selected instances.\n \"\"\"\n assert isinstance(proposals, (list, tuple))\n assert isinstance(proposals[0], Instances)\n assert proposals[0].has(\"gt_classes\")\n fg_proposals = []\n fg_selection_masks = []\n for proposals_per_image in proposals:\n gt_classes = proposals_per_image.gt_classes\n fg_selection_mask = (gt_classes != -1) & (gt_classes != bg_label)\n fg_idxs = fg_selection_mask.nonzero().squeeze(1)\n fg_proposals.append(proposals_per_image[fg_idxs])\n fg_selection_masks.append(fg_selection_mask)\n return fg_proposals, fg_selection_masks\n\n\nclass ROIHeads(torch.nn.Module):\n \"\"\"\n ROIHeads perform all per-region computation in an R-CNN.\n\n It contains logic of cropping the regions, extract per-region features,\n and make per-region predictions.\n\n It can have many variants, implemented as subclasses of this class.\n \"\"\"\n\n def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]):\n super(ROIHeads, self).__init__()\n\n # fmt: off\n self.batch_size_per_image = cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE\n self.positive_sample_fraction = cfg.MODEL.ROI_HEADS.POSITIVE_FRACTION\n self.test_score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST\n self.test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST\n self.test_detections_per_img = cfg.TEST.DETECTIONS_PER_IMAGE\n self.in_features = cfg.MODEL.ROI_HEADS.IN_FEATURES\n self.num_classes = cfg.MODEL.ROI_HEADS.NUM_CLASSES\n self.proposal_append_gt = cfg.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT\n self.feature_strides = {k: v.stride for k, v in input_shape.items()}\n self.feature_channels = {k: v.channels for k, v in input_shape.items()}\n self.cls_agnostic_bbox_reg = cfg.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG\n self.smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA\n # fmt: on\n\n # Matcher to assign box proposals to gt boxes\n self.proposal_matcher = Matcher(\n cfg.MODEL.ROI_HEADS.IOU_THRESHOLDS,\n cfg.MODEL.ROI_HEADS.IOU_LABELS,\n allow_low_quality_matches=False,\n )\n\n # Box2BoxTransform for bounding box regression\n self.box2box_transform = Box2BoxTransform(\n weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS\n )\n\n def _sample_proposals(self, matched_idxs, matched_labels, gt_classes):\n \"\"\"\n Based on the matching between N proposals and M groundtruth,\n sample the proposals and set their classification labels.\n\n Args:\n matched_idxs (Tensor): a vector of length N, each is the best-matched\n gt index in [0, M) for each proposal.\n matched_labels (Tensor): a vector of length N, the matcher's label\n (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal.\n gt_classes (Tensor): a vector of length M.\n\n Returns:\n Tensor: a vector of indices of sampled proposals. Each is in [0, N).\n Tensor: a vector of the same length, the classification label for\n each sampled proposal. 
Each sample is labeled as either a category in\n [0, num_classes) or the background (num_classes).\n \"\"\"\n has_gt = gt_classes.numel() > 0\n # Get the corresponding GT for each proposal\n if has_gt:\n gt_classes = gt_classes[matched_idxs]\n # Label unmatched proposals (0 label from matcher) as background (label=num_classes)\n gt_classes[matched_labels == 0] = self.num_classes\n # Label ignore proposals (-1 label)\n gt_classes[matched_labels == -1] = -1\n else:\n gt_classes = torch.zeros_like(matched_idxs) + self.num_classes\n\n sampled_fg_idxs, sampled_bg_idxs = subsample_labels(\n gt_classes,\n self.batch_size_per_image,\n self.positive_sample_fraction,\n self.num_classes,\n )\n\n sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0)\n return sampled_idxs, gt_classes[sampled_idxs]\n\n @torch.no_grad()\n def label_and_sample_proposals(self, proposals, targets):\n \"\"\"\n Prepare some proposals to be used to train the ROI heads.\n It performs box matching between `proposals` and `targets`, and assigns\n training labels to the proposals.\n It returns `self.batch_size_per_image` random samples from proposals and groundtruth boxes,\n with a fraction of positives that is no larger than `self.positive_sample_fraction.\n\n Args:\n See :meth:`ROIHeads.forward`\n\n Returns:\n list[Instances]:\n length `N` list of `Instances`s containing the proposals\n sampled for training. Each `Instances` has the following fields:\n - proposal_boxes: the proposal boxes\n - gt_boxes: the ground-truth box that the proposal is assigned to\n (this is only meaningful if the proposal has a label > 0; if label = 0\n then the ground-truth box is random)\n Other fields such as \"gt_classes\" that's included in `targets`.\n \"\"\"\n gt_boxes = [x.gt_boxes for x in targets]\n # Augment proposals with ground-truth boxes.\n # In the case of learned proposals (e.g., RPN), when training starts\n # the proposals will be low quality due to random initialization.\n # It's possible that none of these initial\n # proposals have high enough overlap with the gt objects to be used\n # as positive examples for the second stage components (box head,\n # cls head). Adding the gt boxes to the set of proposals\n # ensures that the second stage components will have some positive\n # examples from the start of training. 
For RPN, this augmentation improves\n # convergence and empirically improves box AP on COCO by about 0.5\n # points (under one tested configuration).\n if self.proposal_append_gt:\n proposals = add_ground_truth_to_proposals(gt_boxes, proposals)\n\n proposals_with_gt = []\n\n num_fg_samples = []\n num_bg_samples = []\n for proposals_per_image, targets_per_image in zip(proposals, targets):\n has_gt = len(targets_per_image) > 0\n match_quality_matrix = pairwise_iou(\n targets_per_image.gt_boxes, proposals_per_image.proposal_boxes\n )\n matched_idxs, matched_labels = self.proposal_matcher(\n match_quality_matrix\n )\n sampled_idxs, gt_classes = self._sample_proposals(\n matched_idxs, matched_labels, targets_per_image.gt_classes\n )\n\n # Set target attributes of the sampled proposals:\n proposals_per_image = proposals_per_image[sampled_idxs]\n proposals_per_image.gt_classes = gt_classes\n\n # We index all the attributes of targets that start with \"gt_\"\n # and have not been added to proposals yet (=\"gt_classes\").\n if has_gt:\n sampled_targets = matched_idxs[sampled_idxs]\n # NOTE: here the indexing waste some compute, because heads\n # will filter the proposals again (by foreground/background,\n # etc), so we essentially index the data twice.\n for (\n trg_name,\n trg_value,\n ) in targets_per_image.get_fields().items():\n if trg_name.startswith(\n \"gt_\"\n ) and not proposals_per_image.has(trg_name):\n proposals_per_image.set(\n trg_name, trg_value[sampled_targets]\n )\n else:\n gt_boxes = Boxes(\n targets_per_image.gt_boxes.tensor.new_zeros(\n (len(sampled_idxs), 4)\n )\n )\n proposals_per_image.gt_boxes = gt_boxes\n\n num_bg_samples.append(\n (gt_classes == self.num_classes).sum().item()\n )\n num_fg_samples.append(gt_classes.numel() - num_bg_samples[-1])\n proposals_with_gt.append(proposals_per_image)\n\n # Log the number of fg/bg samples that are selected for training ROI heads\n storage = get_event_storage()\n storage.put_scalar(\"roi_head/num_fg_samples\", np.mean(num_fg_samples))\n storage.put_scalar(\"roi_head/num_bg_samples\", np.mean(num_bg_samples))\n\n return proposals_with_gt\n\n def forward(self, images, features, proposals, targets=None):\n \"\"\"\n Args:\n images (ImageList):\n features (dict[str: Tensor]): input data as a mapping from feature\n map name to tensor. Axis 0 represents the number of images `N` in\n the input data; axes 1-3 are channels, height, and width, which may\n vary between feature maps (e.g., if a feature pyramid is used).\n proposals (list[Instances]): length `N` list of `Instances`s. The i-th\n `Instances` contains object proposals for the i-th input image,\n with fields \"proposal_boxes\" and \"objectness_logits\".\n targets (list[Instances], optional): length `N` list of `Instances`s. The i-th\n `Instances` contains the ground-truth per-instance annotations\n for the i-th input image. Specify `targets` during training only.\n It may have the following fields:\n - gt_boxes: the bounding box of each instance.\n - gt_classes: the label for each instance with a category ranging in [0, #class].\n\n Returns:\n results (list[Instances]): length `N` list of `Instances`s containing the\n detected instances. Returned during inference only; may be []\n during training.\n losses (dict[str: Tensor]): mapping from a named loss to a tensor\n storing the loss. 
Used during training only.\n \"\"\"\n raise NotImplementedError()\n\n\n@ROI_HEADS_REGISTRY.register()\nclass Res5ROIHeads(ROIHeads):\n \"\"\"\n The ROIHeads in a typical \"C4\" R-CNN model, where the heads share the\n cropping and the per-region feature computation by a Res5 block.\n \"\"\"\n\n def __init__(self, cfg, input_shape):\n super().__init__(cfg, input_shape)\n\n assert len(self.in_features) == 1\n\n # fmt: off\n pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION\n pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE\n pooler_scales = (1.0 / self.feature_strides[self.in_features[0]], )\n sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO\n # fmt: on\n assert not cfg.MODEL.KEYPOINT_ON\n\n self.pooler = ROIPooler(\n output_size=pooler_resolution,\n scales=pooler_scales,\n sampling_ratio=sampling_ratio,\n pooler_type=pooler_type,\n )\n\n self.res5, out_channels = self._build_res5_block(cfg)\n output_layer = cfg.MODEL.ROI_HEADS.OUTPUT_LAYER\n self.box_predictor = ROI_HEADS_OUTPUT_REGISTRY.get(output_layer)(\n cfg, out_channels, self.num_classes, self.cls_agnostic_bbox_reg\n )\n\n def _build_res5_block(self, cfg):\n # fmt: off\n stage_channel_factor = 2 ** 3 # res5 is 8x res2\n num_groups = cfg.MODEL.RESNETS.NUM_GROUPS\n width_per_group = cfg.MODEL.RESNETS.WIDTH_PER_GROUP\n bottleneck_channels = num_groups * width_per_group * stage_channel_factor\n out_channels = cfg.MODEL.RESNETS.RES2_OUT_CHANNELS * stage_channel_factor\n stride_in_1x1 = cfg.MODEL.RESNETS.STRIDE_IN_1X1\n norm = cfg.MODEL.RESNETS.NORM\n assert not cfg.MODEL.RESNETS.DEFORM_ON_PER_STAGE[-1], \\\n \"Deformable conv is not yet supported in res5 head.\"\n # fmt: on\n\n blocks = make_stage(\n BottleneckBlock,\n 3,\n first_stride=2,\n in_channels=out_channels // 2,\n bottleneck_channels=bottleneck_channels,\n out_channels=out_channels,\n num_groups=num_groups,\n norm=norm,\n stride_in_1x1=stride_in_1x1,\n )\n return nn.Sequential(*blocks), out_channels\n\n def _shared_roi_transform(self, features, boxes):\n x = self.pooler(features, boxes)\n return self.res5(x)\n\n def forward(self, images, features, proposals, targets=None):\n \"\"\"\n See :class:`ROIHeads.forward`.\n \"\"\"\n del images\n\n if self.training:\n proposals = self.label_and_sample_proposals(proposals, targets)\n del targets\n\n proposal_boxes = [x.proposal_boxes for x in proposals]\n box_features = self._shared_roi_transform(\n [features[f] for f in self.in_features], proposal_boxes\n )\n feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1\n pred_class_logits, pred_proposal_deltas = self.box_predictor(\n feature_pooled\n )\n del feature_pooled\n\n outputs = FastRCNNOutputs(\n self.box2box_transform,\n pred_class_logits,\n pred_proposal_deltas,\n proposals,\n self.smooth_l1_beta,\n )\n\n if self.training:\n del features\n losses = outputs.losses()\n return [], losses\n else:\n pred_instances, _ = outputs.inference(\n self.test_score_thresh,\n self.test_nms_thresh,\n self.test_detections_per_img,\n )\n return pred_instances, {}\n\n\n@ROI_HEADS_REGISTRY.register()\nclass StandardROIHeads(ROIHeads):\n \"\"\"\n It's \"standard\" in a sense that there is no ROI transform sharing\n or feature sharing between tasks.\n The cropped rois go to separate branches directly.\n This way, it is easier to make separate abstractions for different branches.\n\n This class is used by most models, such as FPN and C5.\n To implement more models, you can subclass it and implement a different\n :meth:`forward()` or a head.\n \"\"\"\n\n def __init__(self, 
cfg, input_shape):\n super(StandardROIHeads, self).__init__(cfg, input_shape)\n self._init_box_head(cfg)\n\n def _init_box_head(self, cfg):\n # fmt: off\n pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION\n pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features)\n sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO\n pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE\n # fmt: on\n\n # If StandardROIHeads is applied on multiple feature maps (as in FPN),\n # then we share the same predictors and therefore the channel counts must be the same\n in_channels = [self.feature_channels[f] for f in self.in_features]\n # Check all channel counts are equal\n assert len(set(in_channels)) == 1, in_channels\n in_channels = in_channels[0]\n\n self.box_pooler = ROIPooler(\n output_size=pooler_resolution,\n scales=pooler_scales,\n sampling_ratio=sampling_ratio,\n pooler_type=pooler_type,\n )\n # Here we split \"box head\" and \"box predictor\", which is mainly due to historical reasons.\n # They are used together so the \"box predictor\" layers should be part of the \"box head\".\n # New subclasses of ROIHeads do not need \"box predictor\"s.\n self.box_head = build_box_head(\n cfg,\n ShapeSpec(\n channels=in_channels,\n height=pooler_resolution,\n width=pooler_resolution,\n ),\n )\n output_layer = cfg.MODEL.ROI_HEADS.OUTPUT_LAYER\n self.box_predictor = ROI_HEADS_OUTPUT_REGISTRY.get(output_layer)(\n cfg,\n self.box_head.output_size,\n self.num_classes,\n self.cls_agnostic_bbox_reg,\n )\n\n def forward(self, images, features, proposals, targets=None):\n \"\"\"\n See :class:`ROIHeads.forward`.\n \"\"\"\n del images\n if self.training:\n proposals = self.label_and_sample_proposals(proposals, targets)\n del targets\n\n features_list = [features[f] for f in self.in_features]\n\n if self.training:\n losses = self._forward_box(features_list, proposals)\n return proposals, losses\n else:\n pred_instances = self._forward_box(features_list, proposals)\n return pred_instances, {}\n\n def _forward_box(self, features, proposals):\n \"\"\"\n Forward logic of the box prediction branch.\n\n Args:\n features (list[Tensor]): #level input features for box prediction\n proposals (list[Instances]): the per-image object proposals with\n their matching ground truth.\n Each has fields \"proposal_boxes\", and \"objectness_logits\",\n \"gt_classes\", \"gt_boxes\".\n\n Returns:\n In training, a dict of losses.\n In inference, a list of `Instances`, the predicted instances.\n \"\"\"\n box_features = self.box_pooler(\n features, [x.proposal_boxes for x in proposals]\n )\n box_features = self.box_head(box_features)\n pred_class_logits, pred_proposal_deltas = self.box_predictor(\n box_features\n )\n del box_features\n\n outputs = FastRCNNOutputs(\n self.box2box_transform,\n pred_class_logits,\n pred_proposal_deltas,\n proposals,\n self.smooth_l1_beta,\n )\n if self.training:\n return outputs.losses()\n else:\n pred_instances, _ = outputs.inference(\n self.test_score_thresh,\n self.test_nms_thresh,\n self.test_detections_per_img,\n )\n return pred_instances\n\n\n@ROI_HEADS_REGISTRY.register()\nclass StandardROIMultiHeads(StandardROIHeads):\n \"\"\"\n Same as StandardROIHeads but allows for using multiple heads (e.g. 
different heads for base classes and novel\n classes)\n \"\"\"\n def __init__(self, cfg, input_shape):\n super(StandardROIMultiHeads, self).__init__(cfg, input_shape)\n\n def _init_box_head(self, cfg):\n # fmt: off\n self.cpu_device = torch.device(\"cpu\")\n self.device = torch.device(cfg.MODEL.DEVICE)\n pooler_resolution = cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION\n pooler_scales = tuple(1.0 / self.feature_strides[k] for k in self.in_features)\n sampling_ratio = cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO\n pooler_type = cfg.MODEL.ROI_BOX_HEAD.POOLER_TYPE\n self.num_head_classes = cfg.MODEL.ROI_HEADS.MULTIHEAD_NUM_CLASSES # classes per head\n self.num_heads = cfg.MODEL.ROI_BOX_HEAD.NUM_HEADS\n # Dataset names because we need the appropriate metadata to obtain the correct class indices for each head!\n self.train_dataset_name = cfg.DATASETS.TRAIN[0]\n self.test_dataset_name = cfg.DATASETS.TEST[0]\n # fmt: on\n\n assert self.num_classes == sum(self.num_head_classes)\n # If StandardROIHeads is applied on multiple feature maps (as in FPN),\n # then we share the same predictors and therefore the channel counts must be the same\n in_channels = [self.feature_channels[f] for f in self.in_features]\n # Check all channel counts are equal\n assert len(set(in_channels)) == 1, in_channels\n in_channels = in_channels[0]\n\n self.box_pooler = ROIPooler(\n output_size=pooler_resolution,\n scales=pooler_scales,\n sampling_ratio=sampling_ratio,\n pooler_type=pooler_type,\n )\n # Here we split \"box head\" and \"box predictor\", which is mainly due to historical reasons.\n # They are used together so the \"box predictor\" layers should be part of the \"box head\".\n # New subclasses of ROIHeads do not need \"box predictor\"s.\n self.box_head = build_box_head( # TODO: probably force 'FastRCNNConvFCMultiHead'?\n cfg,\n ShapeSpec(\n channels=in_channels,\n height=pooler_resolution,\n width=pooler_resolution,\n ),\n )\n output_layer = cfg.MODEL.ROI_HEADS.OUTPUT_LAYER\n\n self.box_predictors = []\n bbox_head_output_size = self.box_head.output_size\n if self.num_heads > 1:\n bbox_head_output_size //= self.num_heads\n for i in range(self.num_heads):\n box_predictor = ROI_HEADS_OUTPUT_REGISTRY.get(output_layer)(\n cfg,\n bbox_head_output_size,\n self.num_head_classes[i],\n self.cls_agnostic_bbox_reg,\n )\n self.add_module(\"box_predictor{}\".format(i+1), box_predictor)\n self.box_predictors.append(box_predictor)\n\n def _get_ind_mappings(self) -> List[Dict]:\n # Target indices range from 0 to 'cfg.MODEL.ROI_HEADS.NUM_CLASSES', but we here need, for each head i:\n # a mapping from old index to range 0 to 'cfg.MODEL.ROI_HEADS.MULTIHEAD_NUM_CLASSES[i]'\n # Expected output: List(dict(int:int)), the list is expected to have one dict per head. Each dict is expected to\n # map the large index of a class (from the single head) to the index used on this small head\n # Note: don't forget (for each head!) to map the background class (last index, not index 0!) to the last index\n # of this head's classes! 
(use self.num_head_classes[i] to access the amount of classes for head i)\n raise NotImplementedError\n\n def _forward_box(self, features, proposals):\n \"\"\"\n Forward logic of the box prediction branch.\n Args:\n features (list[Tensor]): #level input features for box prediction\n proposals (list[Instances]): the per-image object proposals with\n their matching ground truth.\n Each has fields \"proposal_boxes\", and \"objectness_logits\",\n \"gt_classes\", \"gt_boxes\".\n Returns:\n In training, a dict of losses.\n In inference, a list of `Instances`, the predicted instances.\n \"\"\"\n # pooled features, result size is (e.g. [512, 256, 7, 7])\n # [MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE,\n # MODEL.FPN.OUT_CHANNELS?,\n # MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION,\n # MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION]\n box_features = self.box_pooler(\n features, [x.proposal_boxes for x in proposals]\n )\n # class-agnostic per-roi feature vectors, same size for each head\n # result is a list with '#heads' elements each of size\n # [ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH, MODEL.ROI_BOX_HEAD.FC_DIM], e.g. [8192, 1024]\n box_features = self.box_head(box_features)\n assert len(box_features) == len(self.box_predictors) == self.num_heads, \\\n \"box_features output should match the amount of box predictors: {}, {}\"\\\n .format(len(box_features), len(self.box_predictors))\n\n # class-dependent logits and bbox deltas\n class_logits, proposal_deltas = [], []\n for i, box_predictor in enumerate(self.box_predictors):\n # pred_class_logits = [ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH, num_classes + 1]\n # pred_proposal_deltas =\n # class-agnostic: [ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH, 4]\n # non cag: [ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH, 4 x num_classes] Note: not num_classes + 1!\n pred_class_logits, pred_proposal_deltas = box_predictor(box_features[i])\n\n class_logits.append(pred_class_logits)\n proposal_deltas.append(pred_proposal_deltas)\n del box_features\n\n # Assumptions:\n # - 'box_features'-output from box_head is class-agnostic (ans same-sized for each head!), we can't do anything\n # there!\n # - we use those features to obtain class-dependent activations (correct amount of target size is ensured by\n # each 'predictor head'!\n # - for softmax calculation, we have to compare those activations against targets, which we obtain from\n # the variable 'proposals', which contains objectness score (from RPN) and gt-class (and gt-boxes)\n # - those gt-data from the variable 'proposals' uses all available classes (and thus indices from\n # 0 to num_classes), we then need to transform those indices to appropriate indices for each head (and need\n # to remember which number we mapped to which number at which head because each single head expects numbers\n # or indices starting by 0, so our mapping destroys the unique numbers!\n # - we now have multiple possibilities what to do with our proposals: first of all, we decide to merge classes\n # after softmax and to not merge the activations before the softmax. This would allow to skip the\n # index-mapping but would also cause another problem: since each head produces background logits ans the\n # final head, applying softmax on activations of all classes together just expects a single background class,\n # so which of the background activations to choose and which to discard? 
This is a non-trivial problem and\n        #   because of that, we choose to first apply softmax to each head and then merge the resulting class\n        #   probabilities. We now assume wlog (without loss of generality) that we have batch-size 16 and use\n        #   512 rois per image, yielding 8192 rois per batch (after roi pooling)\n        # - we could now take the Proposals and split them, depending on the target classes. In addition to this\n        #   technique, we would probably want to use the background class activations for each head. If we think this\n        #   idea through a while further, we note that splitting of proposals into different heads does not make sense.\n        #   We first note that each head i itself produces [8192, num_classes[i] + 1] classification logits because\n        #   each head obtains 8192 rois as input (because the classification head splits after roi pooling, therefore\n        #   each head encounters the same amount of input). For that matter, we would have to remove objects\n        #   belonging to non-target classes on both sides, on the feature side (class and box logits from the predictor)\n        #   and on the proposal side (proposals from the RPN where the GT-class is known), while keeping background class\n        #   logits at EACH head.\n        # - another, possibly more sophisticated yet simpler, approach would be to use all proposals for\n        #   each head with only a small modification: at each head, change the target-class (gt-class) of the\n        #   proposals for non-target classes of this head (not counting the background class!) to background. This means\n        #   non-target classes are folded into the background class.\n        #   (Note: in Detectron2, the background class is not the class with the first index (0), but the class with the\n        #   last index (|num_classes|)!)\n        #   Note: For training, we don't have to transform the indices back to the original indices because we're\n        #   just interested in the loss, which is automatically calculated correctly since the produced logits are\n        #   already in the correct shape and the adjusted class indices are automatically transformed into one-hot\n        #   vectors for the classification loss (e.g. Cross Entropy). Therefore, we do not need back-transformation\n        #   because we're done after calculating the loss.\n        # - Inference: In contrast to training, we (of course) have no gt-annotations, therefore we cannot prepare or\n        #   adjust the class of proposals. We don't even have to because we don't want to calculate losses. In contrast\n        #   to training however, we now need postprocessing of predicted classes after having calculated softmax\n        #   probabilities because we need to know which class has the highest probability for each proposal.\n        #   In contrast to single-heads, we now have #heads predictions for each proposal because we input ALL\n        #   proposals to each head. This could be problematic if we think of a case where for a single proposal one\n        #   head produces a medium-high confidence for an actual class (not background) and another head outputs high\n        #   background confidence for that proposal (because it learned the target classes from a different head as\n        #   background class for itself). 
Probably this problem isn't an actual issue because the \"Fast-RCNN\"-Head\n        #   won't output bbox predictions for the background class, which would leave us with just a single valid prediction\n        #   for that proposal (with medium confidence).\n\n        # Proposals: contains 'SOLVER.IMS_PER_BATCH' elements of type detectron2.structures.Instances\n        # Access elements of list 'proposals' with indices.\n        # Access the elements of 'Instances' with '.get_fields()', or directly with '.tensor'\n        # Access the tensor (which 'Boxes' wraps) with boxes.tensor\n        # Note: Boxes supports __getitem__ using slices, indices, boolean arrays, etc.\n        # Each Instance contains the following fields of the following sizes:\n        #  'proposal_boxes': detectron2.structures.Boxes(tensor) of size [MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, 4]\n        #  'objectness_logits': tensor of size [MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE]\n        #  'gt_classes': tensor of size [MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE]\n        #  'gt_boxes': detectron2.structures.Boxes(tensor) of size [MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE, 4]\n        # 'Proposals': (objectness logits + gt classes)\n        # 'box_features': (pooled roi features put forward through the net)\n\n        # Algorithm:\n        # (Synopsis: we use ALL proposals for each head and just need to set non-target classes to background)\n        # 1. For training, for each head i\n        heads_proposals = []\n        if self.training:\n            all_inds_to_head_inds_list = self._get_ind_mappings()\n            # 1.1 For each head i\n            for i in range(len(self.box_predictors)):\n                # 1.1.1 Take a copy of all Proposals, take the target categories of head i\n                # list of SOLVER.IMS_PER_BATCH 'Instances'-objects, each comprising\n                #  ROI_HEADS.BATCH_SIZE_PER_IMAGE proposals\n                tmp_proposals = copy.deepcopy(proposals)\n                all_inds_to_head_inds = all_inds_to_head_inds_list[i]\n                all_bg_cls = self.num_classes\n                head_bg_cls = self.num_head_classes[i]\n                assert all_bg_cls in all_inds_to_head_inds and all_inds_to_head_inds[all_bg_cls] == head_bg_cls\n                head_targets = list(all_inds_to_head_inds.keys())\n                # Note: as per the 'fast_rcnn' doc, [0, num_cls) are foreground and |num_cls| is background!\n                for instances in tmp_proposals:\n                    gt_classes = instances.gt_classes  # ==instances.get_fields()['gt_classes']\n                    # 1.1.2 Set the class of the j-th proposal to the background class if it's not a target class\n                    # TODO: not sure about copying the tensor to host memory but torch currently does not support\n                    #  the 'isin' function on its own...\n                    bg_indices = np.isin(gt_classes.to(self.cpu_device), head_targets, invert=True).nonzero()\n                    # using \"all classes\" background class, which is later transformed to the appropriate background\n                    # class for this head\n                    gt_classes[bg_indices] = all_bg_cls\n                    # 1.1.3 If proposal j is a proposal for a target class, transform its class to range\n                    #  [0, num_classes[i]]\n                    # Note: apply_ may only be used for cpu-tensors, so we have to move it to cpu temporarily\n                    # TODO: 'apply_' might be slow since it's not easily parallelisable\n                    gt_classes = gt_classes.to(self.cpu_device)  # move to CPU temporarily\n                    gt_classes.apply_(lambda x: all_inds_to_head_inds[x])  # apply_ works inplace!\n                    instances.gt_classes = gt_classes.to(self.device)  # move back to GPU and override object attribute\n                heads_proposals.append(tmp_proposals)\n        else:\n            # 2.1 Pass all proposals to all heads\n            for i in range(len(self.box_predictors)):\n                heads_proposals.append(proposals)\n\n        # Initialize 'FastRCNNOutputs'-object, nothing more!\n        heads_outputs = []\n        for i in range(len(self.box_predictors)):\n            heads_outputs.append(\n                FastRCNNOutputs(\n                    
self.box2box_transform,\n                    class_logits[i],\n                    proposal_deltas[i],\n                    heads_proposals[i],\n                    self.smooth_l1_beta,\n                )\n            )\n\n        if self.training:\n            # calculate losses e.g.\n            #  'softmax cross entropy' on pred_class_logits (\"loss_cls\": self.softmax_cross_entropy_loss())\n            #  'smooth L1 loss' on pred_proposal_deltas (\"loss_box_reg\": self.smooth_l1_loss())\n            # Note: we don't need to transform any classes back to the previous range because we're just interested in the\n            #  loss. The gt-class (index in range of each head's target classes) will be automatically transformed to a\n            #  one-hot vector which is sufficient to calculate the loss at each output neuron for each target class.\n            #  We would just need to transform the categories back if we were interested in the name of each detection's\n            #  class (as we are for inference).\n            losses_dicts = {}\n            for i, outputs in enumerate(heads_outputs):\n                losses_dict = outputs.losses()\n                for k, v in losses_dict.items():\n                    losses_dicts[str(k) + \"_\" + str(i+1)] = v\n                del losses_dict\n            return losses_dicts\n        else:\n            pred_instances = []\n            all_inds_to_head_inds_list = self._get_ind_mappings()\n            for i, outputs in enumerate(heads_outputs):\n                tmp_pred_instances, _ = outputs.inference(\n                    self.test_score_thresh,\n                    self.test_nms_thresh,\n                    self.test_detections_per_img,  # TODO: problem in multi-head: detections_per_image_per_head?\n                )\n                # 2.2 After softmax, transform the class of proposals back to range [0, all_classes]\n                all_inds_to_head_inds = all_inds_to_head_inds_list[i]\n                head_ind_to_ind = {v: k for k, v in all_inds_to_head_inds.items()}\n                # 'tmp_pred_instances' is a list of 'Instances'-objects, one object for each image\n                for instances in tmp_pred_instances:\n                    # Note: at inference, this method is called once for each image, thus |proposals| == 1\n                    # it would probably be problematic to add one 'Instances'-object per head since the returned list would\n                    #  have twice the expected size; presumably, the surplus objects in the list would be ignored!\n                    # slow but ok for inference.\n                    pred_classes = instances.pred_classes.to(self.cpu_device)  # move to cpu because of method 'apply_'\n                    pred_classes.apply_(lambda x: head_ind_to_ind[x])  # element-wise inplace transformation\n                    instances.pred_classes = pred_classes.to(self.device)  # move back to gpu and set object attribute\n                pred_instances.append(tmp_pred_instances)\n            # num images == len(proposals), where 'proposals' is the same in the list 'heads_proposals'\n            # pred_instances = [num_heads, num_images], but we need [num_images]\n            # [num_heads, num_images] -> [num_images, num_heads], then concatenate all 'Instances'-objects for a single\n            #  image\n            return [Instances.cat(list(x)) for x in zip(*pred_instances)]\n\n\n@ROI_HEADS_REGISTRY.register()\nclass StandardROIDoubleHeads(StandardROIMultiHeads):\n    \"\"\"\n    Same as StandardROIMultiHeads but using exactly two heads (for base classes and novel classes)\n    \"\"\"\n    def __init__(self, cfg, input_shape):\n        super(StandardROIDoubleHeads, self).__init__(cfg, input_shape)\n        assert self.num_heads == 2, \"To use Double-Head set num_heads to 2!\"\n        assert self.box_head.split_at_fc == 2, \\\n            \"Current ckpt_surgery requires a fixed number of fc layers as well as a fixed split index of 2!\"\n\n    def _get_ind_mappings(self):\n        dataset = self.train_dataset_name if self.training else self.test_dataset_name  # classes should normally be the same...\n        metadata = MetadataCatalog.get(dataset)\n        # For now, we use this kind of head solely for fine-tuning\n        assert hasattr(metadata, 'novel_dataset_id_to_contiguous_id')\n        assert 
hasattr(metadata, 'base_dataset_id_to_contiguous_id')\n all_id_to_inds = metadata.thing_dataset_id_to_contiguous_id\n base_id_to_inds = metadata.base_dataset_id_to_contiguous_id\n novel_id_to_inds = metadata.novel_dataset_id_to_contiguous_id\n all_inds_to_base_inds = {v: base_id_to_inds[k] for k, v in all_id_to_inds.items() if k in base_id_to_inds.keys()}\n all_inds_to_novel_inds = {v: novel_id_to_inds[k] for k, v in all_id_to_inds.items() if k in novel_id_to_inds.keys()}\n # For each head, add a mapping from old background class index to each head's background class index\n all_bg_ind = len(all_id_to_inds)\n base_bg_ind = len(base_id_to_inds)\n novel_bg_ind = len(novel_id_to_inds)\n assert all_bg_ind not in all_id_to_inds.values()\n assert base_bg_ind not in base_id_to_inds.values()\n assert novel_bg_ind not in novel_id_to_inds.values()\n all_inds_to_base_inds[all_bg_ind] = base_bg_ind\n all_inds_to_novel_inds[all_bg_ind] = novel_bg_ind\n return [all_inds_to_base_inds, all_inds_to_novel_inds]\n" ]
[ [ "torch.device", "torch.cat", "torch.nn.Sequential", "torch.no_grad", "numpy.mean", "torch.zeros_like" ] ]
predictive-analytics-lab/pal-bolts
[ "5f1932f351f2e551276b47dfeda7888772d99895" ]
[ "conduit/data/datasets/utils.py" ]
[ "from collections.abc import Mapping\nfrom dataclasses import fields, is_dataclass\nfrom functools import lru_cache\nimport logging\nfrom multiprocessing.context import BaseContext\nfrom pathlib import Path\nimport platform\nimport subprocess\nfrom typing import (\n Any,\n Callable,\n List,\n NamedTuple,\n Optional,\n Sequence,\n Tuple,\n Union,\n cast,\n overload,\n)\nfrom zipfile import BadZipFile\n\nfrom PIL import Image\nimport albumentations as A\nimport cv2\nimport numpy as np\nimport numpy.typing as npt\nfrom ranzen.misc import gcopy\nimport torch\nfrom torch import Tensor\nfrom torch.utils.data import ConcatDataset, Dataset, Subset\nfrom torch.utils.data._utils.collate import (\n default_collate_err_msg_format,\n np_str_obj_array_pattern,\n string_classes,\n)\nfrom torch.utils.data.dataloader import DataLoader, _worker_init_fn_t\nfrom torch.utils.data.sampler import Sampler\nfrom torchvision.datasets.utils import download_url, extract_archive\nfrom torchvision.transforms import functional as TF\nfrom typing_extensions import Final, Literal, TypeAlias, get_args\n\nfrom conduit.data.datasets.base import CdtDataset, D\nfrom conduit.data.structures import BinarySample, NamedSample, SampleBase, TernarySample\n\n__all__ = [\n \"AlbumentationsTform\",\n \"AudioTform\",\n \"CdtDataLoader\",\n \"GdriveFileInfo\",\n \"ImageLoadingBackend\",\n \"ImageTform\",\n \"PillowTform\",\n \"RawImage\",\n \"UrlFileInfo\",\n \"apply_image_transform\",\n \"check_integrity\",\n \"download_from_gdrive\",\n \"download_from_url\",\n \"extract_base_dataset\",\n \"extract_labels_from_dataset\",\n \"get_group_ids\",\n \"img_to_tensor\",\n \"infer_al_backend\",\n \"infer_il_backend\",\n \"load_image\",\n \"make_subset\",\n \"pb_collate\",\n]\n\n\nImageLoadingBackend: TypeAlias = Literal[\"opencv\", \"pillow\"]\n\n\nRawImage: TypeAlias = Union[npt.NDArray[np.integer], Image.Image]\n\n\n@overload\ndef load_image(filepath: Union[Path, str], *, backend: Literal[\"opencv\"] = ...) -> np.ndarray:\n ...\n\n\n@overload\ndef load_image(filepath: Union[Path, str], *, backend: Literal[\"pillow\"] = ...) 
-> Image.Image:\n    ...\n\n\ndef load_image(filepath: Union[Path, str], *, backend: ImageLoadingBackend = \"opencv\") -> RawImage:\n    \"\"\"Load an image from disk using the requested backend.\n\n    :param filepath: The path of the image-file to be loaded.\n    :param backend: Backend to use for loading the image: either 'opencv' or 'pillow'.\n\n    :returns: The loaded image file as a numpy array if 'opencv' was the selected backend\n    and a PIL image otherwise.\n    \"\"\"\n    if backend == \"opencv\":\n        if isinstance(filepath, Path):\n            # cv2 can only read string filepaths\n            filepath = str(filepath)\n        image = cv2.imread(filepath)  # type: ignore\n        if image is None:\n            raise OSError(f\"Image-file could not be read from location '{filepath}'\")\n        return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)  # type: ignore\n    return Image.open(filepath)\n\n\nAlbumentationsTform: TypeAlias = Union[A.Compose, A.BasicTransform]\nPillowTform: TypeAlias = Callable[[Image.Image], Any]\nImageTform: TypeAlias = Union[AlbumentationsTform, PillowTform]\n\n\ndef infer_il_backend(transform: Optional[ImageTform]) -> ImageLoadingBackend:\n    \"\"\"Infer which image-loading backend to use based on the type of the image-transform.\n\n    :param transform: The image transform from which to infer the image-loading backend.\n    If the transform is derived from Albumentations, then 'opencv' will be selected as the\n    backend, else 'pillow' will be selected.\n\n    :returns: The backend to load images with based on the supplied image-transform: either\n    'opencv' or 'pillow'.\n    \"\"\"\n    # Default to opencv if transform is None, as numpy arrays are generally\n    # more tractable\n    if transform is None or isinstance(transform, get_args(AlbumentationsTform)):\n        return \"opencv\"\n    return \"pillow\"\n\n\ndef apply_image_transform(\n    image: RawImage, *, transform: Optional[ImageTform]\n) -> Union[RawImage, Image.Image, Tensor]:\n    image_ = image\n    if transform is not None:\n        if isinstance(transform, (A.Compose, A.BasicTransform)):\n            if isinstance(image, Image.Image):\n                image = np.array(image)\n            image_ = transform(image=image)[\"image\"]\n        else:\n            if isinstance(image, np.ndarray):\n                image = Image.fromarray(image)\n            image_ = transform(image)\n    return image_\n\n\ndef img_to_tensor(img: Union[Image.Image, np.ndarray]) -> Tensor:\n    if isinstance(img, Image.Image):\n        return TF.pil_to_tensor(img)\n    return torch.from_numpy(\n        np.moveaxis(img / (255.0 if img.dtype == np.uint8 else 1), -1, 0).astype(np.float32)\n    )\n\n\nAudioLoadingBackend: TypeAlias = Literal[\"sox_io\", \"soundfile\"]\n\n\ndef infer_al_backend() -> AudioLoadingBackend:\n    \"\"\"Infer which audio-loading backend to use based on the operating system.\"\"\"\n    soundfile: Final = \"soundfile\"\n    sox: Final = \"sox_io\"\n    return soundfile if platform.system() == \"Windows\" else sox\n\n\nAudioTform: TypeAlias = Callable[[Tensor], Tensor]\n\n\ndef apply_audio_transform(waveform: Tensor, *, transform: Optional[AudioTform]) -> Tensor:\n    return waveform if transform is None else transform(waveform)\n\n\n@overload\ndef extract_base_dataset(\n    dataset: Dataset, *, return_subset_indices: Literal[True] = ...\n) -> Tuple[Dataset, Union[Tensor, slice]]:\n    ...\n\n\n@overload\ndef extract_base_dataset(\n    dataset: Dataset, *, return_subset_indices: Literal[False] = ...\n) -> Dataset:\n    ...\n\n\ndef extract_base_dataset(\n    dataset: Dataset, *, return_subset_indices: bool = True\n) -> Union[Dataset, Tuple[Dataset, Union[Tensor, slice]]]:\n    \"\"\"Extract the innermost dataset of a nesting of datasets.\n\n    Nested datasets are inferred based 
on the existence of a 'dataset'\n    attribute and the base dataset is extracted by recursive application\n    of this rule.\n\n    :param dataset: The dataset from which to extract the base dataset.\n\n    :param return_subset_indices: Whether to return the indices from which\n    the overall subset of the dataset was created (works for multiple levels of\n    subsetting).\n\n    :returns: The base dataset, which may be the original dataset if one does not\n    exist or cannot be determined.\n    \"\"\"\n\n    def _closure(\n        dataset: Dataset, rel_indices_ls: Optional[List[List[int]]] = None\n    ) -> Union[Dataset, Tuple[Dataset, Union[Tensor, slice]]]:\n        if rel_indices_ls is None:\n            rel_indices_ls = []\n        if hasattr(dataset, \"dataset\"):\n            if isinstance(dataset, Subset):\n                rel_indices_ls.append(list(dataset.indices))\n            return _closure(dataset.dataset, rel_indices_ls)  # type: ignore\n        if return_subset_indices:\n            if rel_indices_ls:\n                abs_indices = torch.as_tensor(rel_indices_ls.pop(), dtype=torch.long)\n                for indices in rel_indices_ls[::-1]:\n                    abs_indices = abs_indices[indices]\n            else:\n                abs_indices = slice(None)\n            return dataset, abs_indices\n        return dataset\n\n    return _closure(dataset)\n\n\n@lru_cache(typed=True)\ndef extract_labels_from_dataset(dataset: Dataset) -> Tuple[Optional[Tensor], Optional[Tensor]]:\n    \"\"\"Attempt to extract s/y labels from a dataset.\"\"\"\n\n    def _closure(dataset: Dataset) -> Tuple[Optional[Tensor], Optional[Tensor]]:\n        dataset, indices = extract_base_dataset(dataset=dataset, return_subset_indices=True)\n        _s = None\n        _y = None\n        if getattr(dataset, \"s\", None) is not None:\n            _s = dataset.s[indices]  # type: ignore\n        if getattr(dataset, \"y\", None) is not None:\n            _y = dataset.y[indices]  # type: ignore\n\n        _s = torch.from_numpy(_s) if isinstance(_s, np.ndarray) else _s\n        _y = torch.from_numpy(_y) if isinstance(_y, np.ndarray) else _y\n\n        return _s, _y\n\n    if isinstance(dataset, ConcatDataset):\n        s_all_ls: List[Tensor] = []\n        y_all_ls: List[Tensor] = []\n        for _dataset in dataset.datasets:\n            s, y = _closure(_dataset)\n            if s is not None:\n                s_all_ls.append(s)\n            if y is not None:\n                y_all_ls.append(y)\n        s_all = torch.cat(s_all_ls, dim=0) if s_all_ls else None\n        y_all = torch.cat(y_all_ls, dim=0) if y_all_ls else None\n    else:\n        s_all, y_all = _closure(dataset)\n    return s_all, y_all\n\n\ndef get_group_ids(dataset: Dataset) -> Tensor:\n    s_all, y_all = extract_labels_from_dataset(dataset)\n    group_ids: Optional[Tensor] = None\n    if s_all is None:\n        if y_all is None:\n            raise ValueError(\n                \"Unable to compute group ids for dataset because no labels could be extracted.\"\n            )\n        group_ids = y_all\n    elif y_all is None:\n        group_ids = s_all\n    else:\n        group_ids = (y_all * len(s_all.unique()) + s_all).squeeze()\n    return group_ids.long()\n\n\ndef compute_instance_weights(dataset: Dataset, upweight: bool = False) -> Tensor:\n    group_ids = get_group_ids(dataset)\n    _, inv_indexes, counts = group_ids.unique(return_inverse=True, return_counts=True)\n    # Upweight samples according to the cardinality of their intersectional group\n    if upweight:\n        group_weights = len(group_ids) / counts\n    # Downweight samples according to the cardinality of their intersectional group\n    # - this approach should be preferred due to being more numerically stable\n    # (very small counts can lead to very large weighted loss values when upweighting)\n    else:\n        group_weights = 1 - (counts / len(group_ids))\n    return group_weights[inv_indexes]\n\n\n@overload\ndef make_subset(\n    dataset: Subset,\n    *,\n    indices: Optional[Union[List[int], 
npt.NDArray[np.uint64], Tensor, slice]],\n    deep: bool = ...,\n) -> CdtDataset:\n    ...\n\n\n@overload\ndef make_subset(\n    dataset: D,\n    *,\n    indices: Optional[Union[List[int], npt.NDArray[np.uint64], Tensor, slice]],\n    deep: bool = ...,\n) -> D:\n    ...\n\n\ndef make_subset(\n    dataset: Union[D, Subset],\n    *,\n    indices: Optional[Union[List[int], npt.NDArray[np.uint64], Tensor, slice]],\n    deep: bool = False,\n) -> Union[D, CdtDataset]:\n    \"\"\"Create a subset of the dataset from the given indices.\n\n    :param indices: The sample-indices from which to create the subset.\n    In the case of being a numpy array or tensor, said array or tensor\n    must be 0- or 1-dimensional.\n\n    :param deep: Whether to create a copy of the underlying dataset as\n    a basis for the subset. If False then the data of the subset will be\n    a view of the original dataset's data.\n\n    :returns: A subset of the dataset from the given indices.\n    \"\"\"\n    if isinstance(indices, (np.ndarray, Tensor)):\n        if indices.ndim > 1:\n            raise ValueError(\"If 'indices' is an array, it must be 0- or 1-dimensional.\")\n        indices = cast(List[int], indices.tolist())\n\n    current_indices = None\n    if isinstance(dataset, Subset):\n        base_dataset, current_indices = extract_base_dataset(dataset, return_subset_indices=True)\n        if not isinstance(base_dataset, CdtDataset):\n            raise TypeError(\n                f\"Subsets can only be created with make_subset from {CdtDataset.__name__} instances \"\n                f\"or PyTorch Subsets of them.\"\n            )\n        if isinstance(current_indices, Tensor):\n            current_indices = current_indices.tolist()\n    else:\n        base_dataset = dataset\n    subset = gcopy(base_dataset, deep=deep)\n\n    def _subset_from_indices(_dataset: CdtDataset, _indices: Union[List[int], slice]) -> CdtDataset:\n        _dataset.x = _dataset.x[_indices]\n        if _dataset.y is not None:\n            _dataset.y = _dataset.y[_indices]\n        if _dataset.s is not None:\n            _dataset.s = _dataset.s[_indices]\n        return _dataset\n\n    if current_indices is not None:\n        subset = _subset_from_indices(_dataset=subset, _indices=current_indices)\n    if indices is not None:\n        subset = _subset_from_indices(_dataset=subset, _indices=indices)\n\n    return subset\n\n\nclass pb_collate:\n    def __init__(self, cast_to_sample: bool = True) -> None:\n        self.cast_to_sample = cast_to_sample\n\n    def _collate(self, batch: Sequence[Any]) -> Any:\n        elem = batch[0]\n        elem_type = type(elem)\n        if isinstance(elem, Tensor):\n            out = None\n            if torch.utils.data.get_worker_info() is not None:  # type: ignore\n                # If we're in a background process, concatenate directly into a\n                # shared memory tensor to avoid an extra copy\n                numel = sum(x.numel() for x in batch)\n                storage = elem.storage()._new_shared(numel)\n                out = elem.new(storage)\n            ndims = elem.dim()\n            if (ndims > 0) and ((ndims % 2) == 0):\n                return torch.cat(batch, dim=0, out=out)  # type: ignore\n            return torch.stack(batch, dim=0, out=out)  # type: ignore\n        elif (\n            elem_type.__module__ == \"numpy\"\n            and elem_type.__name__ != \"str_\"\n            and elem_type.__name__ != \"string_\"\n        ):\n            elem = batch[0]\n            if elem_type.__name__ == \"ndarray\":\n                # array of string classes and object\n                if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n                    raise TypeError(default_collate_err_msg_format.format(elem.dtype))\n                return self._collate([torch.as_tensor(b) for b in batch])\n        elif isinstance(elem, float):\n            return torch.tensor(batch, dtype=torch.float64)\n        elif isinstance(elem, int):\n            return torch.tensor(batch)\n        elif isinstance(elem, string_classes):\n            return batch\n        elif isinstance(elem, Mapping):\n            return {key: 
self._collate([d[key] for d in batch]) for key in elem}\n        elif isinstance(elem, tuple) and hasattr(elem, \"_fields\"):  # namedtuple\n            return elem_type(*(self._collate(samples) for samples in zip(*batch)))\n        elif is_dataclass(elem):  # dataclass\n            return elem_type(\n                **{\n                    field.name: self._collate([getattr(d, field.name) for d in batch])\n                    for field in fields(elem)\n                }\n            )\n        elif isinstance(elem, (tuple, list)):\n            transposed = zip(*batch)\n            return [self._collate(samples) for samples in transposed]\n        raise TypeError(default_collate_err_msg_format.format(elem_type))\n\n    def __call__(self, batch: Sequence[Any]) -> Any:\n        collated_batch = self._collate(batch=batch)\n        if self.cast_to_sample and (not isinstance(collated_batch, SampleBase)):\n            if isinstance(collated_batch, Tensor):\n                collated_batch = NamedSample(x=collated_batch)\n            elif isinstance(collated_batch, (tuple, list, dict)):\n                if len(collated_batch) == 1:\n                    sample_cls = NamedSample\n                elif len(collated_batch) == 2:\n                    sample_cls = BinarySample\n                elif len(collated_batch) == 3:\n                    sample_cls = TernarySample\n                else:\n                    raise ValueError\n                if isinstance(collated_batch, dict):\n                    collated_batch = sample_cls(**collated_batch)\n                else:\n                    collated_batch = sample_cls(*collated_batch)\n            else:\n                raise ValueError(\n                    f\"batch of type '{type(collated_batch)}' could not be automatically converted into a \"\n                    \"'Sample' instance. Batch must be of type 'dict', 'tuple', or 'list'.\"\n                )\n        return collated_batch\n\n\nclass CdtDataLoader(DataLoader):\n    def __init__(\n        self,\n        dataset: Dataset,\n        *,\n        batch_size: Optional[int],\n        shuffle: bool = False,\n        sampler: Optional[Sampler[int]] = None,\n        batch_sampler: Optional[Sampler[Sequence[int]]] = None,\n        num_workers: int = 0,\n        pin_memory: bool = False,\n        drop_last: bool = False,\n        timeout: float = 0,\n        worker_init_fn: Optional[_worker_init_fn_t] = None,\n        multiprocessing_context: Optional[Union[BaseContext, str]] = None,\n        generator: Optional[torch.Generator] = None,\n        prefetch_factor: int = 2,\n        persistent_workers: bool = False,\n        cast_to_sample: bool = True,\n    ) -> None:\n        super().__init__(\n            dataset,\n            batch_size=batch_size,\n            shuffle=shuffle,\n            sampler=sampler,\n            batch_sampler=batch_sampler,\n            num_workers=num_workers,\n            collate_fn=pb_collate(cast_to_sample=cast_to_sample),\n            pin_memory=pin_memory,\n            drop_last=drop_last,\n            timeout=timeout,\n            worker_init_fn=worker_init_fn,\n            multiprocessing_context=multiprocessing_context,\n            generator=generator,\n            prefetch_factor=prefetch_factor,\n            persistent_workers=persistent_workers,\n        )\n\n\ndef check_integrity(*, filepath: Path, md5: Optional[str]) -> None:\n    from torchvision.datasets.utils import check_integrity  # type: ignore\n\n    ext = filepath.suffix\n    if ext not in [\".zip\", \".7z\"] and not check_integrity(fpath=str(filepath), md5=md5):\n        raise RuntimeError(\"Dataset corrupted; try deleting it and redownloading it.\")\n\n\nclass UrlFileInfo(NamedTuple):\n    name: str\n    url: str\n    md5: Optional[str] = None\n\n\ndef download_from_url(\n    *,\n    file_info: Union[UrlFileInfo, List[UrlFileInfo]],\n    root: Union[Path, str],\n    logger: Optional[logging.Logger] = None,\n    remove_finished: bool = True,\n) -> None:\n\n    logger = logging.getLogger(__name__) if logger is None else logger\n    file_info_ls = file_info if isinstance(file_info, list) else [file_info]\n    if not isinstance(root, Path):\n        root = Path(root).expanduser()\n    # Create the specified root directory if it doesn't already exist\n    root.mkdir(parents=True, exist_ok=True)\n\n    for info in file_info_ls:\n        filepath = root / info.name\n\n        
extracted_filepath = filepath\n for _ in extracted_filepath.suffixes:\n extracted_filepath = extracted_filepath.with_suffix(\"\")\n\n if extracted_filepath.exists():\n logger.info(f\"File '{info.name}' already downloaded and extracted.\")\n else:\n if filepath.exists():\n logger.info(f\"File '{info.name}' already downloaded.\")\n else:\n logger.info(f\"Downloading file '{info.name}' from address '{info.url}'.\")\n download_url(url=info.url, filename=info.name, root=str(root), md5=info.md5)\n\n logger.info(f\"Extracting '{filepath.resolve()}' to '{root.resolve()}'\")\n try:\n extract_archive(\n from_path=str(filepath),\n to_path=str(extracted_filepath),\n remove_finished=remove_finished,\n )\n # Fall back on using jar to unzip the archive\n except BadZipFile:\n try:\n subprocess.run([\"jar\", \"-xvf\", str(filepath)], check=True, cwd=root)\n except subprocess.CalledProcessError:\n logger.info(\n \"Attempted to fall back on using Java to extract malformed .zip file; \"\n \"however, there was a problem. Try redownloading the zip file or \"\n \"checking that Java has been properly added to your system variables.\"\n )\n\n\nclass GdriveFileInfo(NamedTuple):\n name: str\n id: str\n md5: Optional[str] = None\n\n\ndef download_from_gdrive(\n *,\n file_info: Union[GdriveFileInfo, List[GdriveFileInfo]],\n root: Union[Path, str],\n logger: Optional[logging.Logger] = None,\n) -> None:\n \"\"\"Attempt to download data if files cannot be found in the root directory.\"\"\"\n\n logger = logging.getLogger(__name__) if logger is None else logger\n\n file_info_ls = file_info if isinstance(file_info, list) else [file_info]\n if not isinstance(root, Path):\n root = Path(root).expanduser()\n # Create the specified root directory if it doesn't already exist\n root.mkdir(parents=True, exist_ok=True)\n\n for info in file_info_ls:\n filepath = root / info.name\n if filepath.exists():\n logger.info(f\"File '{info.name}' already downloaded.\")\n else:\n import gdown\n\n logger.info(f\"Downloading file '{info.name}' from Google Drive.\")\n gdown.cached_download(\n url=f\"https://drive.google.com/uc?id={info.id}\",\n path=str(filepath),\n quiet=False,\n md5=info.md5,\n )\n if filepath.suffix == \".zip\":\n if filepath.with_suffix(\"\").exists():\n logger.info(f\"File '{info.name}' already unzipped.\")\n else:\n check_integrity(filepath=filepath, md5=info.md5)\n # ------------------------------ Unzip the data ------------------------------\n import zipfile\n\n logger.info(f\"Unzipping '{filepath.resolve()}'; this could take a while.\")\n with zipfile.ZipFile(filepath, \"r\") as fhandle:\n fhandle.extractall(str(root))\n" ]
[ [ "torch.cat", "numpy.array", "torch.stack", "torch.utils.data.get_worker_info", "torch.utils.data._utils.collate.default_collate_err_msg_format.format", "torch.from_numpy", "torch.tensor", "torch.as_tensor", "numpy.moveaxis", "torch.utils.data._utils.collate.np_str_obj_array_pattern.search" ] ]
jzf2101/boardlaw
[ "29126c2a6ab7f11154fb242c303d3b11f1566201" ]
[ "boardlaw/networks.py" ]
[ "import numpy as np\nimport torch\nfrom . import heads\nfrom torch import nn\nimport torch.jit\nfrom rebar import recurrence, arrdict, profiling\nfrom torch.nn import functional as F\nfrom collections import namedtuple\n\nclass ReZeroResidual(nn.Linear):\n\n def __init__(self, width):\n super().__init__(width, width)\n nn.init.orthogonal_(self.weight, gain=2**.5)\n self.register_parameter('α', nn.Parameter(torch.zeros(())))\n\n def forward(self, x, *args, **kwargs):\n return x + self.α*super().forward(F.relu(x))\n\nclass FCModel(nn.Module):\n\n def __init__(self, obs_space, action_space, width=256, depth=64):\n super().__init__()\n self.obs_space = obs_space\n self.action_space = action_space\n\n self.policy = heads.output(action_space, width)\n self.sampler = self.policy.sample\n\n blocks = [heads.intake(obs_space, width)]\n for _ in range(depth):\n blocks.append(ReZeroResidual(width))\n self.body = nn.Sequential(*blocks) \n\n self.value = heads.ValueOutput(width)\n\n def forward(self, worlds):\n neck = self.body(worlds.obs)\n return arrdict.arrdict(\n logits=self.policy(neck, worlds.valid), \n v=self.value(neck, worlds.valid, worlds.seats))" ]
[ [ "torch.nn.Sequential", "torch.nn.init.orthogonal_", "torch.nn.functional.relu", "torch.zeros" ] ]
MicrobialDarkMatter/GraphMB
[ "04d777953bb7e5e23ec445e3d956c11c120feaa1" ]
[ "src/graphmb/graph_functions.py" ]
[ "from pathlib import Path\nimport time\nimport os\nimport pdb\nimport itertools\nfrom collections import Counter\nimport networkx as nx\nimport numpy as np\nfrom tqdm import tqdm\nimport operator\nfrom vamb.cluster import cluster as vamb_cluster\nimport dgl\nimport random\n\nfrom graphmb.evaluate import read_contig_genes, read_marker_gene_sets, evaluate_contig_sets, calculate_overall_prf\nimport torch\n\nSEED = 0\n\ncolors = [\n \"black\",\n \"silver\",\n \"maroon\",\n \"fuchsia\",\n \"lime\",\n \"olive\",\n \"yellow\",\n \"navy\",\n \"teal\",\n \"steelblue\",\n \"darkred\",\n \"darkgreen\",\n \"darkblue\",\n \"darkorange\",\n \"lightpink\",\n \"lightgreen\",\n \"lightblue\",\n \"crimson\",\n \"darkviolet\",\n \"tomato\",\n \"tan\",\n \"tab:blue\",\n \"tab:orange\",\n \"tab:green\",\n \"tab:red\",\n \"tab:purple\",\n \"tab:brown\",\n \"tab:pink\",\n \"tab:gray\",\n \"tab:olive\",\n \"tab:cyan\",\n]\n\n\ndef set_seed(seed=0):\n dgl.random.seed(seed)\n torch.manual_seed(seed)\n random.seed(seed)\n np.random.seed(seed)\n\n\nclass Read:\n def __init__(self, readid, species=None):\n self.readid = readid\n self.species = species\n self.mappings = set()\n\n\nclass ReadMapping:\n def __init__(self, readid, bitflag, contigname, pos, mapq, seq):\n self.readid = readid\n self.bitflag = bitflag\n self.contigname = contigname\n self.pos = pos\n self.mapq = mapq\n\n\ndef count_kmers(seq, k, kmer_to_id, canonical_k):\n # Used in case kmers are used as input features\n # https://stackoverflow.com/q/22428020\n kmers = [seq[i : i + k] for i in range(len(seq) - k + 1)]\n kmers = [kmer_to_id[k] for k in kmers if \"N\" not in k]\n kmer_counts = Counter(kmers)\n counts = np.array([kmer_counts[k] for k in range(canonical_k)])\n counts = counts / counts.sum()\n return counts\n\n\ndef draw_nx_graph(graph, node_to_label, labels_to_node, basename, contig_sizes=None, node_titles=None):\n # draw graph with pybiz library, creates an HTML file with graph\n # del labels_to_node[\"NA\"]\n from pyvis.network import Network\n\n labels_to_color = {l: colors[i % len(colors)] for i, l in enumerate(labels_to_node.keys())}\n labels_to_color[\"NA\"] = \"white\"\n sorted(labels_to_node, key=lambda key: len(labels_to_node[key]), reverse=True)[: len(colors)]\n # node_labels to plot\n\n node_labels = {\n node: {\n \"label\": str(node) + \":\" + str(node_to_label[node]),\n \"color\": labels_to_color[node_to_label[node]],\n }\n for node in node_to_label\n }\n if contig_sizes is not None:\n for n in node_labels:\n node_labels[n][\"size\"] = contig_sizes[n]\n\n if node_titles is not None:\n for n in node_labels:\n node_labels[n][\"title\"] = node_titles[n]\n\n nx.set_node_attributes(graph, node_labels)\n\n net = Network(notebook=False, height=\"750px\", width=\"100%\")\n net.add_nodes(\n [int(n) for n in node_labels.keys()],\n label=[node_labels[n][\"label\"] for n in node_labels],\n size=[node_labels[n].get(\"size\", 1) * 10 for n in node_labels],\n color=[node_labels[n][\"color\"] for n in node_labels],\n title=[node_labels[n].get(\"title\", \"\") for n in node_labels],\n )\n for u, v, a in graph.edges(data=True):\n if u != v:\n weight = float(a[\"weight\"].item())\n if weight != 1:\n net.add_edge(int(u), int(v), color=\"gray\", title=\"reads weight: {}\".format(weight))\n else:\n net.add_edge(int(u), int(v))\n\n # net.toggle_physics(False)\n net.show_buttons()\n print(\"saving graph to\", basename)\n net.show(\"{}.html\".format(basename))\n\n\ndef connected_components(graph, node_to_label, basename, min_elems=1):\n # explore 
connected components\n    connected = [c for c in sorted(nx.connected_components(graph), key=len, reverse=True) if len(c) > min_elems]\n    print(\"writing components to\", basename + \"_node_to_component.csv\")\n    write_components_file(connected, basename + \"_node_to_component.csv\")\n    multiple_contigs = 0\n    mixed_contigs = 0\n    for group in connected:\n        multiple_contigs += 1\n        group_labels = [node_to_label[c] for c in group if \"edge\" in c]\n        group_labels = set(group_labels)\n        if len(group_labels) > 1:\n            mixed_contigs += 1\n            # print(group, group_labels)\n\n    disconnected = [c for c in sorted(nx.connected_components(graph), key=len, reverse=True) if len(c) <= min_elems]\n    for group in disconnected:\n        for node in group:\n            graph.remove_node(node)\n\n    print(\"graph density:\", nx.density(graph))\n    print(\">1\", multiple_contigs)\n    print(\"mixed groups\", mixed_contigs)\n    return connected, disconnected\n\n\ndef write_components_file(components, outpath, minsize=2):\n    \"\"\"Write file mapping each contig/node to a component ID (different for each assembly)\n\n    Args:\n        components (list): List of connected components of a graph\n        outpath (str): path to write file\n        minsize: minimum number of elements of a component\n    \"\"\"\n    with open(outpath, \"w\") as outfile:\n        for ic, c in enumerate(components):\n            if len(c) < minsize:\n                continue\n            for node in c:\n                if \"edge\" in node:  # ignore read nodes\n                    outfile.write(f\"{node}\\t{ic}\\n\")\n\n\ndef read_reads_mapping_sam(readspath):\n    \"\"\"Open all *read*.sam files in a dir and parse to dict of Read objects\n    Filter by bitflag (not primary alignment) and not empty contig names\n    Args:\n        readspath (str): directory containing the *reads*.sam mapping files\n    \"\"\"\n    contigs_to_reads = {}\n    reads_dict = {}\n    for reads in Path(readspath).glob(\"**/*reads_*.sam\"):\n        print(\"reading\", reads)\n        speciesname = reads.stem.split(\"_reads\")[0]\n        with reads.open() as f:\n            for line in tqdm(f):\n                values = line.strip().split()\n                if line.startswith(\"@SQ\"):\n                    continue\n                elif line.startswith(\"@PG\"):\n                    continue\n                else:\n                    read_name = values[0]\n                    if read_name not in reads_dict:\n                        reads_dict[read_name] = Read(read_name, species=speciesname)\n                    contig_name = values[2]\n                    mapq = int(values[4])\n                    bitflag = int(values[1])\n                    pos = int(values[3])\n                    # seq = values[9]\n                    seq = None\n                    # \tnot primary alignment: 8,\n                    if (bitflag >> 8) & 1 == 1:\n                        continue\n                    if contig_name == \"*\":\n                        continue\n                    # only mapq > 0\n                    if mapq > 0:\n                        reads_dict[read_name].mappings.add(\n                            ReadMapping(read_name, bitflag, contig_name, pos, mapq, seq)\n                        )\n                    # if contig_name not in contigs_to_reads:\n                    #    contigs_to_reads[contig_name] = set()\n                    # contigs_to_reads[contig_name].add(read_name)\n    return reads_dict\n\n\ndef augment_graph(graph, reads_dict, add_reads=False, min_mappings=0):\n    \"\"\"Add read nodes to graph according to read mapping file\n\n    Args:\n        graph (networkx Graph): contig assembly graph\n        reads_dict (dict): Dict of read objects\n        add_reads: add read nodes if True, otherwise add edges between contigs\n        min_mappings: min number of mappings to add edge\n    Return:\n        graph with added edges\n    \"\"\"\n    edges = []\n    for r in reads_dict:\n        if len(reads_dict[r].mappings) > min_mappings:\n            if add_reads:\n                for m in reads_dict[r].mappings:\n                    edges.append((r, m.contigname))\n            else:\n                for c1, c2 in itertools.combinations(reads_dict[r].mappings, 2):\n                    if c1.contigname != c2.contigname:\n                        edges.append((c1.contigname, c2.contigname, {\"Title\": r, \"width\": 3}))\n    print(\"adding to networkx graph\", len(edges), \"edges\")\n    
graph.add_edges_from(edges)\n return graph\n\n\ndef get_kmer_to_id(kmer):\n kmer_to_ids = {}\n BASE_COMPLEMENT = {\"A\": \"T\", \"T\": \"A\", \"G\": \"C\", \"C\": \"G\"}\n all_kmers = itertools.product(\"ATCG\", repeat=kmer)\n all_kmers = [\"\".join(k) for k in all_kmers]\n new_id = 0\n for kmer in all_kmers:\n if kmer not in kmer_to_ids:\n kmer_to_ids[kmer] = new_id\n rev_compl = \"\".join(tuple([BASE_COMPLEMENT[x] for x in reversed(kmer)]))\n kmer_to_ids[rev_compl] = new_id\n new_id += 1\n return kmer_to_ids, new_id\n\n\ndef open_gfa_file(filename, filter=1000, root=False, kmer=4):\n G = nx.Graph()\n if root:\n root_node = G.add_node(\"root\", length=0)\n skipped_contigs = set()\n skipped_edges = set()\n kmer_to_id, canonical_k = get_kmer_to_id(kmer)\n with open(filename, \"r\") as f:\n for line in f:\n if line.startswith(\"S\"):\n values = line.strip().split()\n contigid = values[1]\n # contiglen = int(values[3].split(\":\")[-1])\n contiglen = len(values[2])\n contig_kmers = count_kmers(values[2], kmer, kmer_to_id, canonical_k)\n if contiglen < filter:\n skipped_contigs.add(contigid)\n else:\n\n G.add_node(contigid, length=contiglen, kmers=contig_kmers) # TODO: add coverage and kmer too\n if root:\n G.add_edge(contigid, \"root\")\n if line.startswith(\"L\"):\n values = line.strip().split()\n if values[1] in skipped_contigs or values[3] in skipped_contigs:\n skipped_edges.add((values[1], values[3]))\n continue\n G.add_edge(values[1], values[3])\n\n print(\"load graph from GFA\")\n print(\"skipped these contigs {} (len<{})\".format(len(skipped_contigs), filter))\n print(\"skipped these edges {} (len<{})\".format(len(skipped_edges), filter))\n return G\n\n\ndef plot_embs(node_ids, node_embeddings_2dim, labels_to_node, centroids, hq_centroids, node_sizes, outputname=None):\n \"\"\"Plot embs of most labels with most support\n\n Args:\n node_ids ([type]): [description]\n node_embeddings_2dim ([type]): [description]\n labels_to_node ([type]): [description]\n \"\"\"\n import matplotlib.pyplot as plt\n\n markers = [\"o\", \"s\", \"p\", \"*\"]\n if \"NA\" in labels_to_node:\n del labels_to_node[\"NA\"]\n # breakpoint()\n labels_to_node = {label: labels_to_node[label] for label in labels_to_node if len(labels_to_node[label]) > 0}\n labels_to_plot = sorted(labels_to_node, key=lambda key: len(labels_to_node[key]), reverse=True)[\n : len(colors) * len(markers)\n ]\n # print(\"ploting these labels\", [l, colors[il], len(labels_to_node[l]) for il, l in enumerate(labels_to_plot)])\n x_to_plot = []\n y_to_plot = []\n colors_to_plot = []\n sizes_to_plot = []\n markers_to_plot = []\n print(labels_to_plot)\n plt.figure()\n print(\" LABEL COLOR SIZE DOTS\")\n for i, l in enumerate(labels_to_plot):\n valid_nodes = 0\n if len(labels_to_node) == 0:\n continue\n for node in labels_to_node[l]:\n if node not in node_ids:\n # print(\"skipping\", node)\n continue\n node_idx = node_ids.index(node)\n x_to_plot.append(node_embeddings_2dim[node_idx][0])\n y_to_plot.append(node_embeddings_2dim[node_idx][1])\n if node_sizes is not None:\n sizes_to_plot.append(node_sizes[node_idx])\n else:\n sizes_to_plot.append(50)\n valid_nodes += 1\n colors_to_plot.append(colors[i % len(colors)])\n markers_to_plot.append(markers[i // len(colors)])\n # breakpoint()\n # print(\"plotting\", l, colors[i % len(colors)], markers[i // len(colors)], len(labels_to_node[l]), valid_nodes)\n # plt.scatter(x_to_plot, y_to_plot, s=sizes_to_plot, c=colors[i], label=l) # , alpha=0.5)\n sc = plt.scatter(\n x_to_plot,\n y_to_plot,\n s=sizes_to_plot,\n 
c=colors[i % len(colors)],\n            label=l,\n            marker=markers[i // len(colors)],\n            alpha=0.4,\n        )  # , alpha=0.5)\n        x_to_plot = []\n        y_to_plot = []\n        sizes_to_plot = []\n    if centroids is not None:\n        hq_centroids_mask = [x in hq_centroids for x in range(len(centroids))]\n        lq_centroids_mask = [x not in hq_centroids for x in range(len(centroids))]\n        # lq_centroids = set(range(len(centroids))) - set(hq_centroids)\n\n        # plt.scatter(\n        #    centroids[lq_centroids_mask, 0], centroids[lq_centroids_mask, 1], c=\"black\", label=\"centroids (LQ)\", marker=\"x\"\n        # )\n        plt.scatter(\n            centroids[hq_centroids_mask, 0],\n            centroids[hq_centroids_mask, 1],\n            c=\"black\",\n            label=\"centroids (HQ)\",\n            marker=\"P\",\n        )\n\n    # for n in node_embeddings:\n    #    plt.scatter(x_to_plot, y_to_plot, c=colors_to_plot) #, alpha=0.5)\n\n    if outputname is not None:\n        plt.savefig(outputname, bbox_inches=\"tight\", dpi=1200)\n    else:\n        plt.legend()\n        plt.show()\n\n\ndef cluster_embs(\n    node_embeddings,\n    node_ids,\n    clusteringalgo,\n    kclusters,\n    device=\"cpu\",\n    node_lens=None,\n):\n\n    if clusteringalgo == \"vamb\":\n        it = vamb_cluster(\n            node_embeddings, node_ids, cuda=(device == \"cuda:0\")\n        )  # , seeds=seeds) # contig_lens=node_lens) #\n        cluster_to_contig = {i: c for (i, (n, c)) in enumerate(it)}\n\n        # get embs to clusters (cluster to contig has only contig names, not index)\n        \"\"\"cluster_to_embs = {\n            c: np.array([node_embeddings[i] for i, n in enumerate(node_ids) if n in cluster_to_contig[c]])\n            for c in cluster_to_contig\n        }\n        cluster_centroids = np.array([cluster_to_embs[c].mean(0) for c in cluster_to_contig])\"\"\"\n        cluster_centroids = None  # not necessary for now\n    else:\n        from sklearn.cluster import (\n            KMeans,\n            DBSCAN,\n            AgglomerativeClustering,\n            MiniBatchKMeans,\n            SpectralClustering,\n            Birch,\n            OPTICS,\n        )\n        from sklearn.mixture import GaussianMixture\n\n        if clusteringalgo == \"kmeans\":\n            clustering = KMeans(n_clusters=kclusters, random_state=SEED)\n            cluster_labels = clustering.fit_predict(node_embeddings)\n            cluster_centroids = clustering.cluster_centers_\n        elif clusteringalgo == \"kmeansgpu\":\n            import kmeans_pytorch  # provided by the 'kmeans-pytorch' package\n            node_embeddings = torch.tensor(node_embeddings).cuda()\n            cluster_labels, cluster_centroids = kmeans_pytorch.kmeans(\n                X=node_embeddings, num_clusters=kclusters, device=torch.device(\"cuda:0\")\n            )\n        elif clusteringalgo == \"dbscan\":\n            cluster_labels = DBSCAN(eps=1.1, min_samples=5).fit_predict(node_embeddings)\n            # cluster_centroids = clustering.cluster_centers_\n            cluster_centroids = None\n        elif clusteringalgo == \"gmm\":\n            cluster_model = GaussianMixture(\n                n_components=kclusters,\n                covariance_type=\"full\",\n                max_iter=1000,\n                random_state=SEED,\n                verbose=2,\n                verbose_interval=1,\n            ).fit(node_embeddings)\n            cluster_labels = cluster_model.predict(node_embeddings)\n            cluster_centroids = cluster_model.means_\n        elif clusteringalgo == \"kmeansconst\":\n            from k_means_constrained import KMeansConstrained  # provided by the 'k-means-constrained' package\n            cluster_labels = KMeansConstrained(\n                n_clusters=kclusters, size_min=1, size_max=5, random_state=SEED\n            ).fit_predict(node_embeddings)\n        elif clusteringalgo == \"kmeansbatch\":\n            kmeans = MiniBatchKMeans(n_clusters=kclusters, random_state=SEED, batch_size=100, init=seed_matrix)  # expects a precomputed 'seed_matrix' of initial centroids\n            cluster_labels = kmeans.fit_predict(node_embeddings)\n            cluster_centroids = kmeans.cluster_centers_\n        elif clusteringalgo == \"spectral\":\n            cluster_labels = SpectralClustering(n_clusters=kclusters, random_state=SEED).fit_predict(node_embeddings)\n            cluster_centroids = None\n        elif clusteringalgo == \"birch\":\n            cluster_labels = Birch(n_clusters=kclusters).fit_predict(node_embeddings)\n            cluster_centroids = 
None\n        elif clusteringalgo == \"optics\":\n            cluster_labels = OPTICS(min_samples=5, cluster_method=\"xi\", n_jobs=-1).fit_predict(node_embeddings)\n            cluster_centroids = None\n        else:\n            print(\"invalid clustering algorithm\")\n            return None\n        # compare clustering labels with actual labels\n        # for each cluster, get majority label, and then P/R\n        cluster_to_contig = {i: [] for i in range(kclusters)}\n        for il, l in enumerate(cluster_labels):\n            # if l not in cluster_to_contig:\n            #    cluster_to_contig[l] = []\n            cluster_to_contig[l].append(node_ids[il])\n\n    return cluster_to_contig, cluster_centroids\n\n\ndef evaluate_binning(cluster_to_contig, node_to_label, label_to_node, outputclusters=False, contig_sizes=None):\n    \"\"\"Evaluate binning results using contig labels (supervised scenario)\n\n    :param cluster_to_contig: mapping cluster IDs to contig names\n    :type cluster_to_contig: dict\n    :param node_to_label: mapping contig ids to labels (taxon)\n    :type node_to_label: dict\n    :param label_to_node: mapping label names to contigs\n    :type label_to_node: dict\n    :param outputclusters: print members of all clusters, defaults to False\n    :type outputclusters: bool, optional\n    :param contig_sizes: Provide contig sizes for balanced scores, defaults to None\n    :type contig_sizes: dict, optional\n    \"\"\"\n    # uncomment if you want to plot only the biggest labels\n    # labels_to_plot = sorted(label_to_node, key = lambda key: len(label_to_node.get(key, [])), reverse=True)[:6]\n    # print(labels_to_plot)\n    avg_precision = []\n    avg_recall = []\n    avg_f1 = []\n    cluster_sizes = []\n    if contig_sizes is None:\n        contig_sizes = {c: 1 for c in node_to_label}\n    for c in cluster_to_contig:\n        cluster_labels = [node_to_label.get(n) for n in cluster_to_contig[c] if n in node_to_label]\n        cluster_counts = {}\n        for label in set(cluster_labels):\n            cluster_counts[label] = sum(\n                [contig_sizes[n] for n in cluster_to_contig[c] if node_to_label.get(n) == label]\n            )\n        if len(cluster_labels) == 0:  # we do not have labels for any of the elements of this cluster\n            continue\n        # get majority label:\n        # cluster_counter = collections.Counter(cluster_labels)\n        # cluster_majority = cluster_counter.most_common(1)\n        cluster_majority = max(cluster_counts.items(), key=operator.itemgetter(1))\n        # print(cluster_majority)\n        # if cluster_majority[0][0] not in labels_to_plot:\n        #    continue\n        if cluster_majority[0] not in label_to_node:\n            print(\n                \"tie cluster\",\n                f\"cluster {c}, majority: {cluster_majority[0]}, cluster size {len(cluster_to_contig[c])}\",\n            )\n            continue\n        # print(f\"cluster {c}, majority: {cluster_majority}, cluster size {len(cluster_to_contig[c])}\")\n        # print(f\"   {len(label_to_node.get(cluster_majority[0], []))} contigs with this label\")\n        # avg_precision.append(len(cluster_to_contig) * cluster_majority[0][1]/len(cluster_to_contig[c]))\n        # avg_recall.append(len(cluster_to_contig) * cluster_majority[0][1]/len(label_to_node[cluster_majority[0][0]]))\n        cluster_size = sum([contig_sizes.get(n, 1) for n in cluster_to_contig[c]])\n        cluster_p = cluster_majority[1] / cluster_size\n        avg_precision.append(cluster_p)\n        cluster_r = cluster_majority[1] / sum([contig_sizes.get(n, 1) for n in label_to_node[cluster_majority[0]]])\n        avg_recall.append(cluster_r)\n        cluster_f1 = 2 * cluster_p * cluster_r / (cluster_p + cluster_r)\n        avg_f1.append(cluster_f1)\n        # print(cluster_p, cluster_r)\n        if outputclusters:\n            print(cluster_to_contig[c])\n        cluster_sizes.append(len(cluster_to_contig))\n    # print(\"average precision\", 
sum(avg_precision)/sum(cluster_sizes)/len(avg_precision))\n    # print(\"average recall\", sum(avg_recall)/sum(cluster_sizes)/len(avg_recall))\n    print(\n        \"average precision\",\n        round(sum(avg_precision) / len(avg_precision), 4),\n        \"average recall\",\n        round(sum(avg_recall) / len(avg_recall), 4),\n        \"average f1\",\n        round(sum(avg_f1) / len(avg_f1), 4),\n        \"P>0.95 and R>0.9:\",\n        len([i for i in range(len(avg_recall)) if avg_recall[i] >= 0.9 and avg_precision[i] >= 0.95]),\n    )\n\n\ndef read_contigs_scg(ref_file, marker_file, node_names):\n    \"\"\"Read marker stats and return count table\n\n    :param ref_file: path to file with reference markers (e.g. Bacteria.ms)\n    :type ref_file: str\n    :param marker_file: path to file with markers found on each contig\n    :type marker_file: str\n    :param node_names: list of node names\n    :type node_names: list\n    :return: matrix where rows are contigs, columns are markers, values are marker counts\n    :rtype: numpy.ndarray\n    \"\"\"\n    ref_marker_genes = read_marker_gene_sets(ref_file)\n    contigs_markers = read_contig_genes(marker_file)\n\n    # flatten ref marker gene sets\n    ref_marker_genes = [g for m in ref_marker_genes for g in m]\n    counts = []\n    for contig in node_names:\n        counts.append([contigs_markers[contig].get(g, 0) for g in ref_marker_genes])\n\n    return np.array(counts)\n\n\ndef calculate_bin_metrics(results, extra_metrics=False, logger=None):\n    # TODO zero division\n    hq_bins = [bin for bin in results if results[bin][\"comp\"] >= 90 and results[bin][\"cont\"] < 5]\n    mq_bins = [bin for bin in results if results[bin][\"comp\"] >= 50 and results[bin][\"cont\"] < 10]\n    if extra_metrics:\n        avg_comp = sum([results[bin][\"comp\"] for bin in results]) / len(results)\n        avg_cont = sum([results[bin][\"cont\"] for bin in results]) / len(results)\n        cont_comp50 = [results[bin][\"cont\"] for bin in results if results[bin][\"comp\"] > 50]\n        cont_comp50 = sum(cont_comp50) / len(cont_comp50)\n        cont_comp90 = [results[bin][\"cont\"] for bin in results if results[bin][\"comp\"] > 90]\n        cont_comp90 = sum(cont_comp90) / len(cont_comp90)\n        comp_cont5 = [results[bin][\"comp\"] for bin in results if results[bin][\"cont\"] < 5]\n        comp_cont5 = sum(comp_cont5) / len(comp_cont5)\n        logger.info(\n            \"HQ {}, avg comp {:.2f}, avg cont {:.2f}, cont when comp>50 {:.2f}, cont when comp>90 {:.2f}, comp when cont<5 {:.2f}\".format(\n                hq_bins, avg_comp, avg_cont, cont_comp50, cont_comp90, comp_cont5\n            )\n        )\n    else:\n        logger.info(f\"HQ: {len(hq_bins)}, MQ: {len(mq_bins)}\")\n    return hq_bins, mq_bins\n\n\ndef cluster_eval(\n    model,\n    dataset,\n    logits,\n    clustering,\n    k,\n    loss,\n    best_hq,\n    best_hq_epoch,\n    epoch,\n    device,\n    clusteringloss=False,\n    logger=None,\n    use_marker_contigs_only=False,\n):\n    \"\"\"Cluster contig embs and eval with markers\n\n    :param model: Model used to generate embs, save if better than best_hq\n    :type model: nn.Module\n    :param dataset: dataset object used to train model\n    :type dataset: ContigsDataset\n    :param logits: tensor with output of model\n    :type logits: torch.Tensor\n    :param clustering: Type of clustering to be done\n    :type clustering: str\n    :param k: Number of clusters\n    :type k: int\n    :param loss: loss (for logging)\n    :type loss: [type]\n    :param best_hq: Best HQ obtained at this point\n    :type best_hq: int\n    :param best_hq_epoch: Epoch where best HQ was obtained\n    :type best_hq_epoch: int\n    :param epoch: Current epoch\n    :type epoch: int\n    :param device: If 
using cuda for clustering\n    :type device: str\n    :param clusteringloss: Compute a clustering loss, defaults to False\n    :type clusteringloss: bool, optional\n    :param logger: Logger object, defaults to None\n    :type logger: logging.Logger, optional\n    :return: new best HQ and epoch, clustering loss, cluster to contig mapping\n    :rtype: list\n    \"\"\"\n    kmeans_loss = None\n    t0_cluster = time.time()\n    model.eval()\n    set_seed()\n    if torch.is_tensor(logits):\n        embeds = logits.detach().numpy()\n    else:\n        embeds = logits\n    if use_marker_contigs_only:\n        marker_mask = [n in dataset.contig_markers and len(dataset.contig_markers[n]) > 0 for n in dataset.node_names]\n        print(\"clustering\", sum(marker_mask), \"markers\", len(marker_mask))\n        cluster_embeds = embeds[marker_mask]\n        cluster_names = np.array(dataset.node_names)[marker_mask]\n    else:\n        cluster_embeds = embeds\n        cluster_names = dataset.node_names\n    cluster_to_contig, centroids = cluster_embs(\n        cluster_embeds,\n        cluster_names,\n        clustering,\n        # len(dataset.connected),\n        k,\n        device=device,\n    )\n    contig_to_cluster = {}\n    for bin in cluster_to_contig:\n        for contig in cluster_to_contig[bin]:\n            contig_to_cluster[contig] = bin\n    if dataset.species is not None and len(dataset.species) > 1:\n        # evaluate_binning(\n        #    cluster_to_contig,\n        #    dataset.node_to_label,\n        #    dataset.label_to_node,\n        #    contig_sizes={dataset.contig_names[i]: dataset.nodes_len[i][0] for i in range(len(dataset.contig_names))},\n        # )\n        gs_metrics = calculate_overall_prf(cluster_to_contig, contig_to_cluster, dataset.node_to_label, dataset.label_to_node)\n        kmeans_loss = 0\n        if gs_metrics[\"f1\"] > best_hq:\n            best_hq = gs_metrics[\"f1\"]\n            best_hq_epoch = epoch\n            logger.info(\"new F1 best!!\")\n            torch.save(model.state_dict(), os.path.join(dataset.assembly, \"best_model_hq.pkl\"))\n\n\n    elif dataset.ref_marker_sets is not None:\n\n        results = evaluate_contig_sets(dataset.ref_marker_sets, dataset.contig_markers, cluster_to_contig)\n        hq, mq = calculate_bin_metrics(results, logger=logger)\n        # print(\"clustering time:\", time.time() - t0_cluster, embeds.shape)\n        if len(hq) > best_hq:\n            best_hq = len(hq)\n            best_hq_epoch = epoch\n            logger.info(\"new HQ best!!\")\n            torch.save(model.state_dict(), os.path.join(dataset.assembly, \"best_model_hq.pkl\"))\n\n        if clusteringloss:\n            centroids = torch.tensor(centroids, device=device)\n            # get centroids of hq bins\n            # centroids = centroids[hq]\n            # get IDs of contigs of HQ bins\n            hq_node_ids = [\n                dataset.node_names.index(n)\n                for bin in hq\n                for n in cluster_to_contig[bin]\n                # if sum(dataset.contig_markers[n].values()) > 0\n            ]\n            # count only nodes with SCGs\n            # TODO multiply squared dist by completness of cluster\n            # cluster_comp = np.array([results[i][\"comp\"] / 100 for i, r in enumerate(results)])\n            # breakpoint()\n            cluster_cont = np.array([i for i, r in enumerate(results) if results[i][\"cont\"] > 10])\n\n            cont_node_ids = [\n                dataset.node_names.index(n)\n                for bin in cluster_cont\n                for n in cluster_to_contig[bin]\n                # if sum(dataset.contig_markers[n].values()) > 0\n            ]\n            # TODO subtract dist of contaminated bins (same num of good bins)\n            hq_logits = logits[hq_node_ids]\n            # cont_logits = logits[cont_node_ids]\n            # breakpoint()\n            # https://discuss.pytorch.org/t/k-means-loss-calculation/22041/7\n            kmeans_loss_good = ((hq_logits[:, None] - centroids[None]) ** 2).sum(2).min(1)[0].mean()\n            # kmeans_loss_bad = ((cont_logits[:, None] - centroids[None]) ** 2).sum(2).min(1)[0].mean()\n            # kmeans_loss = -kmeans_loss_bad\n            kmeans_loss = 
kmeans_loss_good\n # kmeans_loss = kmeans_loss_good - kmeans_loss_bad\n # kmeans_loss = ((logits[:, None] - centroids[None]) ** 2).sum(2).min(1)[0].mean()\n logger.info(\n f\"Kmeans loss: {kmeans_loss.item()} on {len(hq_logits)} points {len(centroids)} total clusters\",\n )\n # loss = kmeans_loss * alpha + (1 - alpha) * loss\n logger.info(\n \"Epoch {:05d} | Best HQ: {} | Best epoch {} | Total loss {:.4f}\".format(\n epoch,\n best_hq,\n best_hq_epoch,\n loss.detach(),\n )\n )\n return best_hq, best_hq_epoch, kmeans_loss, cluster_to_contig\n\n\ndef compute_loss_para(adj, device):\n pos_weight = (adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()\n norm = adj.shape[0] * adj.shape[0] / float((adj.shape[0] * adj.shape[0] - adj.sum()) * 2)\n weight_mask = adj.view(-1) == 1\n weight_tensor = torch.ones(weight_mask.size(0)).to(device)\n weight_tensor[weight_mask] = pos_weight\n return weight_tensor, norm\n" ]
[ [ "torch.device", "numpy.array", "sklearn.cluster.MiniBatchKMeans", "sklearn.cluster.SpectralClustering", "torch.is_tensor", "numpy.random.seed", "matplotlib.pyplot.savefig", "sklearn.cluster.KMeans", "matplotlib.pyplot.legend", "sklearn.cluster.Birch", "matplotlib.pyplot.figure", "torch.manual_seed", "sklearn.cluster.OPTICS", "sklearn.mixture.GaussianMixture", "sklearn.cluster.DBSCAN", "matplotlib.pyplot.show", "torch.tensor", "matplotlib.pyplot.scatter" ] ]
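Note: the clusteringloss branch of cluster_eval above computes a k-means-style loss, i.e. the mean squared distance from each high-quality embedding to its nearest centroid (cf. the PyTorch discussion linked in the code). Below is a minimal self-contained sketch of just that computation, assuming PyTorch; the function name and shapes are illustrative, not part of the repository's API.

import torch

def nearest_centroid_loss(embeddings: torch.Tensor, centroids: torch.Tensor) -> torch.Tensor:
    # Pairwise squared distances, shape [n_points, n_centroids].
    d2 = ((embeddings[:, None, :] - centroids[None, :, :]) ** 2).sum(dim=2)
    # For each point, take the distance to its closest centroid, then average.
    return d2.min(dim=1).values.mean()

if __name__ == "__main__":
    torch.manual_seed(0)
    embs = torch.randn(100, 32, requires_grad=True)
    cents = torch.randn(5, 32)
    loss = nearest_centroid_loss(embs, cents)
    loss.backward()  # the loss is differentiable w.r.t. the embeddings
    print(loss.item())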
mou3adb/spread_the_particle
[ "6cc666fded62f07380ed1e3ed52969c436295906" ]
[ "scripts/text/text_particles.py" ]
[ "\"\"\"\nThe outfile structure is the following:\n\ndiameter density\nbirth lifetime\nis_captured stuck_to_geometry theta\n(blank line)\nRe Ur\n(blank line)\nn_trajectory\nx1 y1 up1 vp1 Uf1 Vf1 gradpx1 gradpy1 ap_x1 ap_y1 af_x1 af_y1\nx2 y2 up2 vp2 Uf2 Vf2 gradpx2 gradpy2 ap_x2 ap_y2 af_x2 af_y2\n...\nxNt yNt upNt vpNt UfNt VfNt gradpxNt gradpyNt ap_xN ap_yN af_xN af_yN\n\n\"\"\"\nimport sys\nsys.path.append('..')\n\nimport numpy as np\n\nfrom particle import Particle\n#==============================================================================\ndef floatIt(l):\n    return np.array([float(e) for e in l])\n\ndef intIt(l):\n    return np.array([int(e) for e in l])\n\ndef write_particle(p, f):\n    f.write('%2.3f %1.3f\\n' % (p.diameter, p.density))\n    f.write('%d %d\\n' % (p.birth, p.lifetime))\n    f.write('%s %s %s\\n' % (p.captured, p.stuck_to_geometry, p.theta))\n\n    f.write('\\n') # blank line\n    f.write('%d %.1f\\n' % (p.Re, p.Ur))\n\n    f.write('\\n')\n    Nt = len(p.trajectory)\n    f.write('%d\\n' % Nt)\n\n    for n in range(Nt):\n        f.write('%e '*12 % \\\n            (p.trajectory[n,0],\n             p.trajectory[n,1],\n             p.velocities[n,0],\n             p.velocities[n,1],\n             p.fluid_velocities[n,0],\n             p.fluid_velocities[n,1],\n             p.pressure_gradients[n,0],\n             p.pressure_gradients[n,1],\n             p.accelerations[n,0],\n             p.accelerations[n,1],\n             p.fluid_accelerations[n,0],\n             p.fluid_accelerations[n,1]))\n\n        f.write('\\n')\n\ndef write_particles(particles, outfile):\n    f = open(outfile, 'w')\n\n    Np = len(particles)\n\n    f.write('%d\\n' % Np)\n    f.write('\\n') # blank line\n\n    for p in particles:\n        write_particle(p, f)\n        f.write('\\n')\n\n    f.close()\n\ndef read_particle(f, old_version=False):\n    # I kept old_version because I had many particles saved before the final\n    # update of this function.\n    diameter, density = floatIt(f.readline().strip().split())\n\n    birth, lifetime = intIt(f.readline().strip().split())\n\n    if not(old_version):\n        str_captured, str_stuck, str_theta = f.readline().strip().split()\n        theta = float(str_theta)\n\n    else:\n        str_captured, str_stuck = f.readline().strip().split()\n        theta = None  # old-format files did not store theta\n\n    captured = False if str_captured == 'False' else True\n    stuck = None if str_stuck == 'None' else int(str_stuck)\n\n    f.readline() # read the blank line\n    Re, Ur = floatIt(f.readline().strip().split())\n\n    f.readline()\n    Nt = int(f.readline().strip())\n\n    trajectory = []\n    velocities = []\n\n    fluid_velocities = []\n    pressure_gradients = []\n\n    accelerations = []\n    fluid_accelerations = []\n\n    for n in range(Nt):\n        if old_version:\n            x, y, u, v, U, V, gradpx, gradpy \\\n                = floatIt(f.readline().strip().split())\n        else:\n            x, y, u, v, U, V, gradpx, gradpy, ap_x, ap_y, af_x, af_y \\\n                = floatIt(f.readline().strip().split())\n\n        trajectory.append([x, y])\n        velocities.append([u, v])\n\n        fluid_velocities.append([U, V])\n        pressure_gradients.append([gradpx, gradpy])\n\n        if not(old_version):\n            accelerations.append([ap_x, ap_y])\n            fluid_accelerations.append([af_x, af_y])\n\n    pos0 = trajectory[0]\n    u0 = velocities[0]\n\n    p = Particle(diameter, density, birth, lifetime, pos0, u0)\n\n    p.captured, p.stuck_to_geometry = captured, stuck\n\n    p.Re, p.Ur = Re, Ur\n\n    p.trajectory = np.array(trajectory)\n    p.velocities = np.array(velocities)\n\n    p.fluid_velocities = np.array(fluid_velocities)\n    p.pressure_gradients = np.array(pressure_gradients)\n\n    if not(old_version):\n        p.accelerations = np.array(accelerations)\n        p.fluid_accelerations = np.array(fluid_accelerations)\n\n    p.theta = theta  # None when reading old-format files\n\n    return p\n\ndef read_particles(infile, old_version=False):\n    f = open(infile, 'r')\n\n    Np = 
int(f.readline())\n\n f.readline() # read a blank line\n\n particles = []\n\n for i in range(Np):\n particles.append(read_particle(f, old_version))\n\n f.readline()\n\n f.close()\n\n return np.array(particles)\n" ]
[ [ "numpy.array" ] ]
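Note: the outfile layout documented at the top of text_particles.py makes each trajectory block machine-readable on its own. A minimal sketch of parsing one such block (Nt on a line, then Nt rows of 12 floats), assuming only the documented format; read_trajectory_block is an illustrative name, not part of the module.

import io
import numpy as np

def read_trajectory_block(f):
    # First line of the block: number of time steps Nt.
    nt = int(f.readline().strip())
    # Then Nt rows of 12 floats, in the column order documented above.
    rows = np.array([[float(v) for v in f.readline().split()] for _ in range(nt)])
    assert rows.shape == (nt, 12)
    return {
        "trajectory": rows[:, 0:2],
        "velocities": rows[:, 2:4],
        "fluid_velocities": rows[:, 4:6],
        "pressure_gradients": rows[:, 6:8],
        "accelerations": rows[:, 8:10],
        "fluid_accelerations": rows[:, 10:12],
    }

if __name__ == "__main__":
    block = "2\n" + " ".join(["1.0"] * 12) + "\n" + " ".join(["2.0"] * 12) + "\n"
    parsed = read_trajectory_block(io.StringIO(block))
    print(parsed["trajectory"])  # [[1. 1.] [2. 2.]]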
XinhuiLi/PipelineHarmonization
[ "701f84841528d3e50c5538b2de244ee36eedd7d6" ]
[ "figure/s2/abcd.py" ]
[ "import os\nimport glob\nimport numpy as np\nimport nibabel as nb\nimport os\nimport scipy.io as sio\nfrom scipy.stats import pearsonr\n\nPH_SERVER_ROOT = os.environ.get('PH_SERVER_ROOT')\n\ndef zscore(data, axis):\n data -= data.mean(axis=axis, keepdims=True)\n data /= data.std(axis=axis, keepdims=True)\n return np.nan_to_num(data, copy=False)\n\ndef correlation(matrix1, matrix2):\n d1 = matrix1.shape[-1]\n d2 = matrix2.shape[-1]\n\n assert d1 == d2\n assert matrix1.ndim <= 2\n assert matrix2.ndim <= 2\n \n matrix1 = zscore(matrix1.astype(float), matrix1.ndim - 1) / np.sqrt(d1)\n matrix2 = zscore(matrix2.astype(float), matrix2.ndim - 1) / np.sqrt(d2)\n \n if matrix1.ndim >= matrix2.ndim:\n return np.dot(matrix1, matrix2.T)\n else:\n return np.dot(matrix2, matrix1.T)\n\ndef get_motion_params(file, pipeline = 'cpac'):\n\n data = np.genfromtxt(file).T\n \n if pipeline == 'abcd':\n data = np.vstack((data[3:,:],data[:3,:]))\n\n data = np.vstack((data[2,:]*180/np.pi,\n data[0,:]*180/np.pi,\n -data[1,:]*180/np.pi,\n data[5,:],\n data[3,:],\n -data[4,:]))\n else:\n data = np.vstack((data[2,:]*180/np.pi,\n data[0,:]*180/np.pi,\n -data[1,:]*180/np.pi,\n data[5,:],\n data[3,:],\n -data[4,:]))\n\n return data\n\npath1 = f'{os.environ.get(\"PH_SERVER_WORKING_ROOT\")}/CPAC_XCP/ABCD/preprocessed/data'\npath2 = f'{os.environ.get(\"DATA_INPUT_DIR\")}/cpac_abcd'\n\nsub_list = list(range(25427,25457))\nsub_list.remove(25430)\nsub_list.remove(25448)\n\nvar_list = ['anat mask', 'CSF', 'GM', 'WM', 'func mask', 'motion', \n 'anat-mni abcd', 'anat-mni cpac', 'func-mni abcd', 'func-mni cpac', \n 'func-t1 abcd', 'func-t1 cpac', 'anat-mni', 'func-mni', 'func-t1']\n\nif 'motion' in var_list:\n motion_index = var_list.index('motion')\ncorrs = np.zeros((len(sub_list), len(var_list)+5))\n\nfor num_sub, sub in enumerate(sub_list):\n\n sub = '00'+str(sub)\n\n path_list1 = [path1+'/sub-'+sub+'/ses-1/files/T1w/brainmask_fs.nii.gz',\n path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_0.nii.gz',\n path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_1.nii.gz',\n path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_fast_pve_2.nii.gz',\n path1+'/sub-'+sub+'/ses-1/files/task-rest01/brainmask_fs.2.0.nii.gz',\n path1+'/sub-'+sub+'/ses-1/files/task-rest01/MotionCorrection/task-rest01_mc.par',\n # path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/DCANBOLDProc_v4.0.0/FD.mat',\n path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/T1w_restore_brain.nii.gz', # ABCD anat to standard\n path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-template_desc-brain_T1w.nii.gz', # C-PAC anat to standard\n path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/task-rest01_mean.nii.gz', # ABCD func to standard\n path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/func/sub-'+sub+'a_ses-1_task-rest_run-1_space-template_desc-mean_bold.nii.gz', # C-PAC func to standard\n path1+'/sub-'+sub+'/ses-1/files/task-rest01/Scout2T1w_masked.nii.gz', # ABCD func in T1 space\n glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/func_to_anat_FLIRT_*/_*/linear_func_to_anat/*flirt.nii.gz')[0], # C-PAC func in T1 space\n path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/T1w_restore_brain.nii.gz', # ABCD anat to standard\n path1+'/sub-'+sub+'/ses-1/files/MNINonLinear/Results/task-rest01/task-rest01_mean.nii.gz', # ABCD func to standard\n path1+'/sub-'+sub+'/ses-1/files/task-rest01/Scout2T1w_masked.nii.gz'] # ABCD func in T1 space\n\n path_list2 = 
[path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-T1w_desc-brain_mask.nii.gz',\n path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-CSF_mask.nii.gz',\n path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-GM_mask.nii.gz',\n path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_label-WM_mask.nii.gz',\n path2+'/working/cpac_sub-'+sub+'a_ses-1/resample_anat_brain_mask_in_standard_125/wmparc_maths_fill_holes_maths_warp_warp_warp.nii.gz',\n glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/_*/*mcflirt_122/*par')[0], \n # glob.glob(path2+'/sub-'+sub+'/output/*/sub-'+sub+ses+'_ses-1/frame_wise_displacement_power/*/FD.1D')[0], # TODO find FD, only max/rel disp\n # Note: this template is from DCAN-HCP GitHub: https://github.com/DCAN-Labs/DCAN-HCP/tree/master/global/templates/MNI152_T1_1mm_brain.nii.gz\n f'{PH_SERVER_ROOT}/freesurfer/DCAN-HCP/global/templates/MNI152_T1_1mm_brain.nii.gz', # ABCD anat template\n f'{PH_SERVER_ROOT}/freesurfer/DCAN-HCP/global/templates/MNI152_T1_1mm_brain.nii.gz', # C-PAC anat template\n # Note: this template is from FSL standard template distribution\n '/usr/local/fsl/data/standard/MNI152_T1_2mm_brain.nii.gz', # ABCD func template on Lisa\n '/usr/local/fsl/data/standard/MNI152_T1_2mm_brain.nii.gz', # C-PAC func template on Lisa\n # '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz', # ABCD func template on Ned\n # '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz', # C-PAC func template on Ned\n path1+'/sub-'+sub+'/ses-1/files/T1w/T1w_acpc_dc_restore_brain.nii.gz', # ABCD T1\n glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/brain_extraction_*/*.nii.gz')[0], # C-PAC T1\n path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/anat/sub-'+sub+'a_ses-1_space-template_desc-brain_T1w.nii.gz', # C-PAC anat to standard\n path2+'/output/cpac_cpac_abcd-options/sub-'+sub+'a_ses-1/func/sub-'+sub+'a_ses-1_task-rest_run-1_space-template_desc-mean_bold.nii.gz', # C-PAC func to standard\n glob.glob(path2+'/working/cpac_sub-'+sub+'a_ses-1/func_to_anat_FLIRT_*/_*/linear_func_to_anat/*flirt.nii.gz')[0]] # C-PAC func in T1 space\n\n for num_var, var in enumerate(var_list):\n\n file1 = path_list1[num_var]\n file2 = path_list2[num_var]\n\n if '.nii.gz' in file1:\n img1 = nb.load(file1)\n data1 = img1.get_fdata()\n # data1 = img1.get_data()\n img2 = nb.load(file2)\n data2 = img2.get_fdata()\n # data2 = img2.get_data()\n elif '.par' in file1:\n data1 = get_motion_params(file1, 'abcd')\n data2 = get_motion_params(file2)\n elif '.mat' in file1:\n data1 = sio.loadmat(file1)['FD']\n data2 = np.expand_dims(np.loadtxt(file2)[1:], axis=1)\n\n if var == 'motion':\n motion_params = correlation(data1, data2)\n corr = motion_params.diagonal()\n elif isinstance(data1, np.ndarray) and data1.shape == data2.shape:\n corr, _ = pearsonr(data1.flatten(), data2.flatten())\n\n print(sub + ' ' + str(num_var) + ' ' + var)\n print(corr)\n\n if num_var < motion_index:\n corrs[num_sub][num_var] = round(corr, 3)\n elif num_var == motion_index:\n corrs[num_sub][num_var:num_var+6] = corr\n elif num_var > motion_index:\n corrs[num_sub][num_var+5] = round(corr, 3)\n\nprint(corrs)\nnp.save(f'{os.environ.get(\"SCRIPT_DIR\")}/abcd_corrs.npy', corrs)" ]
[ [ "numpy.dot", "numpy.nan_to_num", "numpy.genfromtxt", "scipy.io.loadmat", "numpy.loadtxt", "numpy.sqrt", "numpy.vstack" ] ]
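Note: the zscore/correlation helpers in abcd.py above rely on a standard identity: after z-scoring each row (population standard deviation) and dividing the dot product by the shared dimension, a matrix product yields the pairwise Pearson correlations. A small sketch verifying this against scipy; rowwise_pearson is an illustrative name.

import numpy as np
from scipy.stats import pearsonr

def rowwise_pearson(a, b):
    # Z-score every row; the mean of elementwise products of z-scores
    # (i.e. dot product / d) is the Pearson correlation coefficient.
    a = (a - a.mean(axis=1, keepdims=True)) / a.std(axis=1, keepdims=True)
    b = (b - b.mean(axis=1, keepdims=True)) / b.std(axis=1, keepdims=True)
    return a @ b.T / a.shape[1]

rng = np.random.default_rng(0)
x, y = rng.normal(size=(3, 50)), rng.normal(size=(4, 50))
c = rowwise_pearson(x, y)        # shape (3, 4): all row pairs at once
r, _ = pearsonr(x[0], y[0])
assert np.isclose(c[0, 0], r)
print(c[0, 0], r)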
Adam1679/FET
[ "a7fc83ae22c7f15d84a80b9ebde9e67bf74ea988" ]
[ "models/fetentvecutils.py" ]
[ "import logging\nimport random\nfrom collections import defaultdict\n\nimport numpy as np\n\nfrom utils import datautils\n\n\nclass FETEntityVec:\n    def get_entity_vecs(self, *input_args):\n        raise NotImplementedError\n\n\nclass MentionFeat:\n    @staticmethod\n    def get_feat_set():\n        return {'all-upper', 'has-upper', 'len-1', 'len-2', 'len-3', 'len>=4'}\n\n    @staticmethod\n    def features(model_sample):\n        '''\n        Compute a minimal set of surface-form features for a mention.\n\n        :param model_sample: sample carrying the mention string (mention_str) and its token sequence (mstr_token_seq)\n        :returns: dict of features\n        :rtype: defaultdict\n        '''\n\n        f = defaultdict(float)\n        # STUDENT\n        full_ch = model_sample.mention_str\n        upper_cnt = 0\n        lower_cnt = 0\n        for c in full_ch:\n            if c.isupper():\n                upper_cnt += 1\n            if c.islower():\n                lower_cnt += 1\n        if upper_cnt > 0 and lower_cnt == 0:\n            f['all-upper'] = 1.0\n        if upper_cnt > 0:\n            f['has-upper'] = 1.0\n        length = len(model_sample.mstr_token_seq)\n        if length == 1:\n            f['len-1'] = 1.0\n        elif length == 2:\n            f['len-2'] = 1.0\n        elif length == 3:\n            f['len-3'] = 1.0\n        else:\n            f['len>=4'] = 1.0\n        # END STUDENT\n        return f\n\n\nclass ELDirectEntityVec:\n    def __init__(self, n_types, type_to_id_dict, el_system, wid_types_file):\n        self.n_types = n_types\n        self.el_system = el_system\n        self.rand_assign_rate = 1.1\n        print('loading {} ...'.format(wid_types_file))\n        logging.info('rand_assign_rate={}'.format(self.rand_assign_rate))\n        # TODO: are these the raw knowledge-graph types, or types already mapped to the target type vocabulary?\n        self.wid_types_dict = datautils.load_wid_types_file(wid_types_file, type_to_id_dict)\n\n    def get_entity_vecs(self, mention_strs, prev_pred_results, min_popularity=10, true_wids=None,\n                        filter_by_pop=False, person_type_id=None, person_l2_type_ids=None, type_vocab=None):\n        all_entity_vecs = np.zeros((len(mention_strs), self.n_types), np.float32)\n        el_sgns = np.zeros(len(mention_strs), np.float32)\n        probs = np.zeros(len(mention_strs), np.float32)\n        # Find candidate entities by string matching; each candidate is scored using the graph's in-/out-degrees.\n        candidates_list = self.el_system.link_all(mention_strs, prev_pred_results)\n        # print(candidates_list)\n        for i, el_candidates in enumerate(candidates_list):\n            # el_candidates = self.el_system.link(mstr)\n            if not el_candidates:\n                continue\n            wid, mstr_target_cnt, popularity = el_candidates[0]\n            if filter_by_pop and popularity < min_popularity:\n                continue\n            types = self.wid_types_dict.get(wid, None)\n            if types is None:\n                continue\n\n            probs[i] = mstr_target_cnt / (sum([cand[1] for cand in el_candidates]) + 1e-7)  # (41 x 1)\n            el_sgns[i] = 1\n            for type_id in types:\n                all_entity_vecs[i][type_id] = 1\n\n            if person_type_id is not None and person_type_id in types and (\n                    self.rand_assign_rate >= 1.0 or np.random.uniform() < self.rand_assign_rate):\n                for _ in range(3):\n                    rand_person_type_id = person_l2_type_ids[random.randint(0, len(person_l2_type_ids) - 1)]\n                    if all_entity_vecs[i][rand_person_type_id] < 1.0:\n                        all_entity_vecs[i][rand_person_type_id] = 1.0\n                        break\n        return all_entity_vecs, el_sgns, probs\n" ]
[ [ "numpy.random.uniform" ] ]
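Note: the core of get_entity_vecs above is a multi-hot type encoding: each linked entity sets a 1 at the index of every type it carries. A stripped-down sketch of just that step, assuming numpy; build_type_vecs is an illustrative name, not the module's API.

import numpy as np

def build_type_vecs(linked_type_ids, n_types):
    # One row per mention; 1.0 wherever the linked entity has that type id.
    vecs = np.zeros((len(linked_type_ids), n_types), np.float32)
    for i, type_ids in enumerate(linked_type_ids):
        for t in type_ids:
            vecs[i, t] = 1.0
    return vecs

# Mentions 0 and 2 were linked to typed entities; mention 1 had no candidate.
print(build_type_vecs([[0, 2], [], [1]], n_types=4))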
nazcaspider/simple-3dviz
[ "3c40007259a1f754311623f74d24b06f7b98be14" ]
[ "simple_3dviz/behaviours/mouse.py" ]
[ "import numpy as np\nfrom pyrr import matrix33, vector\n\nfrom . import Behaviour\nfrom .trajectory import Circle\n\n\nclass MouseRotate(Behaviour):\n \"\"\"Rotate the camera based using the mouse when left button is pressed.\n\n We rotate the camera with the following convention. At any given point we\n consider a sphere with a center in target and radius equal to the distance\n to the camera. We move on that sphere based on the movements of the mouse.\n \"\"\"\n def __init__(self):\n self._start = None\n self._origin = None\n self._camera_pos = None\n self._right = None\n self._up = None\n\n def behave(self, params):\n if params.mouse.left_pressed:\n if self._start is None:\n self._start = params.mouse.location\n self._origin = params.scene.camera_target\n self._camera_pos = params.scene.camera_position\n cam_dir = vector.normalize(self._camera_pos - self._origin)\n self._right = np.cross(params.scene.up_vector, cam_dir)\n self._up = np.cross(cam_dir, self._right)\n else:\n size = params.scene.size\n end = params.mouse.location\n deltaX = float(end[0] - self._start[0])/size[0]\n deltaY = float(end[1] - self._start[1])/size[1]\n\n Rx = matrix33.create_from_axis_rotation(\n self._up,\n deltaX*2*np.pi\n )\n Ry = matrix33.create_from_axis_rotation(\n self._right,\n deltaY*2*np.pi\n )\n R = Ry.dot(Rx)\n newpos = self._origin + R.dot(self._camera_pos - self._origin)\n newup = R.dot(self._up)\n\n params.scene.camera_position = newpos\n params.scene.up_vector = newup\n params.refresh = True\n else:\n self._start = None\n\n\nclass MouseZoom(Behaviour):\n \"\"\"Zoom in/out with the mouse scroll wheel.\"\"\"\n def __init__(self, delta=0.9):\n self._delta = delta\n\n def behave(self, params):\n rotations = params.mouse.wheel_rotation\n if rotations != 0:\n cam_position = params.scene.camera_position\n cam_target = params.scene.camera_target\n ray = cam_target - cam_position\n if rotations > 0:\n cam_position += ray * (1-self._delta)\n else:\n cam_position -= ray * (1-self._delta)\n params.scene.camera_position = cam_position\n params.refresh = True\n\n\nclass MousePan(Behaviour):\n \"\"\"Move the target by dragging the mouse with the middle button pressed.\"\"\"\n def __init__(self, delta=1.):\n self._delta = delta\n self._start = None\n self._target = None\n self._right = None\n self._up = None\n\n def behave(self, params):\n if params.mouse.middle_pressed:\n if self._start is None:\n self._start = params.mouse.location\n self._target = params.scene.camera_target\n cam_dir = params.scene.camera_position - self._target\n cam_dir = vector.normalize(cam_dir)\n self._right = np.cross(params.scene.up_vector, cam_dir)\n self._up = np.cross(cam_dir, self._right)\n else:\n size = params.scene.size\n end = params.mouse.location\n deltaX = float(end[0] - self._start[0])/size[0]\n deltaY = float(end[1] - self._start[1])/size[1]\n\n newtarget = (\n self._target +\n -self._delta * deltaX * self._right +\n self._delta * deltaY * self._up\n )\n params.scene.camera_target = newtarget\n params.refresh = True\n else:\n self._start = None\n" ]
[ [ "numpy.cross" ] ]
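Note: MouseRotate above orbits the camera on a sphere around the target by rotating about the cached up/right axes (via pyrr's axis-angle matrices), mapping a full-width drag to 2*pi. A numpy-only sketch of the same orbit step using Rodrigues' rotation formula; the helper name is illustrative.

import numpy as np

def rotate_about_axis(point, origin, axis, theta):
    # Rodrigues' formula: rotate (point - origin) by angle theta around axis.
    k = axis / np.linalg.norm(axis)
    v = point - origin
    v_rot = (v * np.cos(theta)
             + np.cross(k, v) * np.sin(theta)
             + k * np.dot(k, v) * (1.0 - np.cos(theta)))
    return origin + v_rot

cam = np.array([0.0, 0.0, 5.0])
target = np.zeros(3)
up = np.array([0.0, 1.0, 0.0])
# A quarter-width horizontal drag (deltaX = 0.25) maps to an angle of pi/2.
print(rotate_about_axis(cam, target, up, 0.25 * 2 * np.pi))  # ~[5. 0. 0.]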
utegulovalmat/Mask_RCNN
[ "daa3b1d582e3ed95059b76071055c9e90ec513d0" ]
[ "mrcnn/model.py" ]
[ "\"\"\"\nMask R-CNN\nThe main Mask R-CNN model implementation.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport random\nimport datetime\nimport re\nimport math\nimport logging\nfrom collections import OrderedDict\nimport multiprocessing\nimport numpy as np\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nimport keras.layers as KL\nimport keras.engine as KE\nimport keras.models as KM\n\nfrom mrcnn import utils\n\n# Requires TensorFlow 1.3+ and Keras 2.0.8+.\nfrom distutils.version import LooseVersion\nassert LooseVersion(tf.__version__) >= LooseVersion(\"1.3\")\nassert LooseVersion(keras.__version__) >= LooseVersion('2.0.8')\n\n\n############################################################\n# Utility Functions\n############################################################\n\ndef log(text, array=None):\n \"\"\"Prints a text message. And, optionally, if a Numpy array is provided it\n prints it's shape, min, and max values.\n \"\"\"\n if array is not None:\n text = text.ljust(25)\n text += (\"shape: {:20} \".format(str(array.shape)))\n if array.size:\n text += (\"min: {:10.5f} max: {:10.5f}\".format(array.min(),array.max()))\n else:\n text += (\"min: {:10} max: {:10}\".format(\"\",\"\"))\n text += \" {}\".format(array.dtype)\n print(text)\n\n\nclass BatchNorm(KL.BatchNormalization):\n \"\"\"Extends the Keras BatchNormalization class to allow a central place\n to make changes if needed.\n\n Batch normalization has a negative effect on training if batches are small\n so this layer is often frozen (via setting in Config class) and functions\n as linear layer.\n \"\"\"\n def call(self, inputs, training=None):\n \"\"\"\n Note about training values:\n None: Train BN layers. This is the normal mode\n False: Freeze BN layers. Good when batch size is small\n True: (don't use). Set layer in training mode even when making inferences\n \"\"\"\n return super(self.__class__, self).call(inputs, training=training)\n\n\ndef compute_backbone_shapes(config, image_shape):\n \"\"\"Computes the width and height of each stage of the backbone network.\n\n Returns:\n [N, (height, width)]. Where N is the number of stages\n \"\"\"\n if callable(config.BACKBONE):\n return config.COMPUTE_BACKBONE_SHAPE(image_shape)\n\n # Currently supports ResNet only\n assert config.BACKBONE in [\"resnet50\", \"resnet101\"]\n return np.array(\n [[int(math.ceil(image_shape[0] / stride)),\n int(math.ceil(image_shape[1] / stride))]\n for stride in config.BACKBONE_STRIDES])\n\n\n############################################################\n# Resnet Graph\n############################################################\n\n# Code adopted from:\n# https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py\n\ndef identity_block(input_tensor, kernel_size, filters, stage, block,\n use_bias=True, train_bn=True):\n \"\"\"The identity_block is the block that has no conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), name=conv_name_base + '2a',\n use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base + '2c',\n use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n x = KL.Add()([x, input_tensor])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef conv_block(input_tensor, kernel_size, filters, stage, block,\n strides=(2, 2), use_bias=True, train_bn=True):\n \"\"\"conv_block is the block that has a conv layer at shortcut\n # Arguments\n input_tensor: input tensor\n kernel_size: default 3, the kernel size of middle conv layer at main path\n filters: list of integers, the nb_filters of 3 conv layer at main path\n stage: integer, current stage label, used for generating layer names\n block: 'a','b'..., current block label, used for generating layer names\n use_bias: Boolean. To use or not use a bias in conv layers.\n train_bn: Boolean. Train or freeze Batch Norm layers\n Note that from stage 3, the first conv layer at main path is with subsample=(2,2)\n And the shortcut should have subsample=(2,2) as well\n \"\"\"\n nb_filter1, nb_filter2, nb_filter3 = filters\n conv_name_base = 'res' + str(stage) + block + '_branch'\n bn_name_base = 'bn' + str(stage) + block + '_branch'\n\n x = KL.Conv2D(nb_filter1, (1, 1), strides=strides,\n name=conv_name_base + '2a', use_bias=use_bias)(input_tensor)\n x = BatchNorm(name=bn_name_base + '2a')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same',\n name=conv_name_base + '2b', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2b')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.Conv2D(nb_filter3, (1, 1), name=conv_name_base +\n '2c', use_bias=use_bias)(x)\n x = BatchNorm(name=bn_name_base + '2c')(x, training=train_bn)\n\n shortcut = KL.Conv2D(nb_filter3, (1, 1), strides=strides,\n name=conv_name_base + '1', use_bias=use_bias)(input_tensor)\n shortcut = BatchNorm(name=bn_name_base + '1')(shortcut, training=train_bn)\n\n x = KL.Add()([x, shortcut])\n x = KL.Activation('relu', name='res' + str(stage) + block + '_out')(x)\n return x\n\n\ndef resnet_graph(input_image, architecture, stage5=False, train_bn=True):\n \"\"\"Build a ResNet graph.\n architecture: Can be resnet50 or resnet101\n stage5: Boolean. If False, stage5 of the network is not created\n train_bn: Boolean. 
Train or freeze Batch Norm layers\n \"\"\"\n assert architecture in [\"resnet50\", \"resnet101\"]\n # Stage 1\n x = KL.ZeroPadding2D((3, 3))(input_image)\n x = KL.Conv2D(64, (7, 7), strides=(2, 2), name='conv1', use_bias=True)(x)\n x = BatchNorm(name='bn_conv1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n C1 = x = KL.MaxPooling2D((3, 3), strides=(2, 2), padding=\"same\")(x)\n # Stage 2\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), train_bn=train_bn)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', train_bn=train_bn)\n C2 = x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', train_bn=train_bn)\n # Stage 3\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', train_bn=train_bn)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', train_bn=train_bn)\n C3 = x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', train_bn=train_bn)\n # Stage 4\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', train_bn=train_bn)\n block_count = {\"resnet50\": 5, \"resnet101\": 22}[architecture]\n for i in range(block_count):\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(98 + i), train_bn=train_bn)\n C4 = x\n # Stage 5\n if stage5:\n x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a', train_bn=train_bn)\n x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b', train_bn=train_bn)\n C5 = x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c', train_bn=train_bn)\n else:\n C5 = None\n return [C1, C2, C3, C4, C5]\n\n\n############################################################\n# Proposal Layer\n############################################################\n\ndef apply_box_deltas_graph(boxes, deltas):\n \"\"\"Applies the given deltas to the given boxes.\n boxes: [N, (y1, x1, y2, x2)] boxes to update\n deltas: [N, (dy, dx, log(dh), log(dw))] refinements to apply\n \"\"\"\n # Convert to y, x, h, w\n height = boxes[:, 2] - boxes[:, 0]\n width = boxes[:, 3] - boxes[:, 1]\n center_y = boxes[:, 0] + 0.5 * height\n center_x = boxes[:, 1] + 0.5 * width\n # Apply deltas\n center_y += deltas[:, 0] * height\n center_x += deltas[:, 1] * width\n height *= tf.exp(deltas[:, 2])\n width *= tf.exp(deltas[:, 3])\n # Convert back to y1, x1, y2, x2\n y1 = center_y - 0.5 * height\n x1 = center_x - 0.5 * width\n y2 = y1 + height\n x2 = x1 + width\n result = tf.stack([y1, x1, y2, x2], axis=1, name=\"apply_box_deltas_out\")\n return result\n\n\ndef clip_boxes_graph(boxes, window):\n \"\"\"\n boxes: [N, (y1, x1, y2, x2)]\n window: [4] in the form y1, x1, y2, x2\n \"\"\"\n # Split\n wy1, wx1, wy2, wx2 = tf.split(window, 4)\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=1)\n # Clip\n y1 = tf.maximum(tf.minimum(y1, wy2), wy1)\n x1 = tf.maximum(tf.minimum(x1, wx2), wx1)\n y2 = tf.maximum(tf.minimum(y2, wy2), wy1)\n x2 = tf.maximum(tf.minimum(x2, wx2), wx1)\n clipped = tf.concat([y1, x1, y2, x2], axis=1, name=\"clipped_boxes\")\n clipped.set_shape((clipped.shape[0], 4))\n return clipped\n\n\nclass ProposalLayer(KE.Layer):\n \"\"\"Receives anchor scores and selects a subset to pass as proposals\n to the second stage. Filtering is done based on anchor scores and\n non-max suppression to remove overlaps. 
It also applies bounding\n box refinement deltas to anchors.\n\n Inputs:\n rpn_probs: [batch, num_anchors, (bg prob, fg prob)]\n rpn_bbox: [batch, num_anchors, (dy, dx, log(dh), log(dw))]\n anchors: [batch, num_anchors, (y1, x1, y2, x2)] anchors in normalized coordinates\n\n Returns:\n Proposals in normalized coordinates [batch, rois, (y1, x1, y2, x2)]\n \"\"\"\n\n def __init__(self, proposal_count, nms_threshold, config=None, **kwargs):\n super(ProposalLayer, self).__init__(**kwargs)\n self.config = config\n self.proposal_count = proposal_count\n self.nms_threshold = nms_threshold\n\n def call(self, inputs):\n # Box Scores. Use the foreground class confidence. [Batch, num_rois, 1]\n scores = inputs[0][:, :, 1]\n # Box deltas [batch, num_rois, 4]\n deltas = inputs[1]\n deltas = deltas * np.reshape(self.config.RPN_BBOX_STD_DEV, [1, 1, 4])\n # Anchors\n anchors = inputs[2]\n\n # Improve performance by trimming to top anchors by score\n # and doing the rest on the smaller subset.\n pre_nms_limit = tf.minimum(self.config.PRE_NMS_LIMIT, tf.shape(anchors)[1])\n ix = tf.nn.top_k(scores, pre_nms_limit, sorted=True,\n name=\"top_anchors\").indices\n scores = utils.batch_slice([scores, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n deltas = utils.batch_slice([deltas, ix], lambda x, y: tf.gather(x, y),\n self.config.IMAGES_PER_GPU)\n pre_nms_anchors = utils.batch_slice([anchors, ix], lambda a, x: tf.gather(a, x),\n self.config.IMAGES_PER_GPU,\n names=[\"pre_nms_anchors\"])\n\n # Apply deltas to anchors to get refined anchors.\n # [batch, N, (y1, x1, y2, x2)]\n boxes = utils.batch_slice([pre_nms_anchors, deltas],\n lambda x, y: apply_box_deltas_graph(x, y),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors\"])\n\n # Clip to image boundaries. Since we're in normalized coordinates,\n # clip to 0..1 range. [batch, N, (y1, x1, y2, x2)]\n window = np.array([0, 0, 1, 1], dtype=np.float32)\n boxes = utils.batch_slice(boxes,\n lambda x: clip_boxes_graph(x, window),\n self.config.IMAGES_PER_GPU,\n names=[\"refined_anchors_clipped\"])\n\n # Filter out small boxes\n # According to Xinlei Chen's paper, this reduces detection accuracy\n # for small objects, so we're skipping it.\n\n # Non-max suppression\n def nms(boxes, scores):\n indices = tf.image.non_max_suppression(\n boxes, scores, self.proposal_count,\n self.nms_threshold, name=\"rpn_non_max_suppression\")\n proposals = tf.gather(boxes, indices)\n # Pad if needed\n padding = tf.maximum(self.proposal_count - tf.shape(proposals)[0], 0)\n proposals = tf.pad(proposals, [(0, padding), (0, 0)])\n return proposals\n proposals = utils.batch_slice([boxes, scores], nms,\n self.config.IMAGES_PER_GPU)\n return proposals\n\n def compute_output_shape(self, input_shape):\n return (None, self.proposal_count, 4)\n\n\n############################################################\n# ROIAlign Layer\n############################################################\n\ndef log2_graph(x):\n \"\"\"Implementation of Log2. TF doesn't have a native implementation.\"\"\"\n return tf.log(x) / tf.log(2.0)\n\n\nclass PyramidROIAlign(KE.Layer):\n \"\"\"Implements ROI Pooling on multiple levels of the feature pyramid.\n\n Params:\n - pool_shape: [pool_height, pool_width] of the output pooled regions. Usually [7, 7]\n\n Inputs:\n - boxes: [batch, num_boxes, (y1, x1, y2, x2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - feature_maps: List of feature maps from different levels of the pyramid.\n Each is [batch, height, width, channels]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, pool_height, pool_width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n\n def __init__(self, pool_shape, **kwargs):\n super(PyramidROIAlign, self).__init__(**kwargs)\n self.pool_shape = tuple(pool_shape)\n\n def call(self, inputs):\n # Crop boxes [batch, num_boxes, (y1, x1, y2, x2)] in normalized coords\n boxes = inputs[0]\n\n # Image meta\n # Holds details about the image. See compose_image_meta()\n image_meta = inputs[1]\n\n # Feature Maps. List of feature maps from different level of the\n # feature pyramid. Each is [batch, height, width, channels]\n feature_maps = inputs[2:]\n\n # Assign each ROI to a level in the pyramid based on the ROI area.\n y1, x1, y2, x2 = tf.split(boxes, 4, axis=2)\n h = y2 - y1\n w = x2 - x1\n # Use shape of first image. Images in a batch must have the same size.\n image_shape = parse_image_meta_graph(image_meta)['image_shape'][0]\n # Equation 1 in the Feature Pyramid Networks paper. Account for\n # the fact that our coordinates are normalized here.\n # e.g. a 224x224 ROI (in pixels) maps to P4\n image_area = tf.cast(image_shape[0] * image_shape[1], tf.float32)\n roi_level = log2_graph(tf.sqrt(h * w) / (224.0 / tf.sqrt(image_area)))\n roi_level = tf.minimum(5, tf.maximum(\n 2, 4 + tf.cast(tf.round(roi_level), tf.int32)))\n roi_level = tf.squeeze(roi_level, 2)\n\n # Loop through levels and apply ROI pooling to each. P2 to P5.\n pooled = []\n box_to_level = []\n for i, level in enumerate(range(2, 6)):\n ix = tf.where(tf.equal(roi_level, level))\n level_boxes = tf.gather_nd(boxes, ix)\n\n # Box indices for crop_and_resize.\n box_indices = tf.cast(ix[:, 0], tf.int32)\n\n # Keep track of which box is mapped to which level\n box_to_level.append(ix)\n\n # Stop gradient propogation to ROI proposals\n level_boxes = tf.stop_gradient(level_boxes)\n box_indices = tf.stop_gradient(box_indices)\n\n # Crop and Resize\n # From Mask R-CNN paper: \"We sample four regular locations, so\n # that we can evaluate either max or average pooling. 
In fact,\n # interpolating only a single value at each bin center (without\n # pooling) is nearly as effective.\"\n #\n # Here we use the simplified approach of a single value per bin,\n # which is how it's done in tf.crop_and_resize()\n # Result: [batch * num_boxes, pool_height, pool_width, channels]\n pooled.append(tf.image.crop_and_resize(\n feature_maps[i], level_boxes, box_indices, self.pool_shape,\n method=\"bilinear\"))\n\n # Pack pooled features into one tensor\n pooled = tf.concat(pooled, axis=0)\n\n # Pack box_to_level mapping into one array and add another\n # column representing the order of pooled boxes\n box_to_level = tf.concat(box_to_level, axis=0)\n box_range = tf.expand_dims(tf.range(tf.shape(box_to_level)[0]), 1)\n box_to_level = tf.concat([tf.cast(box_to_level, tf.int32), box_range],\n axis=1)\n\n # Rearrange pooled features to match the order of the original boxes\n # Sort box_to_level by batch then box index\n # TF doesn't have a way to sort by two columns, so merge them and sort.\n sorting_tensor = box_to_level[:, 0] * 100000 + box_to_level[:, 1]\n ix = tf.nn.top_k(sorting_tensor, k=tf.shape(\n box_to_level)[0]).indices[::-1]\n ix = tf.gather(box_to_level[:, 2], ix)\n pooled = tf.gather(pooled, ix)\n\n # Re-add the batch dimension\n shape = tf.concat([tf.shape(boxes)[:2], tf.shape(pooled)[1:]], axis=0)\n pooled = tf.reshape(pooled, shape)\n return pooled\n\n def compute_output_shape(self, input_shape):\n return input_shape[0][:2] + self.pool_shape + (input_shape[2][-1], )\n\n\n############################################################\n# Detection Target Layer\n############################################################\n\ndef overlaps_graph(boxes1, boxes2):\n \"\"\"Computes IoU overlaps between two sets of boxes.\n boxes1, boxes2: [N, (y1, x1, y2, x2)].\n \"\"\"\n # 1. Tile boxes2 and repeat boxes1. This allows us to compare\n # every boxes1 against every boxes2 without loops.\n # TF doesn't have an equivalent to np.repeat() so simulate it\n # using tf.tile() and tf.reshape.\n b1 = tf.reshape(tf.tile(tf.expand_dims(boxes1, 1),\n [1, 1, tf.shape(boxes2)[0]]), [-1, 4])\n b2 = tf.tile(boxes2, [tf.shape(boxes1)[0], 1])\n # 2. Compute intersections\n b1_y1, b1_x1, b1_y2, b1_x2 = tf.split(b1, 4, axis=1)\n b2_y1, b2_x1, b2_y2, b2_x2 = tf.split(b2, 4, axis=1)\n y1 = tf.maximum(b1_y1, b2_y1)\n x1 = tf.maximum(b1_x1, b2_x1)\n y2 = tf.minimum(b1_y2, b2_y2)\n x2 = tf.minimum(b1_x2, b2_x2)\n intersection = tf.maximum(x2 - x1, 0) * tf.maximum(y2 - y1, 0)\n # 3. Compute unions\n b1_area = (b1_y2 - b1_y1) * (b1_x2 - b1_x1)\n b2_area = (b2_y2 - b2_y1) * (b2_x2 - b2_x1)\n union = b1_area + b2_area - intersection\n # 4. Compute IoU and reshape to [boxes1, boxes2]\n iou = intersection / union\n overlaps = tf.reshape(iou, [tf.shape(boxes1)[0], tf.shape(boxes2)[0]])\n return overlaps\n\n\ndef detection_targets_graph(proposals, gt_class_ids, gt_boxes, gt_masks, config):\n \"\"\"Generates detection targets for one image. Subsamples proposals and\n generates target class IDs, bounding box deltas, and masks for each.\n\n Inputs:\n proposals: [POST_NMS_ROIS_TRAINING, (y1, x1, y2, x2)] in normalized coordinates. 
Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [MAX_GT_INSTANCES] int class IDs\n gt_boxes: [MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized coordinates.\n gt_masks: [height, width, MAX_GT_INSTANCES] of boolean type.\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized coordinates\n class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs. Zero padded.\n deltas: [TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw))]\n masks: [TRAIN_ROIS_PER_IMAGE, height, width]. Masks cropped to bbox\n boundaries and resized to neural network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n # Assertions\n asserts = [\n tf.Assert(tf.greater(tf.shape(proposals)[0], 0), [proposals],\n name=\"roi_assertion\"),\n ]\n with tf.control_dependencies(asserts):\n proposals = tf.identity(proposals)\n\n # Remove zero padding\n proposals, _ = trim_zeros_graph(proposals, name=\"trim_proposals\")\n gt_boxes, non_zeros = trim_zeros_graph(gt_boxes, name=\"trim_gt_boxes\")\n gt_class_ids = tf.boolean_mask(gt_class_ids, non_zeros,\n name=\"trim_gt_class_ids\")\n gt_masks = tf.gather(gt_masks, tf.where(non_zeros)[:, 0], axis=2,\n name=\"trim_gt_masks\")\n\n # Handle COCO crowds\n # A crowd box in COCO is a bounding box around several instances. Exclude\n # them from training. A crowd box is given a negative class ID.\n crowd_ix = tf.where(gt_class_ids < 0)[:, 0]\n non_crowd_ix = tf.where(gt_class_ids > 0)[:, 0]\n crowd_boxes = tf.gather(gt_boxes, crowd_ix)\n gt_class_ids = tf.gather(gt_class_ids, non_crowd_ix)\n gt_boxes = tf.gather(gt_boxes, non_crowd_ix)\n gt_masks = tf.gather(gt_masks, non_crowd_ix, axis=2)\n\n # Compute overlaps matrix [proposals, gt_boxes]\n overlaps = overlaps_graph(proposals, gt_boxes)\n\n # Compute overlaps with crowd boxes [proposals, crowd_boxes]\n crowd_overlaps = overlaps_graph(proposals, crowd_boxes)\n crowd_iou_max = tf.reduce_max(crowd_overlaps, axis=1)\n no_crowd_bool = (crowd_iou_max < 0.001)\n\n # Determine positive and negative ROIs\n roi_iou_max = tf.reduce_max(overlaps, axis=1)\n # 1. Positive ROIs are those with >= 0.5 IoU with a GT box\n positive_roi_bool = (roi_iou_max >= 0.5)\n positive_indices = tf.where(positive_roi_bool)[:, 0]\n # 2. Negative ROIs are those with < 0.5 with every GT box. Skip crowds.\n negative_indices = tf.where(tf.logical_and(roi_iou_max < 0.5, no_crowd_bool))[:, 0]\n\n # Subsample ROIs. Aim for 33% positive\n # Positive ROIs\n positive_count = int(config.TRAIN_ROIS_PER_IMAGE *\n config.ROI_POSITIVE_RATIO)\n positive_indices = tf.random_shuffle(positive_indices)[:positive_count]\n positive_count = tf.shape(positive_indices)[0]\n # Negative ROIs. 
Add enough to maintain positive:negative ratio.\n r = 1.0 / config.ROI_POSITIVE_RATIO\n negative_count = tf.cast(r * tf.cast(positive_count, tf.float32), tf.int32) - positive_count\n negative_indices = tf.random_shuffle(negative_indices)[:negative_count]\n # Gather selected ROIs\n positive_rois = tf.gather(proposals, positive_indices)\n negative_rois = tf.gather(proposals, negative_indices)\n\n # Assign positive ROIs to GT boxes.\n positive_overlaps = tf.gather(overlaps, positive_indices)\n roi_gt_box_assignment = tf.cond(\n tf.greater(tf.shape(positive_overlaps)[1], 0),\n true_fn = lambda: tf.argmax(positive_overlaps, axis=1),\n false_fn = lambda: tf.cast(tf.constant([]),tf.int64)\n )\n roi_gt_boxes = tf.gather(gt_boxes, roi_gt_box_assignment)\n roi_gt_class_ids = tf.gather(gt_class_ids, roi_gt_box_assignment)\n\n # Compute bbox refinement for positive ROIs\n deltas = utils.box_refinement_graph(positive_rois, roi_gt_boxes)\n deltas /= config.BBOX_STD_DEV\n\n # Assign positive ROIs to GT masks\n # Permute masks to [N, height, width, 1]\n transposed_masks = tf.expand_dims(tf.transpose(gt_masks, [2, 0, 1]), -1)\n # Pick the right mask for each ROI\n roi_masks = tf.gather(transposed_masks, roi_gt_box_assignment)\n\n # Compute mask targets\n boxes = positive_rois\n if config.USE_MINI_MASK:\n # Transform ROI coordinates from normalized image space\n # to normalized mini-mask space.\n y1, x1, y2, x2 = tf.split(positive_rois, 4, axis=1)\n gt_y1, gt_x1, gt_y2, gt_x2 = tf.split(roi_gt_boxes, 4, axis=1)\n gt_h = gt_y2 - gt_y1\n gt_w = gt_x2 - gt_x1\n y1 = (y1 - gt_y1) / gt_h\n x1 = (x1 - gt_x1) / gt_w\n y2 = (y2 - gt_y1) / gt_h\n x2 = (x2 - gt_x1) / gt_w\n boxes = tf.concat([y1, x1, y2, x2], 1)\n box_ids = tf.range(0, tf.shape(roi_masks)[0])\n masks = tf.image.crop_and_resize(tf.cast(roi_masks, tf.float32), boxes,\n box_ids,\n config.MASK_SHAPE)\n # Remove the extra dimension from masks.\n masks = tf.squeeze(masks, axis=3)\n\n # Threshold mask pixels at 0.5 to have GT masks be 0 or 1 to use with\n # binary cross entropy loss.\n masks = tf.round(masks)\n\n # Append negative ROIs and pad bbox deltas and masks that\n # are not used for negative ROIs with zeros.\n rois = tf.concat([positive_rois, negative_rois], axis=0)\n N = tf.shape(negative_rois)[0]\n P = tf.maximum(config.TRAIN_ROIS_PER_IMAGE - tf.shape(rois)[0], 0)\n rois = tf.pad(rois, [(0, P), (0, 0)])\n roi_gt_boxes = tf.pad(roi_gt_boxes, [(0, N + P), (0, 0)])\n roi_gt_class_ids = tf.pad(roi_gt_class_ids, [(0, N + P)])\n deltas = tf.pad(deltas, [(0, N + P), (0, 0)])\n masks = tf.pad(masks, [[0, N + P], (0, 0), (0, 0)])\n\n return rois, roi_gt_class_ids, deltas, masks\n\n\nclass DetectionTargetLayer(KE.Layer):\n \"\"\"Subsamples proposals and generates target box refinement, class_ids,\n and masks for each.\n\n Inputs:\n proposals: [batch, N, (y1, x1, y2, x2)] in normalized coordinates. Might\n be zero padded if there are not enough proposals.\n gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs.\n gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in normalized\n coordinates.\n gt_masks: [batch, height, width, MAX_GT_INSTANCES] of boolean type\n\n Returns: Target ROIs and corresponding class IDs, bounding box shifts,\n and masks.\n rois: [batch, TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)] in normalized\n coordinates\n target_class_ids: [batch, TRAIN_ROIS_PER_IMAGE]. 
Integer class IDs.\n target_deltas: [batch, TRAIN_ROIS_PER_IMAGE, (dy, dx, log(dh), log(dw)]\n target_mask: [batch, TRAIN_ROIS_PER_IMAGE, height, width]\n Masks cropped to bbox boundaries and resized to neural\n network output size.\n\n Note: Returned arrays might be zero padded if not enough target ROIs.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n super(DetectionTargetLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n proposals = inputs[0]\n gt_class_ids = inputs[1]\n gt_boxes = inputs[2]\n gt_masks = inputs[3]\n\n # Slice the batch and run a graph for each slice\n # TODO: Rename target_bbox to target_deltas for clarity\n names = [\"rois\", \"target_class_ids\", \"target_bbox\", \"target_mask\"]\n outputs = utils.batch_slice(\n [proposals, gt_class_ids, gt_boxes, gt_masks],\n lambda w, x, y, z: detection_targets_graph(\n w, x, y, z, self.config),\n self.config.IMAGES_PER_GPU, names=names)\n return outputs\n\n def compute_output_shape(self, input_shape):\n return [\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # rois\n (None, self.config.TRAIN_ROIS_PER_IMAGE), # class_ids\n (None, self.config.TRAIN_ROIS_PER_IMAGE, 4), # deltas\n (None, self.config.TRAIN_ROIS_PER_IMAGE, self.config.MASK_SHAPE[0],\n self.config.MASK_SHAPE[1]) # masks\n ]\n\n def compute_mask(self, inputs, mask=None):\n return [None, None, None, None]\n\n\n############################################################\n# Detection Layer\n############################################################\n\ndef refine_detections_graph(rois, probs, deltas, window, config):\n \"\"\"Refine classified proposals and filter overlaps and return final\n detections.\n\n Inputs:\n rois: [N, (y1, x1, y2, x2)] in normalized coordinates\n probs: [N, num_classes]. Class probabilities.\n deltas: [N, num_classes, (dy, dx, log(dh), log(dw))]. Class-specific\n bounding box deltas.\n window: (y1, x1, y2, x2) in normalized coordinates. The part of the image\n that contains the image excluding the padding.\n\n Returns detections shaped: [num_detections, (y1, x1, y2, x2, class_id, score)] where\n coordinates are normalized.\n \"\"\"\n # Class IDs per ROI\n class_ids = tf.argmax(probs, axis=1, output_type=tf.int32)\n # Class probability of the top class of each ROI\n indices = tf.stack([tf.range(probs.shape[0]), class_ids], axis=1)\n class_scores = tf.gather_nd(probs, indices)\n # Class-specific bounding box deltas\n deltas_specific = tf.gather_nd(deltas, indices)\n # Apply bounding box deltas\n # Shape: [boxes, (y1, x1, y2, x2)] in normalized coordinates\n refined_rois = apply_box_deltas_graph(\n rois, deltas_specific * config.BBOX_STD_DEV)\n # Clip boxes to image window\n refined_rois = clip_boxes_graph(refined_rois, window)\n\n # TODO: Filter out boxes with zero area\n\n # Filter out background boxes\n keep = tf.where(class_ids > 0)[:, 0]\n # Filter out low confidence boxes\n if config.DETECTION_MIN_CONFIDENCE:\n conf_keep = tf.where(class_scores >= config.DETECTION_MIN_CONFIDENCE)[:, 0]\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(conf_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n\n # Apply per-class NMS\n # 1. 
Prepare variables\n pre_nms_class_ids = tf.gather(class_ids, keep)\n pre_nms_scores = tf.gather(class_scores, keep)\n pre_nms_rois = tf.gather(refined_rois, keep)\n unique_pre_nms_class_ids = tf.unique(pre_nms_class_ids)[0]\n\n def nms_keep_map(class_id):\n \"\"\"Apply Non-Maximum Suppression on ROIs of the given class.\"\"\"\n # Indices of ROIs of the given class\n ixs = tf.where(tf.equal(pre_nms_class_ids, class_id))[:, 0]\n # Apply NMS\n class_keep = tf.image.non_max_suppression(\n tf.gather(pre_nms_rois, ixs),\n tf.gather(pre_nms_scores, ixs),\n max_output_size=config.DETECTION_MAX_INSTANCES,\n iou_threshold=config.DETECTION_NMS_THRESHOLD)\n # Map indices\n class_keep = tf.gather(keep, tf.gather(ixs, class_keep))\n # Pad with -1 so returned tensors have the same shape\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(class_keep)[0]\n class_keep = tf.pad(class_keep, [(0, gap)],\n mode='CONSTANT', constant_values=-1)\n # Set shape so map_fn() can infer result shape\n class_keep.set_shape([config.DETECTION_MAX_INSTANCES])\n return class_keep\n\n # 2. Map over class IDs\n nms_keep = tf.map_fn(nms_keep_map, unique_pre_nms_class_ids,\n dtype=tf.int64)\n # 3. Merge results into one list, and remove -1 padding\n nms_keep = tf.reshape(nms_keep, [-1])\n nms_keep = tf.gather(nms_keep, tf.where(nms_keep > -1)[:, 0])\n # 4. Compute intersection between keep and nms_keep\n keep = tf.sets.set_intersection(tf.expand_dims(keep, 0),\n tf.expand_dims(nms_keep, 0))\n keep = tf.sparse_tensor_to_dense(keep)[0]\n # Keep top detections\n roi_count = config.DETECTION_MAX_INSTANCES\n class_scores_keep = tf.gather(class_scores, keep)\n num_keep = tf.minimum(tf.shape(class_scores_keep)[0], roi_count)\n top_ids = tf.nn.top_k(class_scores_keep, k=num_keep, sorted=True)[1]\n keep = tf.gather(keep, top_ids)\n\n # Arrange output as [N, (y1, x1, y2, x2, class_id, score)]\n # Coordinates are normalized.\n detections = tf.concat([\n tf.gather(refined_rois, keep),\n tf.to_float(tf.gather(class_ids, keep))[..., tf.newaxis],\n tf.gather(class_scores, keep)[..., tf.newaxis]\n ], axis=1)\n\n # Pad with zeros if detections < DETECTION_MAX_INSTANCES\n gap = config.DETECTION_MAX_INSTANCES - tf.shape(detections)[0]\n detections = tf.pad(detections, [(0, gap), (0, 0)], \"CONSTANT\")\n return detections\n\n\nclass DetectionLayer(KE.Layer):\n \"\"\"Takes classified proposal boxes and their bounding box deltas and\n returns the final detection boxes.\n\n Returns:\n [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] where\n coordinates are normalized.\n \"\"\"\n\n def __init__(self, config=None, **kwargs):\n super(DetectionLayer, self).__init__(**kwargs)\n self.config = config\n\n def call(self, inputs):\n rois = inputs[0]\n mrcnn_class = inputs[1]\n mrcnn_bbox = inputs[2]\n image_meta = inputs[3]\n\n # Get windows of images in normalized coordinates. 
Windows are the area\n # in the image that excludes the padding.\n # Use the shape of the first image in the batch to normalize the window\n # because we know that all images get resized to the same size.\n m = parse_image_meta_graph(image_meta)\n image_shape = m['image_shape'][0]\n window = norm_boxes_graph(m['window'], image_shape[:2])\n\n # Run detection refinement graph on each item in the batch\n detections_batch = utils.batch_slice(\n [rois, mrcnn_class, mrcnn_bbox, window],\n lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),\n self.config.IMAGES_PER_GPU)\n\n # Reshape output\n # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in\n # normalized coordinates\n return tf.reshape(\n detections_batch,\n [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6])\n\n def compute_output_shape(self, input_shape):\n return (None, self.config.DETECTION_MAX_INSTANCES, 6)\n\n\n############################################################\n# Region Proposal Network (RPN)\n############################################################\n\ndef rpn_graph(feature_map, anchors_per_location, anchor_stride):\n \"\"\"Builds the computation graph of Region Proposal Network.\n\n feature_map: backbone features [batch, height, width, depth]\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n\n Returns:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n # TODO: check if stride of 2 causes alignment issues if the feature map\n # is not even.\n # Shared convolutional base of the RPN\n shared = KL.Conv2D(512, (3, 3), padding='same', activation='relu',\n strides=anchor_stride,\n name='rpn_conv_shared')(feature_map)\n\n # Anchor Score. [batch, height, width, anchors per location * 2].\n x = KL.Conv2D(2 * anchors_per_location, (1, 1), padding='valid',\n activation='linear', name='rpn_class_raw')(shared)\n\n # Reshape to [batch, anchors, 2]\n rpn_class_logits = KL.Lambda(\n lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 2]))(x)\n\n # Softmax on last dimension of BG/FG.\n rpn_probs = KL.Activation(\n \"softmax\", name=\"rpn_class_xxx\")(rpn_class_logits)\n\n # Bounding box refinement. [batch, H, W, anchors per location * depth]\n # where depth is [x, y, log(w), log(h)]\n x = KL.Conv2D(anchors_per_location * 4, (1, 1), padding=\"valid\",\n activation='linear', name='rpn_bbox_pred')(shared)\n\n # Reshape to [batch, anchors, 4]\n rpn_bbox = KL.Lambda(lambda t: tf.reshape(t, [tf.shape(t)[0], -1, 4]))(x)\n\n return [rpn_class_logits, rpn_probs, rpn_bbox]\n\n\ndef build_rpn_model(anchor_stride, anchors_per_location, depth):\n \"\"\"Builds a Keras model of the Region Proposal Network.\n It wraps the RPN graph so it can be used multiple times with shared\n weights.\n\n anchors_per_location: number of anchors per pixel in the feature map\n anchor_stride: Controls the density of anchors. Typically 1 (anchors for\n every pixel in the feature map), or 2 (every other pixel).\n depth: Depth of the backbone feature map.\n\n Returns a Keras Model object. 
The model outputs, when called, are:\n rpn_class_logits: [batch, H * W * anchors_per_location, 2] Anchor classifier logits (before softmax)\n rpn_probs: [batch, H * W * anchors_per_location, 2] Anchor classifier probabilities.\n rpn_bbox: [batch, H * W * anchors_per_location, (dy, dx, log(dh), log(dw))] Deltas to be\n applied to anchors.\n \"\"\"\n input_feature_map = KL.Input(shape=[None, None, depth],\n name=\"input_rpn_feature_map\")\n outputs = rpn_graph(input_feature_map, anchors_per_location, anchor_stride)\n return KM.Model([input_feature_map], outputs, name=\"rpn_model\")\n\n\n############################################################\n# Feature Pyramid Network Heads\n############################################################\n\ndef fpn_classifier_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True,\n fc_layers_size=1024):\n \"\"\"Builds the computation graph of the feature pyramid network classifier\n and regressor heads.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n fc_layers_size: Size of the 2 FC layers\n\n Returns:\n logits: [batch, num_rois, NUM_CLASSES] classifier logits (before softmax)\n probs: [batch, num_rois, NUM_CLASSES] classifier probabilities\n bbox_deltas: [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))] Deltas to apply to\n proposal boxes\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, POOL_SIZE, POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_classifier\")([rois, image_meta] + feature_maps)\n # Two 1024 FC layers (implemented with Conv2D for consistency)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (pool_size, pool_size), padding=\"valid\"),\n name=\"mrcnn_class_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n x = KL.TimeDistributed(KL.Conv2D(fc_layers_size, (1, 1)),\n name=\"mrcnn_class_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(), name='mrcnn_class_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n shared = KL.Lambda(lambda x: K.squeeze(K.squeeze(x, 3), 2),\n name=\"pool_squeeze\")(x)\n\n # Classifier head\n mrcnn_class_logits = KL.TimeDistributed(KL.Dense(num_classes),\n name='mrcnn_class_logits')(shared)\n mrcnn_probs = KL.TimeDistributed(KL.Activation(\"softmax\"),\n name=\"mrcnn_class\")(mrcnn_class_logits)\n\n # BBox head\n # [batch, num_rois, NUM_CLASSES * (dy, dx, log(dh), log(dw))]\n x = KL.TimeDistributed(KL.Dense(num_classes * 4, activation='linear'),\n name='mrcnn_bbox_fc')(shared)\n # Reshape to [batch, num_rois, NUM_CLASSES, (dy, dx, log(dh), log(dw))]\n s = K.int_shape(x)\n mrcnn_bbox = KL.Reshape((s[1], num_classes, 4), name=\"mrcnn_bbox\")(x)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\ndef build_fpn_mask_graph(rois, feature_maps, image_meta,\n pool_size, num_classes, train_bn=True):\n \"\"\"Builds the computation graph of the mask head of Feature Pyramid Network.\n\n rois: [batch, num_rois, (y1, x1, y2, x2)] Proposal boxes in normalized\n coordinates.\n feature_maps: List of feature maps from 
different layers of the pyramid,\n [P2, P3, P4, P5]. Each has a different resolution.\n image_meta: [batch, (meta data)] Image details. See compose_image_meta()\n pool_size: The width of the square feature map generated from ROI Pooling.\n num_classes: number of classes, which determines the depth of the results\n train_bn: Boolean. Train or freeze Batch Norm layers\n\n Returns: Masks [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, NUM_CLASSES]\n \"\"\"\n # ROI Pooling\n # Shape: [batch, num_rois, MASK_POOL_SIZE, MASK_POOL_SIZE, channels]\n x = PyramidROIAlign([pool_size, pool_size],\n name=\"roi_align_mask\")([rois, image_meta] + feature_maps)\n\n # Conv layers\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv1\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn1')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv2\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn2')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv3\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn3')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2D(256, (3, 3), padding=\"same\"),\n name=\"mrcnn_mask_conv4\")(x)\n x = KL.TimeDistributed(BatchNorm(),\n name='mrcnn_mask_bn4')(x, training=train_bn)\n x = KL.Activation('relu')(x)\n\n x = KL.TimeDistributed(KL.Conv2DTranspose(256, (2, 2), strides=2, activation=\"relu\"),\n name=\"mrcnn_mask_deconv\")(x)\n x = KL.TimeDistributed(KL.Conv2D(num_classes, (1, 1), strides=1, activation=\"sigmoid\"),\n name=\"mrcnn_mask\")(x)\n return x\n\n\n############################################################\n# Loss Functions\n############################################################\n\ndef smooth_l1_loss(y_true, y_pred):\n \"\"\"Implements Smooth-L1 loss.\n y_true and y_pred are typically: [N, 4], but could be any shape.\n \"\"\"\n diff = K.abs(y_true - y_pred)\n less_than_one = K.cast(K.less(diff, 1.0), \"float32\")\n loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)\n return loss\n\n\ndef rpn_class_loss_graph(rpn_match, rpn_class_logits):\n \"\"\"RPN anchor classifier loss.\n\n rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,\n -1=negative, 0=neutral anchor.\n rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.\n \"\"\"\n # Squeeze last dim to simplify\n rpn_match = tf.squeeze(rpn_match, -1)\n # Get anchor classes. 
Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss


def rpn_bbox_loss_graph(config, target_bbox, rpn_match, rpn_bbox):
    """Return the RPN bounding box loss graph.

    config: the model config object.
    target_bbox: [batch, max positive anchors, (dy, dx, log(dh), log(dw))].
        Uses 0 padding to fill in unused bbox deltas.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_bbox: [batch, anchors, (dy, dx, log(dh), log(dw))]
    """
    # Positive anchors contribute to the loss, but negative and
    # neutral anchors (match value of 0 or -1) don't.
    rpn_match = K.squeeze(rpn_match, -1)
    indices = tf.where(K.equal(rpn_match, 1))

    # Pick bbox deltas that contribute to the loss
    rpn_bbox = tf.gather_nd(rpn_bbox, indices)

    # Trim target bounding box deltas to the same length as rpn_bbox.
    batch_counts = K.sum(K.cast(K.equal(rpn_match, 1), tf.int32), axis=1)
    target_bbox = batch_pack_graph(target_bbox, batch_counts,
                                   config.IMAGES_PER_GPU)

    loss = smooth_l1_loss(target_bbox, rpn_bbox)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss


def mrcnn_class_loss_graph(target_class_ids, pred_class_logits,
                           active_class_ids):
    """Loss for the classifier head of Mask RCNN.

    target_class_ids: [batch, num_rois]. Integer class IDs. Uses zero
        padding to fill in the array.
    pred_class_logits: [batch, num_rois, num_classes]
    active_class_ids: [batch, num_classes]. Has a value of 1 for
        classes that are in the dataset of the image, and 0
        for classes that are not in the dataset.
    """
    # During model building, Keras calls this function with
    # target_class_ids of type float32. Unclear why. Cast it
    # to int to get around it.
    target_class_ids = tf.cast(target_class_ids, 'int64')

    # Find predictions of classes that are not in the dataset.
    pred_class_ids = tf.argmax(pred_class_logits, axis=2)
    # TODO: Update this line to work with batch > 1. Right now it assumes all
    # images in a batch have the same active_class_ids
    pred_active = tf.gather(active_class_ids[0], pred_class_ids)

    # Loss
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target_class_ids, logits=pred_class_logits)

    # Erase losses of predictions of classes that are not in the active
    # classes of the image.
    loss = loss * pred_active

    # Compute loss mean. Use only predictions that contribute
    # to the loss to get a correct mean.
    loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)
    return loss
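

# A minimal NumPy twin of smooth_l1_loss() for sanity-checking the graph
# losses above and below (both the RPN and Mask R-CNN bbox losses use the
# same element-wise definition). Illustrative helper only; it is not called
# anywhere in this module. Assumes numpy is imported as np, as used
# throughout this file.
def smooth_l1_loss_np(y_true, y_pred):
    """NumPy reference: 0.5 * d^2 where |d| < 1, else |d| - 0.5."""
    diff = np.abs(y_true - y_pred)
    return np.where(diff < 1.0, 0.5 * diff ** 2, diff - 0.5)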


def mrcnn_bbox_loss_graph(target_bbox, target_class_ids, pred_bbox):
    """Loss for Mask R-CNN bounding box refinement.

    target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]
    target_class_ids: [batch, num_rois]. Integer class IDs.
    pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]
    """
    # Reshape to merge batch and roi dimensions for simplicity.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    target_bbox = K.reshape(target_bbox, (-1, 4))
    pred_bbox = K.reshape(pred_bbox, (-1, K.int_shape(pred_bbox)[2], 4))

    # Only positive ROIs contribute to the loss. And only
    # the right class_id of each ROI. Get their indices.
    positive_roi_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_roi_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_roi_ix), tf.int64)
    indices = tf.stack([positive_roi_ix, positive_roi_class_ids], axis=1)

    # Gather the deltas (predicted and true) that contribute to loss
    target_bbox = tf.gather(target_bbox, positive_roi_ix)
    pred_bbox = tf.gather_nd(pred_bbox, indices)

    # Smooth-L1 Loss
    loss = K.switch(tf.size(target_bbox) > 0,
                    smooth_l1_loss(y_true=target_bbox, y_pred=pred_bbox),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss


def mrcnn_mask_loss_graph(target_masks, target_class_ids, pred_masks):
    """Mask binary cross-entropy loss for the masks head.

    target_masks: [batch, num_rois, height, width].
        A float32 tensor of values 0 or 1. Uses zero padding to fill array.
    target_class_ids: [batch, num_rois]. Integer class IDs. Zero padded.
    pred_masks: [batch, proposals, height, width, num_classes] float32 tensor
                with values from 0 to 1.
    """
    # Reshape for simplicity. Merge first two dimensions into one.
    target_class_ids = K.reshape(target_class_ids, (-1,))
    mask_shape = tf.shape(target_masks)
    target_masks = K.reshape(target_masks, (-1, mask_shape[2], mask_shape[3]))
    pred_shape = tf.shape(pred_masks)
    pred_masks = K.reshape(pred_masks,
                           (-1, pred_shape[2], pred_shape[3], pred_shape[4]))
    # Permute predicted masks to [N, num_classes, height, width]
    pred_masks = tf.transpose(pred_masks, [0, 3, 1, 2])

    # Only positive ROIs contribute to the loss. And only
    # the class specific mask of each ROI.
    positive_ix = tf.where(target_class_ids > 0)[:, 0]
    positive_class_ids = tf.cast(
        tf.gather(target_class_ids, positive_ix), tf.int64)
    indices = tf.stack([positive_ix, positive_class_ids], axis=1)

    # Gather the masks (predicted and true) that contribute to loss
    y_true = tf.gather(target_masks, positive_ix)
    y_pred = tf.gather_nd(pred_masks, indices)

    # Compute binary cross entropy. If no positive ROIs, then return 0.
    # shape: [N, height, width]
    loss = K.switch(tf.size(y_true) > 0,
                    K.binary_crossentropy(target=y_true, output=y_pred),
                    tf.constant(0.0))
    loss = K.mean(loss)
    return loss
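

# Both class-specific losses above select predictions with [roi_index, class_id]
# pairs via tf.gather_nd. A tiny NumPy illustration of that indexing pattern
# (illustrative helper with made-up values; not used by the model):
def _gather_by_class_example():
    """Shows the [roi, class] gather used by the bbox and mask losses."""
    pred = np.zeros((3, 5, 4), dtype=np.float32)  # [num_rois, num_classes, 4]
    pred[0, 2] = [1, 2, 3, 4]
    pred[2, 1] = [5, 6, 7, 8]
    roi_ix = np.array([0, 2])      # positive ROI indices
    class_ids = np.array([2, 1])   # their target class IDs
    # Equivalent to tf.gather_nd(pred, tf.stack([roi_ix, class_ids], axis=1))
    return pred[roi_ix, class_ids]  # -> [[1, 2, 3, 4], [5, 6, 7, 8]]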


############################################################
# Data Generator
############################################################

def load_image_gt(dataset, config, image_id, augment=False, augmentation=None,
                  use_mini_mask=False):
    """Load and return ground truth data for an image (image, mask, bounding boxes).

    augment: (deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    use_mini_mask: If False, returns full-size masks that are the same height
        and width as the original image. These can be big, for example
        1024x1024x100 (for 100 instances). Mini masks are smaller, typically
        224x224, and are generated by extracting the bounding box of the
        object and resizing it to MINI_MASK_SHAPE.

    Returns:
    image: [height, width, 3]
    shape: the original shape of the image before resizing and cropping.
    class_ids: [instance_count] Integer class IDs
    bbox: [instance_count, (y1, x1, y2, x2)]
    mask: [height, width, instance_count]. The height and width are those
        of the image unless use_mini_mask is True, in which case they are
        defined in MINI_MASK_SHAPE.
    """
    # Load image and mask
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    original_shape = image.shape
    image, window, scale, padding, crop = utils.resize_image(
        image,
        min_dim=config.IMAGE_MIN_DIM,
        min_scale=config.IMAGE_MIN_SCALE,
        max_dim=config.IMAGE_MAX_DIM,
        mode=config.IMAGE_RESIZE_MODE)
    mask = utils.resize_mask(mask, scale, padding, crop)

    # Random horizontal flips.
    # TODO: will be removed in a future update in favor of augmentation
    if augment:
        logging.warning("'augment' is deprecated. Use 'augmentation' instead.")
        if random.randint(0, 1):
            image = np.fliplr(image)
            mask = np.fliplr(mask)

    # Augmentation
    # This requires the imgaug lib (https://github.com/aleju/imgaug)
    if augmentation:
        import imgaug

        # Augmenters that are safe to apply to masks
        # Some, such as Affine, have settings that make them unsafe, so always
        # test your augmentation on masks
        MASK_AUGMENTERS = ["Sequential", "SomeOf", "OneOf", "Sometimes",
                           "Fliplr", "Flipud", "CropAndPad",
                           "Affine", "PiecewiseAffine"]

        def hook(images, augmenter, parents, default):
            """Determines which augmenters to apply to masks."""
            return augmenter.__class__.__name__ in MASK_AUGMENTERS

        # Store shapes before augmentation to compare
        image_shape = image.shape
        mask_shape = mask.shape
        # Make augmenters deterministic to apply similarly to images and masks
        det = augmentation.to_deterministic()
        image = det.augment_image(image)
        # Change mask to np.uint8 because imgaug doesn't support np.bool
        mask = det.augment_image(mask.astype(np.uint8),
                                 hooks=imgaug.HooksImages(activator=hook))
        # Verify that shapes didn't change
        assert image.shape == image_shape, "Augmentation shouldn't change image size"
        assert mask.shape == mask_shape, "Augmentation shouldn't change mask size"
        # Change mask back to bool
        mask = mask.astype(np.bool)

    # Some masks might be all zeros if the corresponding instance got cropped
    # out during resizing or augmentation. Filter those instances out.
    _idx = np.sum(mask, axis=(0, 1)) > 0
    mask = mask[:, :, _idx]
    class_ids = class_ids[_idx]
    # Bounding boxes. Note that some boxes might be all zeros
    # if the corresponding mask got cropped out.
    # bbox: [num_instances, (y1, x1, y2, x2)]
    bbox = utils.extract_bboxes(mask)

    # Active classes
    # Different datasets have different classes, so track the
    # classes supported in the dataset of this image.
    active_class_ids = np.zeros([dataset.num_classes], dtype=np.int32)
    source_class_ids = dataset.source_class_ids[dataset.image_info[image_id]["source"]]
    active_class_ids[source_class_ids] = 1

    # Resize masks to smaller size to reduce memory usage
    if use_mini_mask:
        mask = utils.minimize_mask(bbox, mask, config.MINI_MASK_SHAPE)

    # Image meta data
    image_meta = compose_image_meta(image_id, original_shape, image.shape,
                                    window, scale, active_class_ids)

    return image, image_meta, class_ids, bbox, mask


def build_detection_targets(rpn_rois, gt_class_ids, gt_boxes, gt_masks, config):
    """Generate targets for training Stage 2 classifier and mask heads.
    This is not used in normal training. It's useful for debugging or to train
    the Mask RCNN heads without using the RPN head.

    Inputs:
    rpn_rois: [N, (y1, x1, y2, x2)] proposal boxes.
    gt_class_ids: [instance count] Integer class IDs
    gt_boxes: [instance count, (y1, x1, y2, x2)]
    gt_masks: [height, width, instance count] Ground truth masks. Can be full
              size or mini-masks.

    Returns:
    rois: [TRAIN_ROIS_PER_IMAGE, (y1, x1, y2, x2)]
    class_ids: [TRAIN_ROIS_PER_IMAGE]. Integer class IDs.
    bboxes: [TRAIN_ROIS_PER_IMAGE, NUM_CLASSES, (y, x, log(h), log(w))]. Class-specific
            bbox refinements.
    masks: [TRAIN_ROIS_PER_IMAGE, height, width, NUM_CLASSES]. Class specific masks cropped
           to bbox boundaries and resized to neural network output size.
    """
    assert rpn_rois.shape[0] > 0
    assert gt_class_ids.dtype == np.int32, "Expected int but got {}".format(
        gt_class_ids.dtype)
    assert gt_boxes.dtype == np.int32, "Expected int but got {}".format(
        gt_boxes.dtype)
    assert gt_masks.dtype == np.bool_, "Expected bool but got {}".format(
        gt_masks.dtype)

    # It's common to add GT Boxes to ROIs but we don't do that here because
    # according to XinLei Chen's paper, it doesn't help.

    # Trim empty padding in gt_boxes and gt_masks parts
    instance_ids = np.where(gt_class_ids > 0)[0]
    assert instance_ids.shape[0] > 0, "Image must contain instances."
    gt_class_ids = gt_class_ids[instance_ids]
    gt_boxes = gt_boxes[instance_ids]
    gt_masks = gt_masks[:, :, instance_ids]

    # Compute areas of ROIs and ground truth boxes.
    rpn_roi_area = (rpn_rois[:, 2] - rpn_rois[:, 0]) * \
        (rpn_rois[:, 3] - rpn_rois[:, 1])
    gt_box_area = (gt_boxes[:, 2] - gt_boxes[:, 0]) * \
        (gt_boxes[:, 3] - gt_boxes[:, 1])

    # Compute overlaps [rpn_rois, gt_boxes]
    overlaps = np.zeros((rpn_rois.shape[0], gt_boxes.shape[0]))
    for i in range(overlaps.shape[1]):
        gt = gt_boxes[i]
        overlaps[:, i] = utils.compute_iou(
            gt, rpn_rois, gt_box_area[i], rpn_roi_area)

    # Assign ROIs to GT boxes
    rpn_roi_iou_argmax = np.argmax(overlaps, axis=1)
    rpn_roi_iou_max = overlaps[np.arange(
        overlaps.shape[0]), rpn_roi_iou_argmax]
    # GT box assigned to each ROI
    rpn_roi_gt_boxes = gt_boxes[rpn_roi_iou_argmax]
    rpn_roi_gt_class_ids = gt_class_ids[rpn_roi_iou_argmax]

    # Positive ROIs are those with >= 0.5 IoU with a GT box.
    fg_ids = np.where(rpn_roi_iou_max > 0.5)[0]

    # Negative ROIs are those with max IoU 0.1-0.5 (hard example mining)
    # TODO: To hard example mine or not to hard example mine, that's the question
    # bg_ids = 
np.where((rpn_roi_iou_max >= 0.1) & (rpn_roi_iou_max < 0.5))[0]\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n\n # Subsample ROIs. Aim for 33% foreground.\n # FG\n fg_roi_count = int(config.TRAIN_ROIS_PER_IMAGE * config.ROI_POSITIVE_RATIO)\n if fg_ids.shape[0] > fg_roi_count:\n keep_fg_ids = np.random.choice(fg_ids, fg_roi_count, replace=False)\n else:\n keep_fg_ids = fg_ids\n # BG\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep_fg_ids.shape[0]\n if bg_ids.shape[0] > remaining:\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n else:\n keep_bg_ids = bg_ids\n # Combine indices of ROIs to keep\n keep = np.concatenate([keep_fg_ids, keep_bg_ids])\n # Need more?\n remaining = config.TRAIN_ROIS_PER_IMAGE - keep.shape[0]\n if remaining > 0:\n # Looks like we don't have enough samples to maintain the desired\n # balance. Reduce requirements and fill in the rest. This is\n # likely different from the Mask RCNN paper.\n\n # There is a small chance we have neither fg nor bg samples.\n if keep.shape[0] == 0:\n # Pick bg regions with easier IoU threshold\n bg_ids = np.where(rpn_roi_iou_max < 0.5)[0]\n assert bg_ids.shape[0] >= remaining\n keep_bg_ids = np.random.choice(bg_ids, remaining, replace=False)\n assert keep_bg_ids.shape[0] == remaining\n keep = np.concatenate([keep, keep_bg_ids])\n else:\n # Fill the rest with repeated bg rois.\n keep_extra_ids = np.random.choice(\n keep_bg_ids, remaining, replace=True)\n keep = np.concatenate([keep, keep_extra_ids])\n assert keep.shape[0] == config.TRAIN_ROIS_PER_IMAGE, \\\n \"keep doesn't match ROI batch size {}, {}\".format(\n keep.shape[0], config.TRAIN_ROIS_PER_IMAGE)\n\n # Reset the gt boxes assigned to BG ROIs.\n rpn_roi_gt_boxes[keep_bg_ids, :] = 0\n rpn_roi_gt_class_ids[keep_bg_ids] = 0\n\n # For each kept ROI, assign a class_id, and for FG ROIs also add bbox refinement.\n rois = rpn_rois[keep]\n roi_gt_boxes = rpn_roi_gt_boxes[keep]\n roi_gt_class_ids = rpn_roi_gt_class_ids[keep]\n roi_gt_assignment = rpn_roi_iou_argmax[keep]\n\n # Class-aware bbox deltas. 
[y, x, log(h), log(w)]
    bboxes = np.zeros((config.TRAIN_ROIS_PER_IMAGE,
                       config.NUM_CLASSES, 4), dtype=np.float32)
    pos_ids = np.where(roi_gt_class_ids > 0)[0]
    bboxes[pos_ids, roi_gt_class_ids[pos_ids]] = utils.box_refinement(
        rois[pos_ids], roi_gt_boxes[pos_ids, :4])
    # Normalize bbox refinements
    bboxes /= config.BBOX_STD_DEV

    # Generate class-specific target masks
    masks = np.zeros((config.TRAIN_ROIS_PER_IMAGE, config.MASK_SHAPE[0], config.MASK_SHAPE[1], config.NUM_CLASSES),
                     dtype=np.float32)
    for i in pos_ids:
        class_id = roi_gt_class_ids[i]
        assert class_id > 0, "class id must be greater than 0"
        gt_id = roi_gt_assignment[i]
        class_mask = gt_masks[:, :, gt_id]

        if config.USE_MINI_MASK:
            # Create a mask placeholder, the size of the image
            placeholder = np.zeros(config.IMAGE_SHAPE[:2], dtype=bool)
            # GT box
            gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[gt_id]
            gt_w = gt_x2 - gt_x1
            gt_h = gt_y2 - gt_y1
            # Resize mini mask to size of GT box
            placeholder[gt_y1:gt_y2, gt_x1:gt_x2] = \
                np.round(utils.resize(class_mask, (gt_h, gt_w))).astype(bool)
            # Place the mini mask in the placeholder
            class_mask = placeholder

        # Pick part of the mask and resize it
        y1, x1, y2, x2 = rois[i].astype(np.int32)
        m = class_mask[y1:y2, x1:x2]
        mask = utils.resize(m, config.MASK_SHAPE)
        masks[i, :, :, class_id] = mask

    return rois, roi_gt_class_ids, bboxes, masks
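

# The (dy, dx, log(dh), log(dw)) encoding used for all bbox regression targets
# in this file shifts/scales a box's center and log-scales its size. A minimal
# NumPy sketch for a single [y1, x1, y2, x2] box pair (illustrative only; the
# canonical implementation is utils.box_refinement, and build_rpn_targets
# below inlines the same math):
def _box_refinement_example(box, gt_box):
    """Compute (dy, dx, log(dh), log(dw)) deltas from box to gt_box."""
    h, w = box[2] - box[0], box[3] - box[1]
    gt_h, gt_w = gt_box[2] - gt_box[0], gt_box[3] - gt_box[1]
    dy = ((gt_box[0] + 0.5 * gt_h) - (box[0] + 0.5 * h)) / h
    dx = ((gt_box[1] + 0.5 * gt_w) - (box[1] + 0.5 * w)) / w
    return np.array([dy, dx, np.log(gt_h / h), np.log(gt_w / w)])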


def build_rpn_targets(image_shape, anchors, gt_class_ids, gt_boxes, config):
    """Given the anchors and GT boxes, compute overlaps and identify positive
    anchors and deltas to refine them to match their corresponding GT boxes.

    anchors: [num_anchors, (y1, x1, y2, x2)]
    gt_class_ids: [num_gt_boxes] Integer class IDs.
    gt_boxes: [num_gt_boxes, (y1, x1, y2, x2)]

    Returns:
    rpn_match: [N] (int32) matches between anchors and GT boxes.
               1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_bbox: [N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.
    """
    # RPN Match: 1 = positive anchor, -1 = negative anchor, 0 = neutral
    rpn_match = np.zeros([anchors.shape[0]], dtype=np.int32)
    # RPN bounding boxes: [max anchors per image, (dy, dx, log(dh), log(dw))]
    rpn_bbox = np.zeros((config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4))

    # Handle COCO crowds
    # A crowd box in COCO is a bounding box around several instances. Exclude
    # them from training. A crowd box is given a negative class ID.
    crowd_ix = np.where(gt_class_ids < 0)[0]
    if crowd_ix.shape[0] > 0:
        # Filter out crowds from ground truth class IDs and boxes
        non_crowd_ix = np.where(gt_class_ids > 0)[0]
        crowd_boxes = gt_boxes[crowd_ix]
        gt_class_ids = gt_class_ids[non_crowd_ix]
        gt_boxes = gt_boxes[non_crowd_ix]
        # Compute overlaps with crowd boxes [anchors, crowds]
        crowd_overlaps = utils.compute_overlaps(anchors, crowd_boxes)
        crowd_iou_max = np.amax(crowd_overlaps, axis=1)
        no_crowd_bool = (crowd_iou_max < 0.001)
    else:
        # All anchors don't intersect a crowd
        no_crowd_bool = np.ones([anchors.shape[0]], dtype=bool)

    # Compute overlaps [num_anchors, num_gt_boxes]
    overlaps = utils.compute_overlaps(anchors, gt_boxes)

    # Match anchors to GT Boxes
    # If an anchor overlaps a GT box with IoU >= 0.7 then it's positive.
    # If an anchor overlaps a GT box with IoU < 0.3 then it's negative.
    # Neutral anchors are those that don't match the conditions above,
    # and they don't influence the loss function.
    # However, don't keep any GT box unmatched (rare, but happens). Instead,
    # match it to the closest anchor (even if its max IoU is < 0.3).
    #
    # 1. Set negative anchors first. They get overwritten below if a GT box is
    #    matched to them. Skip boxes in crowd areas.
    anchor_iou_argmax = np.argmax(overlaps, axis=1)
    anchor_iou_max = overlaps[np.arange(overlaps.shape[0]), anchor_iou_argmax]
    rpn_match[(anchor_iou_max < 0.3) & (no_crowd_bool)] = -1
    # 2. Set an anchor for each GT box (regardless of IoU value).
    #    If multiple anchors have the same IoU, match all of them.
    gt_iou_argmax = np.argwhere(overlaps == np.max(overlaps, axis=0))[:, 0]
    rpn_match[gt_iou_argmax] = 1
    # 3. Set anchors with high overlap as positive.
    rpn_match[anchor_iou_max >= 0.7] = 1

    # Subsample to balance positive and negative anchors
    # Don't let positives be more than half the anchors
    ids = np.where(rpn_match == 1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE // 2)
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0
    # Same for negative proposals
    ids = np.where(rpn_match == -1)[0]
    extra = len(ids) - (config.RPN_TRAIN_ANCHORS_PER_IMAGE -
                        np.sum(rpn_match == 1))
    if extra > 0:
        # Reset the extra ones to neutral
        ids = np.random.choice(ids, extra, replace=False)
        rpn_match[ids] = 0

    # For positive anchors, compute shift and scale needed to transform them
    # to match the corresponding GT boxes.
    ids = np.where(rpn_match == 1)[0]
    ix = 0  # index into rpn_bbox
    # TODO: use box_refinement() rather than duplicating the code here
    for i, a in zip(ids, anchors[ids]):
        # Closest gt box (it might have IoU < 0.7)
        gt = gt_boxes[anchor_iou_argmax[i]]

        # Convert coordinates to center plus width/height.
        # GT Box
        gt_h = gt[2] - gt[0]
        gt_w = gt[3] - gt[1]
        gt_center_y = gt[0] + 0.5 * gt_h
        gt_center_x = gt[1] + 0.5 * gt_w
        # Anchor
        a_h = a[2] - a[0]
        a_w = a[3] - a[1]
        a_center_y = a[0] + 0.5 * a_h
        a_center_x = a[1] + 0.5 * a_w

        # Compute the bbox refinement that the RPN should predict.
        rpn_bbox[ix] = [
            (gt_center_y - a_center_y) / a_h,
            (gt_center_x - a_center_x) / a_w,
            np.log(gt_h / a_h),
            np.log(gt_w / a_w),
        ]
        # Normalize
        rpn_bbox[ix] /= config.RPN_BBOX_STD_DEV
        ix += 1

    return rpn_match, rpn_bbox
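

# A toy walk-through of the matching rules above (illustrative only; crowd
# handling and subsampling are omitted): with two anchors and one GT box,
# IoUs of [0.75, 0.1] yield rpn_match [1, -1], while IoUs of [0.5, 0.4]
# yield [1, 0] because the best anchor for each GT box is forced positive
# even when no IoU crosses 0.7. Note the real code uses np.argwhere against
# the column max to catch ties; this sketch uses a plain argmax.
def _rpn_match_toy_example():
    """Tiny numeric check of build_rpn_targets' matching rules."""
    overlaps = np.array([[0.5], [0.4]])  # [anchors, gt_boxes] IoU matrix
    rpn_match = np.zeros(2, dtype=np.int32)
    anchor_iou_max = overlaps.max(axis=1)
    rpn_match[anchor_iou_max < 0.3] = -1        # rule 1: negatives
    rpn_match[np.argmax(overlaps, axis=0)] = 1  # rule 2: force best per GT
    rpn_match[anchor_iou_max >= 0.7] = 1        # rule 3: strong positives
    return rpn_match  # -> [1, 0]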


def generate_random_rois(image_shape, count, gt_class_ids, gt_boxes):
    """Generates ROI proposals similar to what a region proposal network
    would generate.

    image_shape: [Height, Width, Depth]
    count: Number of ROIs to generate
    gt_class_ids: [N] Integer ground truth class IDs
    gt_boxes: [N, (y1, x1, y2, x2)] Ground truth boxes in pixels.

    Returns: [count, (y1, x1, y2, x2)] ROI boxes in pixels.
    """
    # placeholder
    rois = np.zeros((count, 4), dtype=np.int32)

    # Generate random ROIs around GT boxes (90% of count)
    rois_per_box = int(0.9 * count / gt_boxes.shape[0])
    for i in range(gt_boxes.shape[0]):
        gt_y1, gt_x1, gt_y2, gt_x2 = gt_boxes[i]
        h = gt_y2 - gt_y1
        w = gt_x2 - gt_x1
        # random boundaries
        r_y1 = max(gt_y1 - h, 0)
        r_y2 = min(gt_y2 + h, image_shape[0])
        r_x1 = max(gt_x1 - w, 0)
        r_x2 = min(gt_x2 + w, image_shape[1])

        # To avoid generating boxes with zero area, we generate double what
        # we need and filter out the extra. If we get fewer valid boxes
        # than we need, we loop and try again.
        while True:
            y1y2 = np.random.randint(r_y1, r_y2, (rois_per_box * 2, 2))
            x1x2 = np.random.randint(r_x1, r_x2, (rois_per_box * 2, 2))
            # Filter out zero area boxes
            threshold = 1
            y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                        threshold][:rois_per_box]
            x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                        threshold][:rois_per_box]
            if y1y2.shape[0] == rois_per_box and x1x2.shape[0] == rois_per_box:
                break

        # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
        # into x1, y1, x2, y2 order
        x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
        y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
        box_rois = np.hstack([y1, x1, y2, x2])
        rois[rois_per_box * i:rois_per_box * (i + 1)] = box_rois

    # Generate random ROIs anywhere in the image (10% of count)
    remaining_count = count - (rois_per_box * gt_boxes.shape[0])
    # To avoid generating boxes with zero area, we generate double what
    # we need and filter out the extra. If we get fewer valid boxes
    # than we need, we loop and try again.
    while True:
        y1y2 = np.random.randint(0, image_shape[0], (remaining_count * 2, 2))
        x1x2 = np.random.randint(0, image_shape[1], (remaining_count * 2, 2))
        # Filter out zero area boxes
        threshold = 1
        y1y2 = y1y2[np.abs(y1y2[:, 0] - y1y2[:, 1]) >=
                    threshold][:remaining_count]
        x1x2 = x1x2[np.abs(x1x2[:, 0] - x1x2[:, 1]) >=
                    threshold][:remaining_count]
        if y1y2.shape[0] == remaining_count and x1x2.shape[0] == remaining_count:
            break

    # Sort on axis 1 to ensure x1 <= x2 and y1 <= y2 and then reshape
    # into x1, y1, x2, y2 order
    x1, x2 = np.split(np.sort(x1x2, axis=1), 2, axis=1)
    y1, y2 = np.split(np.sort(y1y2, axis=1), 2, axis=1)
    global_rois = np.hstack([y1, x1, y2, x2])
    rois[-remaining_count:] = global_rois
    return rois


def data_generator(dataset, config, shuffle=True, augment=False, augmentation=None,
                   random_rois=0, batch_size=1, detection_targets=False,
                   no_augmentation_sources=None):
    """A generator that returns images and corresponding target class ids,
    bounding box deltas, and masks.

    dataset: The Dataset object to pick data from
    config: The model config object
    shuffle: If True, shuffles the samples before every epoch
    augment: (deprecated. Use augmentation instead). If true, apply random
        image augmentation. Currently, only horizontal flipping is offered.
    augmentation: Optional. An imgaug (https://github.com/aleju/imgaug) augmentation.
        For example, passing imgaug.augmenters.Fliplr(0.5) flips images
        right/left 50% of the time.
    random_rois: If > 0 then generate proposals to be used to train the
                 network classifier and mask heads. Useful if training
                 the Mask RCNN part without the RPN.
    batch_size: How many images to return in each call
    detection_targets: If True, generate detection targets (class IDs, bbox
        deltas, and masks). Typically for debugging or visualizations because
        in training detection targets are generated by DetectionTargetLayer.
    no_augmentation_sources: Optional. List of sources to exclude for
        augmentation. A source is a string that identifies a dataset and is
        defined in the Dataset class.

    Returns a Python generator. Upon calling next() on it, the
    generator returns two lists, inputs and outputs. The contents
    of the lists differ depending on the received arguments:
    inputs list:
    - images: [batch, H, W, C]
    - image_meta: [batch, (meta data)] Image details. 
See compose_image_meta()\n - rpn_match: [batch, N] Integer (1=positive anchor, -1=negative, 0=neutral)\n - rpn_bbox: [batch, N, (dy, dx, log(dh), log(dw))] Anchor bbox deltas.\n - gt_class_ids: [batch, MAX_GT_INSTANCES] Integer class IDs\n - gt_boxes: [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)]\n - gt_masks: [batch, height, width, MAX_GT_INSTANCES]. The height and width\n are those of the image unless use_mini_mask is True, in which\n case they are defined in MINI_MASK_SHAPE.\n\n outputs list: Usually empty in regular training. But if detection_targets\n is True then the outputs list contains target class_ids, bbox deltas,\n and masks.\n \"\"\"\n b = 0 # batch item index\n image_index = -1\n image_ids = np.copy(dataset.image_ids)\n error_count = 0\n no_augmentation_sources = no_augmentation_sources or []\n\n # Anchors\n # [anchor_count, (y1, x1, y2, x2)]\n backbone_shapes = compute_backbone_shapes(config, config.IMAGE_SHAPE)\n anchors = utils.generate_pyramid_anchors(config.RPN_ANCHOR_SCALES,\n config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n config.BACKBONE_STRIDES,\n config.RPN_ANCHOR_STRIDE)\n\n # Keras requires a generator to run indefinitely.\n while True:\n try:\n # Increment index to pick next image. Shuffle if at the start of an epoch.\n image_index = (image_index + 1) % len(image_ids)\n if shuffle and image_index == 0:\n np.random.shuffle(image_ids)\n\n # Get GT bounding boxes and masks for image.\n image_id = image_ids[image_index]\n\n # If the image source is not to be augmented pass None as augmentation\n if dataset.image_info[image_id]['source'] in no_augmentation_sources:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=None,\n use_mini_mask=config.USE_MINI_MASK)\n else:\n image, image_meta, gt_class_ids, gt_boxes, gt_masks = \\\n load_image_gt(dataset, config, image_id, augment=augment,\n augmentation=augmentation,\n use_mini_mask=config.USE_MINI_MASK)\n\n # Skip images that have no instances. 
This can happen in cases\n # where we train on a subset of classes and the image doesn't\n # have any of the classes we care about.\n if not np.any(gt_class_ids > 0):\n continue\n\n # RPN Targets\n rpn_match, rpn_bbox = build_rpn_targets(image.shape, anchors,\n gt_class_ids, gt_boxes, config)\n\n # Mask R-CNN Targets\n if random_rois:\n rpn_rois = generate_random_rois(\n image.shape, random_rois, gt_class_ids, gt_boxes)\n if detection_targets:\n rois, mrcnn_class_ids, mrcnn_bbox, mrcnn_mask =\\\n build_detection_targets(\n rpn_rois, gt_class_ids, gt_boxes, gt_masks, config)\n\n # Init batch arrays\n if b == 0:\n batch_image_meta = np.zeros(\n (batch_size,) + image_meta.shape, dtype=image_meta.dtype)\n batch_rpn_match = np.zeros(\n [batch_size, anchors.shape[0], 1], dtype=rpn_match.dtype)\n batch_rpn_bbox = np.zeros(\n [batch_size, config.RPN_TRAIN_ANCHORS_PER_IMAGE, 4], dtype=rpn_bbox.dtype)\n batch_images = np.zeros(\n (batch_size,) + image.shape, dtype=np.float32)\n batch_gt_class_ids = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES), dtype=np.int32)\n batch_gt_boxes = np.zeros(\n (batch_size, config.MAX_GT_INSTANCES, 4), dtype=np.int32)\n batch_gt_masks = np.zeros(\n (batch_size, gt_masks.shape[0], gt_masks.shape[1],\n config.MAX_GT_INSTANCES), dtype=gt_masks.dtype)\n if random_rois:\n batch_rpn_rois = np.zeros(\n (batch_size, rpn_rois.shape[0], 4), dtype=rpn_rois.dtype)\n if detection_targets:\n batch_rois = np.zeros(\n (batch_size,) + rois.shape, dtype=rois.dtype)\n batch_mrcnn_class_ids = np.zeros(\n (batch_size,) + mrcnn_class_ids.shape, dtype=mrcnn_class_ids.dtype)\n batch_mrcnn_bbox = np.zeros(\n (batch_size,) + mrcnn_bbox.shape, dtype=mrcnn_bbox.dtype)\n batch_mrcnn_mask = np.zeros(\n (batch_size,) + mrcnn_mask.shape, dtype=mrcnn_mask.dtype)\n\n # If more instances than fits in the array, sub-sample from them.\n if gt_boxes.shape[0] > config.MAX_GT_INSTANCES:\n ids = np.random.choice(\n np.arange(gt_boxes.shape[0]), config.MAX_GT_INSTANCES, replace=False)\n gt_class_ids = gt_class_ids[ids]\n gt_boxes = gt_boxes[ids]\n gt_masks = gt_masks[:, :, ids]\n\n # Add to batch\n batch_image_meta[b] = image_meta\n batch_rpn_match[b] = rpn_match[:, np.newaxis]\n batch_rpn_bbox[b] = rpn_bbox\n batch_images[b] = mold_image(image.astype(np.float32), config)\n batch_gt_class_ids[b, :gt_class_ids.shape[0]] = gt_class_ids\n batch_gt_boxes[b, :gt_boxes.shape[0]] = gt_boxes\n batch_gt_masks[b, :, :, :gt_masks.shape[-1]] = gt_masks\n if random_rois:\n batch_rpn_rois[b] = rpn_rois\n if detection_targets:\n batch_rois[b] = rois\n batch_mrcnn_class_ids[b] = mrcnn_class_ids\n batch_mrcnn_bbox[b] = mrcnn_bbox\n batch_mrcnn_mask[b] = mrcnn_mask\n b += 1\n\n # Batch full?\n if b >= batch_size:\n inputs = [batch_images, batch_image_meta, batch_rpn_match, batch_rpn_bbox,\n batch_gt_class_ids, batch_gt_boxes, batch_gt_masks]\n outputs = []\n\n if random_rois:\n inputs.extend([batch_rpn_rois])\n if detection_targets:\n inputs.extend([batch_rois])\n # Keras requires that output and targets have the same number of dimensions\n batch_mrcnn_class_ids = np.expand_dims(\n batch_mrcnn_class_ids, -1)\n outputs.extend(\n [batch_mrcnn_class_ids, batch_mrcnn_bbox, batch_mrcnn_mask])\n\n yield inputs, outputs\n\n # start a new batch\n b = 0\n except (GeneratorExit, KeyboardInterrupt):\n raise\n except:\n # Log it and skip the image\n logging.exception(\"Error processing image {}\".format(\n dataset.image_info[image_id]))\n error_count += 1\n if error_count > 5:\n 
raise


############################################################
# MaskRCNN Class
############################################################

class MaskRCNN():
    """Encapsulates the Mask RCNN model functionality.

    The actual Keras model is in the keras_model property.
    """

    def __init__(self, mode, config, model_dir):
        """
        mode: Either "training" or "inference"
        config: A subclass of the Config class
        model_dir: Directory to save training logs and trained weights
        """
        assert mode in ['training', 'inference']
        self.mode = mode
        self.config = config
        self.model_dir = model_dir
        self.set_log_dir()
        self.keras_model = self.build(mode=mode, config=config)

    def build(self, mode, config):
        """Build Mask R-CNN architecture.
        mode: Either "training" or "inference". The inputs and
            outputs of the model differ accordingly.
        """
        assert mode in ['training', 'inference']

        # Image size must be divisible by 2 multiple times
        h, w = config.IMAGE_SHAPE[:2]
        if h / 2**6 != int(h / 2**6) or w / 2**6 != int(w / 2**6):
            raise Exception("Image size must be divisible by 2 at least 6 times "
                            "to avoid fractions when downscaling and upscaling. "
                            "For example, use 256, 320, 384, 448, 512, ... etc.")

        # Inputs
        input_image = KL.Input(
            shape=[None, None, config.IMAGE_SHAPE[2]], name="input_image")
        input_image_meta = KL.Input(shape=[config.IMAGE_META_SIZE],
                                    name="input_image_meta")
        if mode == "training":
            # RPN GT
            input_rpn_match = KL.Input(
                shape=[None, 1], name="input_rpn_match", dtype=tf.int32)
            input_rpn_bbox = KL.Input(
                shape=[None, 4], name="input_rpn_bbox", dtype=tf.float32)

            # Detection GT (class IDs, bounding boxes, and masks)
            # 1. GT Class IDs (zero padded)
            input_gt_class_ids = KL.Input(
                shape=[None], name="input_gt_class_ids", dtype=tf.int32)
            # 2. GT Boxes in pixels (zero padded)
            # [batch, MAX_GT_INSTANCES, (y1, x1, y2, x2)] in image coordinates
            input_gt_boxes = KL.Input(
                shape=[None, 4], name="input_gt_boxes", dtype=tf.float32)
            # Normalize coordinates
            gt_boxes = KL.Lambda(lambda x: norm_boxes_graph(
                x, K.shape(input_image)[1:3]))(input_gt_boxes)
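
            # For intuition: norm_boxes_graph maps pixel boxes into [0, 1]
            # using the (size - 1) pixel-span convention used throughout this
            # codebase. A worked example with hypothetical numbers: in a
            # 1024x1024 image, the box (y1, x1, y2, x2) = (10, 20, 110, 220)
            # becomes ((10 - 0) / 1023, (20 - 0) / 1023, (110 - 1) / 1023,
            # (220 - 1) / 1023) ~= (0.0098, 0.0196, 0.1066, 0.2141).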
            # 3. GT Masks (zero padded)
            # [batch, height, width, MAX_GT_INSTANCES]
            if config.USE_MINI_MASK:
                input_gt_masks = KL.Input(
                    shape=[config.MINI_MASK_SHAPE[0],
                           config.MINI_MASK_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
            else:
                input_gt_masks = KL.Input(
                    shape=[config.IMAGE_SHAPE[0], config.IMAGE_SHAPE[1], None],
                    name="input_gt_masks", dtype=bool)
        elif mode == "inference":
            # Anchors in normalized coordinates
            input_anchors = KL.Input(shape=[None, 4], name="input_anchors")

        # Build the shared convolutional layers.
        # Bottom-up Layers
        # Returns a list of the last layers of each stage, 5 in total.
        # The first item (the stage 1 output) isn't used, so it's discarded below.
        if callable(config.BACKBONE):
            _, C2, C3, C4, C5 = config.BACKBONE(input_image, stage5=True,
                                                train_bn=config.TRAIN_BN)
        else:
            _, C2, C3, C4, C5 = resnet_graph(input_image, config.BACKBONE,
                                             stage5=True, train_bn=config.TRAIN_BN)
        # Top-down Layers
        # TODO: add assert to verify feature map sizes match what's in config
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c5p5')(C5)
        P4 = KL.Add(name="fpn_p4add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p5upsampled")(P5),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c4p4')(C4)])
        P3 = KL.Add(name="fpn_p3add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p4upsampled")(P4),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c3p3')(C3)])
        P2 = KL.Add(name="fpn_p2add")([
            KL.UpSampling2D(size=(2, 2), name="fpn_p3upsampled")(P3),
            KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (1, 1), name='fpn_c2p2')(C2)])
        # Attach 3x3 conv to all P layers to get the final feature maps.
        P2 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p2")(P2)
        P3 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p3")(P3)
        P4 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p4")(P4)
        P5 = KL.Conv2D(config.TOP_DOWN_PYRAMID_SIZE, (3, 3), padding="SAME", name="fpn_p5")(P5)
        # P6 is used for the 5th anchor scale in RPN. Generated by
        # subsampling from P5 with stride of 2.
        P6 = KL.MaxPooling2D(pool_size=(1, 1), strides=2, name="fpn_p6")(P5)

        # Note that P6 is used in RPN, but not in the classifier heads.
        rpn_feature_maps = [P2, P3, P4, P5, P6]
        mrcnn_feature_maps = [P2, P3, P4, P5]

        # Anchors
        if mode == "training":
            anchors = self.get_anchors(config.IMAGE_SHAPE)
            # Duplicate across the batch dimension because Keras requires it
            # TODO: can this be optimized to avoid duplicating the anchors?
            anchors = np.broadcast_to(anchors, (config.BATCH_SIZE,) + anchors.shape)
            # A hack to get around Keras's bad support for constants
            anchors = KL.Lambda(lambda x: tf.Variable(anchors), name="anchors")(input_image)
        else:
            anchors = input_anchors

        # RPN Model
        rpn = build_rpn_model(config.RPN_ANCHOR_STRIDE,
                              len(config.RPN_ANCHOR_RATIOS), config.TOP_DOWN_PYRAMID_SIZE)
        # Loop through pyramid layers
        layer_outputs = []  # list of lists
        for p in rpn_feature_maps:
            layer_outputs.append(rpn([p]))
        # Concatenate layer outputs
        # Convert from list of lists of level outputs to list of lists
        # of outputs across levels.
        # e.g. 
[[a1, b1, c1], [a2, b2, c2]] => [[a1, a2], [b1, b2], [c1, c2]]\n output_names = [\"rpn_class_logits\", \"rpn_class\", \"rpn_bbox\"]\n outputs = list(zip(*layer_outputs))\n outputs = [KL.Concatenate(axis=1, name=n)(list(o))\n for o, n in zip(outputs, output_names)]\n\n rpn_class_logits, rpn_class, rpn_bbox = outputs\n\n # Generate proposals\n # Proposals are [batch, N, (y1, x1, y2, x2)] in normalized coordinates\n # and zero padded.\n proposal_count = config.POST_NMS_ROIS_TRAINING if mode == \"training\"\\\n else config.POST_NMS_ROIS_INFERENCE\n rpn_rois = ProposalLayer(\n proposal_count=proposal_count,\n nms_threshold=config.RPN_NMS_THRESHOLD,\n name=\"ROI\",\n config=config)([rpn_class, rpn_bbox, anchors])\n\n if mode == \"training\":\n # Class ID mask to mark class IDs supported by the dataset the image\n # came from.\n active_class_ids = KL.Lambda(\n lambda x: parse_image_meta_graph(x)[\"active_class_ids\"]\n )(input_image_meta)\n\n if not config.USE_RPN_ROIS:\n # Ignore predicted ROIs and use ROIs provided as an input.\n input_rois = KL.Input(shape=[config.POST_NMS_ROIS_TRAINING, 4],\n name=\"input_roi\", dtype=np.int32)\n # Normalize coordinates\n target_rois = KL.Lambda(lambda x: norm_boxes_graph(\n x, K.shape(input_image)[1:3]))(input_rois)\n else:\n target_rois = rpn_rois\n\n # Generate detection targets\n # Subsamples proposals and generates target outputs for training\n # Note that proposal class IDs, gt_boxes, and gt_masks are zero\n # padded. Equally, returned rois and targets are zero padded.\n rois, target_class_ids, target_bbox, target_mask =\\\n DetectionTargetLayer(config, name=\"proposal_targets\")([\n target_rois, input_gt_class_ids, gt_boxes, input_gt_masks])\n\n # Network Heads\n # TODO: verify that this handles zero padded ROIs\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n mrcnn_mask = build_fpn_mask_graph(rois, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n # TODO: clean up (use tf.identify if necessary)\n output_rois = KL.Lambda(lambda x: x * 1, name=\"output_rois\")(rois)\n\n # Losses\n rpn_class_loss = KL.Lambda(lambda x: rpn_class_loss_graph(*x), name=\"rpn_class_loss\")(\n [input_rpn_match, rpn_class_logits])\n rpn_bbox_loss = KL.Lambda(lambda x: rpn_bbox_loss_graph(config, *x), name=\"rpn_bbox_loss\")(\n [input_rpn_bbox, input_rpn_match, rpn_bbox])\n class_loss = KL.Lambda(lambda x: mrcnn_class_loss_graph(*x), name=\"mrcnn_class_loss\")(\n [target_class_ids, mrcnn_class_logits, active_class_ids])\n bbox_loss = KL.Lambda(lambda x: mrcnn_bbox_loss_graph(*x), name=\"mrcnn_bbox_loss\")(\n [target_bbox, target_class_ids, mrcnn_bbox])\n mask_loss = KL.Lambda(lambda x: mrcnn_mask_loss_graph(*x), name=\"mrcnn_mask_loss\")(\n [target_mask, target_class_ids, mrcnn_mask])\n\n # Model\n inputs = [input_image, input_image_meta,\n input_rpn_match, input_rpn_bbox, input_gt_class_ids, input_gt_boxes, input_gt_masks]\n if not config.USE_RPN_ROIS:\n inputs.append(input_rois)\n outputs = [rpn_class_logits, rpn_class, rpn_bbox,\n mrcnn_class_logits, mrcnn_class, mrcnn_bbox, mrcnn_mask,\n rpn_rois, output_rois,\n rpn_class_loss, rpn_bbox_loss, class_loss, bbox_loss, mask_loss]\n model = KM.Model(inputs, outputs, name='mask_rcnn')\n else:\n # Network Heads\n # Proposal classifier and BBox regressor heads\n 
mrcnn_class_logits, mrcnn_class, mrcnn_bbox =\\\n fpn_classifier_graph(rpn_rois, mrcnn_feature_maps, input_image_meta,\n config.POOL_SIZE, config.NUM_CLASSES,\n train_bn=config.TRAIN_BN,\n fc_layers_size=config.FPN_CLASSIF_FC_LAYERS_SIZE)\n\n # Detections\n # output is [batch, num_detections, (y1, x1, y2, x2, class_id, score)] in\n # normalized coordinates\n detections = DetectionLayer(config, name=\"mrcnn_detection\")(\n [rpn_rois, mrcnn_class, mrcnn_bbox, input_image_meta])\n\n # Create masks for detections\n detection_boxes = KL.Lambda(lambda x: x[..., :4])(detections)\n mrcnn_mask = build_fpn_mask_graph(detection_boxes, mrcnn_feature_maps,\n input_image_meta,\n config.MASK_POOL_SIZE,\n config.NUM_CLASSES,\n train_bn=config.TRAIN_BN)\n\n model = KM.Model([input_image, input_image_meta, input_anchors],\n [detections, mrcnn_class, mrcnn_bbox,\n mrcnn_mask, rpn_rois, rpn_class, rpn_bbox],\n name='mask_rcnn')\n\n # Add multi-GPU support.\n if config.GPU_COUNT > 1:\n from mrcnn.parallel_model import ParallelModel\n model = ParallelModel(model, config.GPU_COUNT)\n\n return model\n\n def find_last(self):\n \"\"\"Finds the last checkpoint file of the last trained model in the\n model directory.\n Returns:\n The path of the last checkpoint file\n \"\"\"\n # Get directory names. Each directory corresponds to a model\n dir_names = next(os.walk(self.model_dir))[1]\n key = self.config.NAME.lower()\n dir_names = filter(lambda f: f.startswith(key), dir_names)\n dir_names = sorted(dir_names)\n if not dir_names:\n import errno\n raise FileNotFoundError(\n errno.ENOENT,\n \"Could not find model directory under {}\".format(self.model_dir))\n # Pick last directory\n dir_name = os.path.join(self.model_dir, dir_names[-1])\n # Find the last checkpoint\n checkpoints = next(os.walk(dir_name))[2]\n checkpoints = filter(lambda f: f.startswith(\"mask_rcnn\"), checkpoints)\n checkpoints = sorted(checkpoints)\n if not checkpoints:\n import errno\n raise FileNotFoundError(\n errno.ENOENT, \"Could not find weight files in {}\".format(dir_name))\n checkpoint = os.path.join(dir_name, checkpoints[-1])\n return checkpoint\n\n def load_weights(self, filepath, by_name=False, exclude=None):\n \"\"\"Modified version of the corresponding Keras function with\n the addition of multi-GPU support and the ability to exclude\n some layers from loading.\n exclude: list of layer names to exclude\n \"\"\"\n import h5py\n # Conditional import to support versions of Keras before 2.2\n # TODO: remove in about 6 months (end of 2018)\n try:\n from keras.engine import saving\n except ImportError:\n # Keras before 2.2 used the 'topology' namespace.\n from keras.engine import topology as saving\n\n if exclude:\n by_name = True\n\n if h5py is None:\n raise ImportError('`load_weights` requires h5py.')\n f = h5py.File(filepath, mode='r')\n if 'layer_names' not in f.attrs and 'model_weights' in f:\n f = f['model_weights']\n\n # In multi-GPU training, we wrap the model. 
Get layers\n # of the inner model because they have the weights.\n keras_model = self.keras_model\n layers = keras_model.inner_model.layers if hasattr(keras_model, \"inner_model\")\\\n else keras_model.layers\n\n # Exclude some layers\n if exclude:\n layers = filter(lambda l: l.name not in exclude, layers)\n\n if by_name:\n saving.load_weights_from_hdf5_group_by_name(f, layers)\n else:\n saving.load_weights_from_hdf5_group(f, layers)\n if hasattr(f, 'close'):\n f.close()\n\n # Update the log directory\n self.set_log_dir(filepath)\n\n def get_imagenet_weights(self):\n \"\"\"Downloads ImageNet trained weights from Keras.\n Returns path to weights file.\n \"\"\"\n from keras.utils.data_utils import get_file\n TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/'\\\n 'releases/download/v0.2/'\\\n 'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n TF_WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n return weights_path\n\n def compile(self, learning_rate, momentum):\n \"\"\"Gets the model ready for training. Adds losses, regularization, and\n metrics. Then calls the Keras compile() function.\n \"\"\"\n # Optimizer object\n optimizer = keras.optimizers.Adam(learning_rate)\n # optimizer = keras.optimizers.SGD(\n # lr=learning_rate, momentum=momentum,\n # clipnorm=self.config.GRADIENT_CLIP_NORM)\n\n # Add Losses\n # First, clear previously set losses to avoid duplication\n self.keras_model._losses = []\n self.keras_model._per_input_losses = {}\n loss_names = [\n \"rpn_class_loss\", \"rpn_bbox_loss\",\n \"mrcnn_class_loss\", \"mrcnn_bbox_loss\", \"mrcnn_mask_loss\"]\n for name in loss_names:\n layer = self.keras_model.get_layer(name)\n if layer.output in self.keras_model.losses:\n continue\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.add_loss(loss)\n\n # Add L2 Regularization\n # Skip gamma and beta weights of batch normalization layers.\n reg_losses = [\n keras.regularizers.l2(self.config.WEIGHT_DECAY)(w) / tf.cast(tf.size(w), tf.float32)\n for w in self.keras_model.trainable_weights\n if 'gamma' not in w.name and 'beta' not in w.name]\n self.keras_model.add_loss(tf.add_n(reg_losses))\n\n # Compile\n self.keras_model.compile(\n optimizer=optimizer,\n loss=[None] * len(self.keras_model.outputs))\n\n # Add metrics for losses\n for name in loss_names:\n if name in self.keras_model.metrics_names:\n continue\n layer = self.keras_model.get_layer(name)\n self.keras_model.metrics_names.append(name)\n loss = (\n tf.reduce_mean(layer.output, keepdims=True)\n * self.config.LOSS_WEIGHTS.get(name, 1.))\n self.keras_model.metrics_tensors.append(loss)\n\n def set_trainable(self, layer_regex, keras_model=None, indent=0, verbose=1):\n \"\"\"Sets model layers as trainable if their names match\n the given regular expression.\n \"\"\"\n # Print message on the first call (but not on recursive calls)\n if verbose > 0 and keras_model is None:\n log(\"Selecting layers to train\")\n\n keras_model = keras_model or self.keras_model\n\n # In multi-GPU training, we wrap the model. 
Get layers
        # of the inner model because they have the weights.
        layers = keras_model.inner_model.layers if hasattr(keras_model, "inner_model")\
            else keras_model.layers

        for layer in layers:
            # Is the layer a model?
            if layer.__class__.__name__ == 'Model':
                print("In model: ", layer.name)
                self.set_trainable(
                    layer_regex, keras_model=layer, indent=indent + 4)
                continue

            if not layer.weights:
                continue
            # Is it trainable?
            trainable = bool(re.fullmatch(layer_regex, layer.name))
            # Update layer. If layer is a container, update inner layer.
            if layer.__class__.__name__ == 'TimeDistributed':
                layer.layer.trainable = trainable
            else:
                layer.trainable = trainable
            # Print trainable layer names
            if trainable and verbose > 0:
                log("{}{:20} ({})".format(" " * indent, layer.name,
                                          layer.__class__.__name__))

    def set_log_dir(self, model_path=None):
        """Sets the model log directory and epoch counter.

        model_path: If None, or a format different from what this code uses
            then set a new log directory and start epochs from 0. Otherwise,
            extract the log directory and the epoch counter from the file
            name.
        """
        # Set date and epoch counter as if starting a new model
        self.epoch = 0
        now = datetime.datetime.now()

        # If we have a model path with date and epochs use them
        if model_path:
            # Continue from where we left off. Get epoch and date from the file name
            # A sample model path might look like:
            # \path\to\logs\coco20171029T2315\mask_rcnn_coco_0001.h5 (Windows)
            # /path/to/logs/coco20171029T2315/mask_rcnn_coco_0001.h5 (Linux)
            regex = r".*[/\\][\w-]+(\d{4})(\d{2})(\d{2})T(\d{2})(\d{2})[/\\]mask\_rcnn\_[\w-]+(\d{4})\.h5"
            m = re.match(regex, model_path)
            if m:
                now = datetime.datetime(int(m.group(1)), int(m.group(2)), int(m.group(3)),
                                        int(m.group(4)), int(m.group(5)))
                # Epoch number in file is 1-based, and in Keras code it's 0-based.
                # So, adjust for that then increment by one to start from the next epoch
                self.epoch = int(m.group(6)) - 1 + 1
                print('Re-starting from epoch %d' % self.epoch)

        # Directory for training logs
        self.log_dir = os.path.join(self.model_dir, "{}{:%Y%m%dT%H%M}".format(
            self.config.NAME.lower(), now))

        # Path to save after each epoch. Include placeholders that get filled by Keras.
        self.checkpoint_path = os.path.join(self.log_dir, "mask_rcnn_{}_*epoch*.h5".format(
            self.config.NAME.lower()))
        self.checkpoint_path = self.checkpoint_path.replace(
            "*epoch*", "{epoch:04d}")
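
    # A typical fine-tuning session with this class looks roughly like the
    # sketch below (illustrative only; COCO_WEIGHTS_PATH, LOG_DIR, and the
    # datasets are hypothetical placeholders, and the excluded head layers
    # match the layer names defined in build() above):
    #
    #   model = MaskRCNN(mode="training", config=config, model_dir=LOG_DIR)
    #   model.load_weights(COCO_WEIGHTS_PATH, by_name=True,
    #                      exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
    #                               "mrcnn_bbox", "mrcnn_mask"])
    #   model.train(train_dataset, val_dataset,
    #               learning_rate=config.LEARNING_RATE,
    #               epochs=30, layers="heads")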

    def train(self, train_dataset, val_dataset, learning_rate, epochs, layers,
              augmentation=None, custom_callbacks=None, no_augmentation_sources=None):
        """Train the model.
        train_dataset, val_dataset: Training and validation Dataset objects.
        learning_rate: The learning rate to train with
        epochs: Number of training epochs. Note that previous training epochs
                are considered to be done already, so this actually determines
                the epochs to train in total rather than in this particular
                call.
        layers: Allows selecting which layers to train. It can be:
            - A regular expression to match layer names to train
            - One of these predefined values:
              heads: The RPN, classifier and mask heads of the network
              all: All the layers
              3+: Train Resnet stage 3 and up
              4+: Train Resnet stage 4 and up
              5+: Train Resnet stage 5 and up
        augmentation: Optional. An imgaug (https://github.com/aleju/imgaug)
            augmentation. For example, passing imgaug.augmenters.Fliplr(0.5)
            flips images right/left 50% of the time. You can pass complex
            augmentations as well. This augmentation applies 50% of the
            time, and when it does it flips images right/left half the time
            and adds a Gaussian blur with a random sigma in range 0 to 5.

                augmentation = imgaug.augmenters.Sometimes(0.5, [
                    imgaug.augmenters.Fliplr(0.5),
                    imgaug.augmenters.GaussianBlur(sigma=(0.0, 5.0))
                ])
        custom_callbacks: Optional. Add custom callbacks to be called
            with the keras fit_generator method. Must be a list of type
            keras.callbacks.
        no_augmentation_sources: Optional. List of sources to exclude for
            augmentation. A source is a string that identifies a dataset and is
            defined in the Dataset class.
        """
        assert self.mode == "training", "Create model in training mode."

        # Pre-defined layer regular expressions
        layer_regex = {
            # all layers but the backbone
            "heads": r"(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            # From a specific Resnet stage and up
            "3+": r"(res3.*)|(bn3.*)|(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            "4+": r"(res4.*)|(bn4.*)|(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            "5+": r"(res5.*)|(bn5.*)|(mrcnn\_.*)|(rpn\_.*)|(fpn\_.*)",
            # All layers
            "all": ".*",
        }
        if layers in layer_regex.keys():
            layers = layer_regex[layers]

        # Data generators
        train_generator = data_generator(train_dataset, self.config, shuffle=True,
                                         augmentation=augmentation,
                                         batch_size=self.config.BATCH_SIZE,
                                         no_augmentation_sources=no_augmentation_sources)
        val_generator = data_generator(val_dataset, self.config, shuffle=True,
                                       batch_size=self.config.BATCH_SIZE)

        # Create log_dir if it does not exist
        if not os.path.exists(self.log_dir):
            os.makedirs(self.log_dir)

        # Callbacks
        callbacks = [
            keras.callbacks.TensorBoard(log_dir=self.log_dir,
                                        histogram_freq=0, write_graph=True, write_images=False),
            keras.callbacks.ModelCheckpoint(self.checkpoint_path,
                                            verbose=0, save_weights_only=True, save_best_only=True),
            keras.callbacks.ReduceLROnPlateau(),
            keras.callbacks.EarlyStopping(monitor='val_loss', patience=3, baseline=None,
                                          restore_best_weights=True)
        ]

        # Add custom callbacks to the list
        if custom_callbacks:
            callbacks += custom_callbacks

        # Train
        log("\nStarting at epoch {}. LR={}\n".format(self.epoch, learning_rate))
        log("Checkpoint Path: {}".format(self.checkpoint_path))
        self.set_trainable(layers)
        self.compile(learning_rate, self.config.LEARNING_MOMENTUM)

        # Work-around for Windows: Keras fails on Windows when using
        # multiprocessing workers. See discussion here:
        # https://github.com/matterport/Mask_RCNN/issues/13#issuecomment-353124009
        if os.name == 'nt':
            workers = 0
        else:
            workers = multiprocessing.cpu_count()

        self.keras_model.fit_generator(
            train_generator,
            initial_epoch=self.epoch,
            epochs=epochs,
            steps_per_epoch=self.config.STEPS_PER_EPOCH,
            callbacks=callbacks,
            validation_data=val_generator,
            validation_steps=self.config.VALIDATION_STEPS,
            max_queue_size=100,
            workers=workers,
            use_multiprocessing=True,
        )
        self.epoch = max(self.epoch, epochs)

    def mold_inputs(self, images):
        """Takes a list of images and modifies them to the format expected
        as an input to the neural network.
        images: List of image matrices [height,width,depth]. Images can have
            different sizes.

        Returns 3 Numpy matrices:
        molded_images: [N, h, w, 3]. Images resized and normalized.
        image_metas: [N, length of meta data]. Details about each image.
        windows: [N, (y1, x1, y2, x2)]. 
The portion of the image that has the\n original image (padding excluded).\n \"\"\"\n molded_images = []\n image_metas = []\n windows = []\n for image in images:\n # Resize image\n # TODO: move resizing to mold_image()\n molded_image, window, scale, padding, crop = utils.resize_image(\n image,\n min_dim=self.config.IMAGE_MIN_DIM,\n min_scale=self.config.IMAGE_MIN_SCALE,\n max_dim=self.config.IMAGE_MAX_DIM,\n mode=self.config.IMAGE_RESIZE_MODE)\n molded_image = mold_image(molded_image, self.config)\n # Build image_meta\n image_meta = compose_image_meta(\n 0, image.shape, molded_image.shape, window, scale,\n np.zeros([self.config.NUM_CLASSES], dtype=np.int32))\n # Append\n molded_images.append(molded_image)\n windows.append(window)\n image_metas.append(image_meta)\n # Pack into arrays\n molded_images = np.stack(molded_images)\n image_metas = np.stack(image_metas)\n windows = np.stack(windows)\n return molded_images, image_metas, windows\n\n def unmold_detections(self, detections, mrcnn_mask, original_image_shape,\n image_shape, window):\n \"\"\"Reformats the detections of one image from the format of the neural\n network output to a format suitable for use in the rest of the\n application.\n\n detections: [N, (y1, x1, y2, x2, class_id, score)] in normalized coordinates\n mrcnn_mask: [N, height, width, num_classes]\n original_image_shape: [H, W, C] Original image shape before resizing\n image_shape: [H, W, C] Shape of the image after resizing and padding\n window: [y1, x1, y2, x2] Pixel coordinates of box in the image where the real\n image is excluding the padding.\n\n Returns:\n boxes: [N, (y1, x1, y2, x2)] Bounding boxes in pixels\n class_ids: [N] Integer class IDs for each bounding box\n scores: [N] Float probability scores of the class_id\n masks: [height, width, num_instances] Instance masks\n \"\"\"\n # How many detections do we have?\n # Detections array is padded with zeros. Find the first class_id == 0.\n zero_ix = np.where(detections[:, 4] == 0)[0]\n N = zero_ix[0] if zero_ix.shape[0] > 0 else detections.shape[0]\n\n # Extract boxes, class_ids, scores, and class-specific masks\n boxes = detections[:N, :4]\n class_ids = detections[:N, 4].astype(np.int32)\n scores = detections[:N, 5]\n masks = mrcnn_mask[np.arange(N), :, :, class_ids]\n\n # Translate normalized coordinates in the resized image to pixel\n # coordinates in the original image before resizing\n window = utils.norm_boxes(window, image_shape[:2])\n wy1, wx1, wy2, wx2 = window\n shift = np.array([wy1, wx1, wy1, wx1])\n wh = wy2 - wy1 # window height\n ww = wx2 - wx1 # window width\n scale = np.array([wh, ww, wh, ww])\n # Convert boxes to normalized coordinates on the window\n boxes = np.divide(boxes - shift, scale)\n # Convert boxes to pixel coordinates on the original image\n boxes = utils.denorm_boxes(boxes, original_image_shape[:2])\n\n # Filter out detections with zero area. 
Happens in early training when\n # network weights are still random\n exclude_ix = np.where(\n (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) <= 0)[0]\n if exclude_ix.shape[0] > 0:\n boxes = np.delete(boxes, exclude_ix, axis=0)\n class_ids = np.delete(class_ids, exclude_ix, axis=0)\n scores = np.delete(scores, exclude_ix, axis=0)\n masks = np.delete(masks, exclude_ix, axis=0)\n N = class_ids.shape[0]\n\n # Resize masks to original image size and set boundary threshold.\n full_masks = []\n for i in range(N):\n # Convert neural network mask to full size mask\n full_mask = utils.unmold_mask(masks[i], boxes[i], original_image_shape)\n full_masks.append(full_mask)\n full_masks = np.stack(full_masks, axis=-1)\\\n if full_masks else np.empty(original_image_shape[:2] + (0,))\n\n return boxes, class_ids, scores, full_masks\n\n def detect(self, images, verbose=0):\n \"\"\"Runs the detection pipeline.\n\n images: List of images, potentially of different sizes.\n\n Returns a list of dicts, one dict per image. The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(\n images) == self.config.BATCH_SIZE, \"len(images) must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(images)))\n for image in images:\n log(\"image\", image)\n\n # Mold inputs to format expected by the neural network\n molded_images, image_metas, windows = self.mold_inputs(images)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape,\\\n \"After resizing, all images must have the same size. Check IMAGE_RESIZE_MODE and image sizes.\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(images):\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n windows[i])\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def detect_molded(self, molded_images, image_metas, verbose=0):\n \"\"\"Runs the detection pipeline, but expect inputs that are\n molded already. Used mostly for debugging and inspecting\n the model.\n\n molded_images: List of images loaded using load_image_gt()\n image_metas: image meta data, also returned by load_image_gt()\n\n Returns a list of dicts, one dict per image. 
The dict contains:\n rois: [N, (y1, x1, y2, x2)] detection bounding boxes\n class_ids: [N] int class IDs\n scores: [N] float probability scores for the class IDs\n masks: [H, W, N] instance binary masks\n \"\"\"\n assert self.mode == \"inference\", \"Create model in inference mode.\"\n assert len(molded_images) == self.config.BATCH_SIZE,\\\n \"Number of images must be equal to BATCH_SIZE\"\n\n if verbose:\n log(\"Processing {} images\".format(len(molded_images)))\n for image in molded_images:\n log(\"image\", image)\n\n # Validate image sizes\n # All images in a batch MUST be of the same size\n image_shape = molded_images[0].shape\n for g in molded_images[1:]:\n assert g.shape == image_shape, \"Images must have the same size\"\n\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n\n if verbose:\n log(\"molded_images\", molded_images)\n log(\"image_metas\", image_metas)\n log(\"anchors\", anchors)\n # Run object detection\n detections, _, _, mrcnn_mask, _, _, _ =\\\n self.keras_model.predict([molded_images, image_metas, anchors], verbose=0)\n # Process detections\n results = []\n for i, image in enumerate(molded_images):\n window = [0, 0, image.shape[0], image.shape[1]]\n final_rois, final_class_ids, final_scores, final_masks =\\\n self.unmold_detections(detections[i], mrcnn_mask[i],\n image.shape, molded_images[i].shape,\n window)\n results.append({\n \"rois\": final_rois,\n \"class_ids\": final_class_ids,\n \"scores\": final_scores,\n \"masks\": final_masks,\n })\n return results\n\n def get_anchors(self, image_shape):\n \"\"\"Returns anchor pyramid for the given image size.\"\"\"\n backbone_shapes = compute_backbone_shapes(self.config, image_shape)\n # Cache anchors and reuse if image shape is the same\n if not hasattr(self, \"_anchor_cache\"):\n self._anchor_cache = {}\n if not tuple(image_shape) in self._anchor_cache:\n # Generate Anchors\n a = utils.generate_pyramid_anchors(\n self.config.RPN_ANCHOR_SCALES,\n self.config.RPN_ANCHOR_RATIOS,\n backbone_shapes,\n self.config.BACKBONE_STRIDES,\n self.config.RPN_ANCHOR_STRIDE)\n # Keep a copy of the latest anchors in pixel coordinates because\n # it's used in inspect_model notebooks.\n # TODO: Remove this after the notebook are refactored to not use it\n self.anchors = a\n # Normalize coordinates\n self._anchor_cache[tuple(image_shape)] = utils.norm_boxes(a, image_shape[:2])\n return self._anchor_cache[tuple(image_shape)]\n\n def ancestor(self, tensor, name, checked=None):\n \"\"\"Finds the ancestor of a TF tensor in the computation graph.\n tensor: TensorFlow symbolic tensor.\n name: Name of ancestor tensor to find\n checked: For internal use. 
A list of tensors that were already\n searched to avoid loops in traversing the graph.\n \"\"\"\n checked = checked if checked is not None else []\n # Put a limit on how deep we go to avoid very long loops\n if len(checked) > 500:\n return None\n # Convert name to a regex and allow matching a number prefix\n # because Keras adds them automatically\n if isinstance(name, str):\n name = re.compile(name.replace(\"/\", r\"(\\_\\d+)*/\"))\n\n parents = tensor.op.inputs\n for p in parents:\n if p in checked:\n continue\n if bool(re.fullmatch(name, p.name)):\n return p\n checked.append(p)\n a = self.ancestor(p, name, checked)\n if a is not None:\n return a\n return None\n\n def find_trainable_layer(self, layer):\n \"\"\"If a layer is encapsulated by another layer, this function\n digs through the encapsulation and returns the layer that holds\n the weights.\n \"\"\"\n if layer.__class__.__name__ == 'TimeDistributed':\n return self.find_trainable_layer(layer.layer)\n return layer\n\n def get_trainable_layers(self):\n \"\"\"Returns a list of layers that have weights.\"\"\"\n layers = []\n # Loop through all layers\n for l in self.keras_model.layers:\n # If layer is a wrapper, find inner trainable layer\n l = self.find_trainable_layer(l)\n # Include layer if it has weights\n if l.get_weights():\n layers.append(l)\n return layers\n\n def run_graph(self, images, outputs, image_metas=None):\n \"\"\"Runs a subset of the computation graph that computes the given\n outputs.\n\n image_metas: If provided, the images are assumed to be already\n molded (i.e. resized, padded, and normalized)\n\n outputs: List of tuples (name, tensor) to compute. The tensors are\n symbolic TensorFlow tensors and the names are for easy tracking.\n\n Returns an ordered dict of results. Keys are the names received in the\n input and values are Numpy arrays.\n \"\"\"\n model = self.keras_model\n\n # Organize desired outputs into an ordered dict\n outputs = OrderedDict(outputs)\n for o in outputs.values():\n assert o is not None\n\n # Build a Keras function to run parts of the computation graph\n inputs = model.inputs\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n inputs += [K.learning_phase()]\n kf = K.function(model.inputs, list(outputs.values()))\n\n # Prepare inputs\n if image_metas is None:\n molded_images, image_metas, _ = self.mold_inputs(images)\n else:\n molded_images = images\n image_shape = molded_images[0].shape\n # Anchors\n anchors = self.get_anchors(image_shape)\n # Duplicate across the batch dimension because Keras requires it\n # TODO: can this be optimized to avoid duplicating the anchors?\n anchors = np.broadcast_to(anchors, (self.config.BATCH_SIZE,) + anchors.shape)\n model_in = [molded_images, image_metas, anchors]\n\n # Run inference\n if model.uses_learning_phase and not isinstance(K.learning_phase(), int):\n model_in.append(0.)\n outputs_np = kf(model_in)\n\n # Pack the generated Numpy arrays into a dict and log the results.\n outputs_np = OrderedDict([(k, v)\n for k, v in zip(outputs.keys(), outputs_np)])\n for k, v in outputs_np.items():\n log(k, v)\n return outputs_np\n\n\n############################################################\n# Data Formatting\n############################################################\n\ndef compose_image_meta(image_id, original_image_shape, image_shape,\n window, scale, active_class_ids):\n \"\"\"Takes attributes of an image and puts them in one 1D array.\n\n image_id: An int ID of the image.
Useful for debugging.\n original_image_shape: [H, W, C] before resizing or padding.\n image_shape: [H, W, C] after resizing and padding\n window: (y1, x1, y2, x2) in pixels. The area of the image where the real\n image is (excluding the padding)\n scale: The scaling factor applied to the original image (float32)\n active_class_ids: List of class_ids available in the dataset from which\n the image came. Useful if training on images from multiple datasets\n where not all classes are present in all datasets.\n \"\"\"\n meta = np.array(\n [image_id] + # size=1\n list(original_image_shape) + # size=3\n list(image_shape) + # size=3\n list(window) + # size=4 (y1, x1, y2, x2) in image coordinates\n [scale] + # size=1\n list(active_class_ids) # size=num_classes\n )\n return meta\n\n\ndef parse_image_meta(meta):\n \"\"\"Parses an array that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed values.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id.astype(np.int32),\n \"original_image_shape\": original_image_shape.astype(np.int32),\n \"image_shape\": image_shape.astype(np.int32),\n \"window\": window.astype(np.int32),\n \"scale\": scale.astype(np.float32),\n \"active_class_ids\": active_class_ids.astype(np.int32),\n }\n\n\ndef parse_image_meta_graph(meta):\n \"\"\"Parses a tensor that contains image attributes to its components.\n See compose_image_meta() for more details.\n\n meta: [batch, meta length] where meta length depends on NUM_CLASSES\n\n Returns a dict of the parsed tensors.\n \"\"\"\n image_id = meta[:, 0]\n original_image_shape = meta[:, 1:4]\n image_shape = meta[:, 4:7]\n window = meta[:, 7:11] # (y1, x1, y2, x2) window of image in pixels\n scale = meta[:, 11]\n active_class_ids = meta[:, 12:]\n return {\n \"image_id\": image_id,\n \"original_image_shape\": original_image_shape,\n \"image_shape\": image_shape,\n \"window\": window,\n \"scale\": scale,\n \"active_class_ids\": active_class_ids,\n }\n\n\ndef mold_image(images, config):\n \"\"\"Expects an RGB image (or array of images), subtracts\n the mean pixel, and converts the result to float. Expects image\n colors in RGB order.\n \"\"\"\n return images.astype(np.float32) - config.MEAN_PIXEL\n\n\ndef unmold_image(normalized_images, config):\n \"\"\"Takes an image normalized with mold_image() and returns the original.\"\"\"\n return (normalized_images + config.MEAN_PIXEL).astype(np.uint8)\n\n\n############################################################\n# Miscellaneous Graph Functions\n############################################################\n\ndef trim_zeros_graph(boxes, name='trim_zeros'):\n \"\"\"Often boxes are represented with matrices of shape [N, 4] and\n are padded with zeros.
This removes zero boxes.\n\n boxes: [N, 4] matrix of boxes.\n non_zeros: [N] a 1D boolean mask identifying the rows to keep\n \"\"\"\n non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)\n boxes = tf.boolean_mask(boxes, non_zeros, name=name)\n return boxes, non_zeros\n\n\ndef batch_pack_graph(x, counts, num_rows):\n \"\"\"Picks different number of values from each row\n in x depending on the values in counts.\n \"\"\"\n outputs = []\n for i in range(num_rows):\n outputs.append(x[i, :counts[i]])\n return tf.concat(outputs, axis=0)\n\n\ndef norm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from pixel coordinates to normalized coordinates.\n boxes: [..., (y1, x1, y2, x2)] in pixel coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in normalized coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.divide(boxes - shift, scale)\n\n\ndef denorm_boxes_graph(boxes, shape):\n \"\"\"Converts boxes from normalized coordinates to pixel coordinates.\n boxes: [..., (y1, x1, y2, x2)] in normalized coordinates\n shape: [..., (height, width)] in pixels\n\n Note: In pixel coordinates (y2, x2) is outside the box. But in normalized\n coordinates it's inside the box.\n\n Returns:\n [..., (y1, x1, y2, x2)] in pixel coordinates\n \"\"\"\n h, w = tf.split(tf.cast(shape, tf.float32), 2)\n scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)\n shift = tf.constant([0., 0., 1., 1.])\n return tf.cast(tf.round(tf.multiply(boxes, scale) + shift), tf.int32)\n" ]
[ [ "tensorflow.exp", "numpy.random.choice", "tensorflow.image.non_max_suppression", "numpy.copy", "tensorflow.unique", "tensorflow.reshape", "numpy.where", "tensorflow.sqrt", "numpy.sort", "tensorflow.stack", "tensorflow.control_dependencies", "numpy.broadcast_to", "tensorflow.divide", "tensorflow.cast", "tensorflow.identity", "numpy.concatenate", "numpy.divide", "numpy.max", "tensorflow.shape", "numpy.empty", "tensorflow.concat", "numpy.log", "tensorflow.argmax", "tensorflow.image.crop_and_resize", "tensorflow.Variable", "tensorflow.transpose", "tensorflow.add_n", "tensorflow.constant", "tensorflow.squeeze", "numpy.argmax", "numpy.random.randint", "numpy.arange", "tensorflow.split", "tensorflow.pad", "numpy.expand_dims", "tensorflow.abs", "numpy.array", "tensorflow.range", "tensorflow.minimum", "numpy.reshape", "numpy.zeros", "tensorflow.where", "tensorflow.gather_nd", "tensorflow.round", "tensorflow.expand_dims", "numpy.delete", "numpy.random.shuffle", "tensorflow.map_fn", "tensorflow.sparse_tensor_to_dense", "tensorflow.log", "numpy.stack", "numpy.amax", "tensorflow.reduce_sum", "tensorflow.nn.top_k", "numpy.hstack", "tensorflow.boolean_mask", "tensorflow.logical_and", "tensorflow.nn.sparse_softmax_cross_entropy_with_logits", "numpy.fliplr", "tensorflow.random_shuffle", "tensorflow.size", "tensorflow.multiply", "numpy.sum", "numpy.ones", "tensorflow.equal", "numpy.any", "tensorflow.reduce_max", "numpy.abs", "tensorflow.gather", "tensorflow.maximum", "tensorflow.reduce_mean", "tensorflow.stop_gradient" ] ]
Acemyzoe/mnist-TensorRT
[ "df455542d1f889af755e08412b7fd81343cff2ff" ]
[ "mnist-tensorRT.py" ]
[ "#!/usr/bin/python\n# -*- coding:utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport tensorflow as tf\nimport time\nimport numpy\n\ndef mnist_model():\n mnist = tf.keras.datasets.mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train, x_test = x_train / 255.0, x_test / 255.0\n\n model = tf.keras.models.Sequential()\n model.add(tf.keras.layers.Flatten(input_shape=(28,28)))\n model.add(tf.keras.layers.Dense(512, activation='relu'))\n model.add(tf.keras.layers.Dropout(0.2))\n model.add(tf.keras.layers.Dense(10, activation='softmax'))\n\n model.summary()\n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n model.fit(x_train, y_train, batch_size=64,epochs=10)\n score = model.evaluate(x_test, y_test, verbose=2)\n print('loss:',score[0])\n print('accuracy:',score[1])\n #model.save('tf_model',save_format = 'tf')\n model.save('tf_model.h5')\n\ndef trt(trt_opt):\n converter = tf.experimental.tensorrt.Converter(input_saved_model_dir='tf_model')\n converter.convert()#完成转换,但是此时没有进行优化,优化在执行推理时完成\n if trt_opt == True:\n mnist = tf.keras.datasets.mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_test = x_test.astype('float32')\n x_test = x_test / 255.0\n def input_fn():\n yield (x_test[:1])\n converter.build(input_fn) #优化后保存\n converter.save('trt_model_opt')\n else:\n converter.save('trt_model')\n\ndef opt(model_path):\n mnist = tf.keras.datasets.mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_test = x_test.astype('float32')\n x_test /= 255\n\n model_loaded = tf.saved_model.load(model_path)#读取模型\n graph_func = model_loaded.signatures['serving_default']#获取推理函数\n t=time.time()\n #output = graph_func(tf.constant(x_test))\n output = model_loaded(x_test)\n print(output[0],'\\n',time.time()-t)\n\nif __name__ == '__main__':\n print(\"Num GPUs Available: \", len(tf.config.experimental.list_physical_devices('GPU')))\n mnist_model()\n #trt(True)\n #opt(\"tf_model\")\n #opt(\"trt_model\")\n #opt(\"trt_model_opt\")\n" ]
[ [ "tensorflow.keras.layers.Flatten", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Dropout", "tensorflow.experimental.tensorrt.Converter", "tensorflow.keras.models.Sequential", "tensorflow.config.experimental.list_physical_devices", "tensorflow.saved_model.load" ] ]
ZXLam/nnUNet
[ "0cf7c8a857c248d6be171e4945427b405f6ac258" ]
[ "nnunet/evaluation/evaluator.py" ]
[ "# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport collections\nimport hashlib\nimport inspect\nimport json\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom multiprocessing.pool import Pool\n\nimport SimpleITK as sitk\nimport numpy as np\nimport pandas as pd\nfrom batchgenerators.utilities.file_and_folder_operations import save_json, subfiles, join\n\nfrom nnunet.evaluation.metrics import ConfusionMatrix, ALL_METRICS\n\n\nclass Evaluator:\n \"\"\"Object that holds test and reference segmentations with label information\n and computes a number of metrics on the two. 'labels' must either be an\n iterable of numeric values (or tuples thereof) or a dictionary with string\n names and numeric values.\n \"\"\"\n\n default_metrics = [\n \"False Positive Rate\",\n \"Dice\",\n \"Jaccard\",\n \"Precision\",\n \"Recall\",\n \"Accuracy\",\n \"False Omission Rate\",\n \"Negative Predictive Value\",\n \"False Negative Rate\",\n \"True Negative Rate\",\n \"False Discovery Rate\",\n \"Total Positives Test\",\n \"Total Positives Reference\"\n ]\n\n default_advanced_metrics = [\n #\"Hausdorff Distance\",\n \"Hausdorff Distance 95\",\n #\"Avg. Surface Distance\",\n #\"Avg. Symmetric Surface Distance\"\n ]\n\n def __init__(self,\n test=None,\n reference=None,\n labels=None,\n metrics=None,\n advanced_metrics=None,\n nan_for_nonexisting=True):\n\n self.test = None\n self.reference = None\n self.confusion_matrix = ConfusionMatrix()\n self.labels = None\n self.nan_for_nonexisting = nan_for_nonexisting\n self.result = None\n\n self.metrics = []\n if metrics is None:\n for m in self.default_metrics:\n self.metrics.append(m)\n else:\n for m in metrics:\n self.metrics.append(m)\n\n self.advanced_metrics = []\n if advanced_metrics is None:\n for m in self.default_advanced_metrics:\n self.advanced_metrics.append(m)\n else:\n for m in advanced_metrics:\n self.advanced_metrics.append(m)\n\n self.set_reference(reference)\n self.set_test(test)\n if labels is not None:\n self.set_labels(labels)\n else:\n if test is not None and reference is not None:\n self.construct_labels()\n\n def set_test(self, test):\n \"\"\"Set the test segmentation.\"\"\"\n\n self.test = test\n\n def set_reference(self, reference):\n \"\"\"Set the reference segmentation.\"\"\"\n\n self.reference = reference\n\n def set_labels(self, labels):\n \"\"\"Set the labels.\n :param labels= may be a dictionary (int->str), a set (of ints), a tuple (of ints) or a list (of ints). 
Labels\n will only have names if you pass a dictionary\"\"\"\n\n if isinstance(labels, dict):\n self.labels = collections.OrderedDict(labels)\n elif isinstance(labels, set):\n self.labels = list(labels)\n elif isinstance(labels, np.ndarray):\n self.labels = [i for i in labels]\n elif isinstance(labels, (list, tuple)):\n self.labels = labels\n else:\n raise TypeError(\"Can only handle dict, list, tuple, set & numpy array, but input is of type {}\".format(type(labels)))\n\n def construct_labels(self):\n \"\"\"Construct label set from unique entries in segmentations.\"\"\"\n\n if self.test is None and self.reference is None:\n raise ValueError(\"No test or reference segmentations.\")\n elif self.test is None:\n labels = np.unique(self.reference)\n else:\n labels = np.union1d(np.unique(self.test),\n np.unique(self.reference))\n self.labels = list(map(lambda x: int(x), labels))\n\n def set_metrics(self, metrics):\n \"\"\"Set evaluation metrics\"\"\"\n\n if isinstance(metrics, set):\n self.metrics = list(metrics)\n elif isinstance(metrics, (list, tuple, np.ndarray)):\n self.metrics = metrics\n else:\n raise TypeError(\"Can only handle list, tuple, set & numpy array, but input is of type {}\".format(type(metrics)))\n\n def add_metric(self, metric):\n\n if metric not in self.metrics:\n self.metrics.append(metric)\n\n def evaluate(self, test=None, reference=None, advanced=False, **metric_kwargs):\n \"\"\"Compute metrics for segmentations.\"\"\"\n if test is not None:\n self.set_test(test)\n\n if reference is not None:\n self.set_reference(reference)\n\n if self.test is None or self.reference is None:\n raise ValueError(\"Need both test and reference segmentations.\")\n\n if self.labels is None:\n self.construct_labels()\n\n self.metrics.sort()\n\n # get functions for evaluation\n # somewhat convoluted, but allows users to define additional metrics\n # on the fly, e.g. inside an IPython console\n _funcs = {m: ALL_METRICS[m] for m in self.metrics + self.advanced_metrics}\n frames = inspect.getouterframes(inspect.currentframe())\n for metric in self.metrics:\n for f in frames:\n if metric in f[0].f_locals:\n _funcs[metric] = f[0].f_locals[metric]\n break\n else:\n if metric in _funcs:\n continue\n else:\n raise NotImplementedError(\n \"Metric {} not implemented.\".format(metric))\n\n # get results\n self.result = OrderedDict()\n\n eval_metrics = list(self.metrics) # copy so += below does not mutate self.metrics\n if advanced:\n eval_metrics += self.advanced_metrics\n\n if isinstance(self.labels, dict):\n\n for label, name in self.labels.items():\n k = str(name)\n self.result[k] = OrderedDict()\n if not hasattr(label, \"__iter__\"):\n self.confusion_matrix.set_test(self.test == label)\n self.confusion_matrix.set_reference(self.reference == label)\n else:\n current_test = 0\n current_reference = 0\n for l in label:\n current_test += (self.test == l)\n current_reference += (self.reference == l)\n self.confusion_matrix.set_test(current_test)\n self.confusion_matrix.set_reference(current_reference)\n for metric in eval_metrics:\n self.result[k][metric] = _funcs[metric](confusion_matrix=self.confusion_matrix,\n nan_for_nonexisting=self.nan_for_nonexisting,\n **metric_kwargs)\n\n else:\n\n for i, l in enumerate(self.labels):\n k = str(l)\n self.result[k] = OrderedDict()\n self.confusion_matrix.set_test(self.test == l)\n self.confusion_matrix.set_reference(self.reference == l)\n for metric in eval_metrics:\n self.result[k][metric] = _funcs[metric](confusion_matrix=self.confusion_matrix,\n nan_for_nonexisting=self.nan_for_nonexisting,\n **metric_kwargs)\n\n return self.result\n\n def to_dict(self):\n\n if self.result is None:\n self.evaluate()\n return self.result\n\n def to_array(self):\n \"\"\"Return result as numpy array (labels x metrics).\"\"\"\n\n if self.result is None:\n self.evaluate() # was missing the call parentheses\n\n result_metrics = sorted(self.result[list(self.result.keys())[0]].keys())\n\n a = np.zeros((len(self.labels), len(result_metrics)), dtype=np.float32)\n\n if isinstance(self.labels, dict):\n for i, label in enumerate(self.labels.keys()):\n for j, metric in enumerate(result_metrics):\n a[i][j] = self.result[str(self.labels[label])][metric] # result keys are str(name)\n else:\n for i, label in enumerate(self.labels):\n for j, metric in enumerate(result_metrics):\n a[i][j] = self.result[str(label)][metric] # result keys are str(label)\n\n return a\n\n def to_pandas(self):\n \"\"\"Return result as pandas DataFrame.\"\"\"\n\n a = self.to_array()\n\n if isinstance(self.labels, dict):\n labels = list(self.labels.values())\n else:\n labels = self.labels\n\n result_metrics = sorted(self.result[list(self.result.keys())[0]].keys())\n\n return pd.DataFrame(a, index=labels, columns=result_metrics)\n\n\nclass NiftiEvaluator(Evaluator):\n\n def __init__(self, *args, **kwargs):\n\n self.test_nifti = None\n self.reference_nifti = None\n super(NiftiEvaluator, self).__init__(*args, **kwargs)\n\n def set_test(self, test):\n \"\"\"Set the test segmentation.\"\"\"\n\n if test is not None:\n self.test_nifti = sitk.ReadImage(test)\n super(NiftiEvaluator, self).set_test(sitk.GetArrayFromImage(self.test_nifti))\n else:\n self.test_nifti = None\n super(NiftiEvaluator, self).set_test(test)\n\n def set_reference(self, reference):\n \"\"\"Set the reference segmentation.\"\"\"\n\n if reference is not None:\n self.reference_nifti = sitk.ReadImage(reference)\n super(NiftiEvaluator, self).set_reference(sitk.GetArrayFromImage(self.reference_nifti))\n else:\n self.reference_nifti = None\n super(NiftiEvaluator,
self).set_reference(reference)\n\n def evaluate(self, test=None, reference=None, voxel_spacing=None, **metric_kwargs):\n\n if voxel_spacing is None:\n voxel_spacing = np.array(self.test_nifti.GetSpacing())[::-1]\n metric_kwargs[\"voxel_spacing\"] = voxel_spacing\n\n return super(NiftiEvaluator, self).evaluate(test, reference, **metric_kwargs)\n\n\ndef run_evaluation(args):\n test, ref, evaluator, metric_kwargs = args\n # evaluate\n evaluator.set_test(test)\n evaluator.set_reference(ref)\n if evaluator.labels is None:\n evaluator.construct_labels()\n current_scores = evaluator.evaluate(**metric_kwargs)\n if type(test) == str:\n current_scores[\"test\"] = test\n if type(ref) == str:\n current_scores[\"reference\"] = ref\n return current_scores\n\n\ndef aggregate_scores(test_ref_pairs,\n evaluator=NiftiEvaluator,\n labels=None,\n nanmean=True,\n json_output_file=None,\n json_name=\"\",\n json_description=\"\",\n json_author=\"Fabian\",\n json_task=\"\",\n num_threads=2,\n **metric_kwargs):\n \"\"\"\n test = predicted image\n :param test_ref_pairs:\n :param evaluator:\n :param labels: must be a dict of int-> str or a list of int\n :param nanmean:\n :param json_output_file:\n :param json_name:\n :param json_description:\n :param json_author:\n :param json_task:\n :param metric_kwargs:\n :return:\n \"\"\"\n\n if type(evaluator) == type:\n evaluator = evaluator()\n\n if labels is not None:\n evaluator.set_labels(labels)\n\n all_scores = OrderedDict()\n all_scores[\"all\"] = []\n all_scores[\"mean\"] = OrderedDict()\n\n test = [i[0] for i in test_ref_pairs]\n ref = [i[1] for i in test_ref_pairs]\n p = Pool(num_threads)\n all_res = p.map(run_evaluation, zip(test, ref, [evaluator]*len(ref), [metric_kwargs]*len(ref)))\n p.close()\n p.join()\n\n for i in range(len(all_res)):\n all_scores[\"all\"].append(all_res[i])\n\n # append score list for mean\n for label, score_dict in all_res[i].items():\n if label in (\"test\", \"reference\"):\n continue\n if label not in all_scores[\"mean\"]:\n all_scores[\"mean\"][label] = OrderedDict()\n for score, value in score_dict.items():\n if score not in all_scores[\"mean\"][label]:\n all_scores[\"mean\"][label][score] = []\n all_scores[\"mean\"][label][score].append(value)\n\n for label in all_scores[\"mean\"]:\n for score in all_scores[\"mean\"][label]:\n if nanmean:\n all_scores[\"mean\"][label][score] = float(np.nanmean(all_scores[\"mean\"][label][score]))\n else:\n all_scores[\"mean\"][label][score] = float(np.mean(all_scores[\"mean\"][label][score]))\n\n # save to file if desired\n # we create a hopefully unique id by hashing the entire output dictionary\n if json_output_file is not None:\n json_dict = OrderedDict()\n json_dict[\"name\"] = json_name\n json_dict[\"description\"] = json_description\n timestamp = datetime.today()\n json_dict[\"timestamp\"] = str(timestamp)\n json_dict[\"task\"] = json_task\n json_dict[\"author\"] = json_author\n json_dict[\"results\"] = all_scores\n json_dict[\"id\"] = hashlib.md5(json.dumps(json_dict).encode(\"utf-8\")).hexdigest()[:12]\n save_json(json_dict, json_output_file)\n\n\n return all_scores\n\n\ndef aggregate_scores_for_experiment(score_file,\n labels=None,\n metrics=Evaluator.default_metrics,\n nanmean=True,\n json_output_file=None,\n json_name=\"\",\n json_description=\"\",\n json_author=\"Fabian\",\n json_task=\"\"):\n\n scores = np.load(score_file)\n scores_mean = scores.mean(0)\n if labels is None:\n labels = list(map(str, range(scores.shape[1])))\n\n results = []\n results_mean = OrderedDict()\n for i in 
range(scores.shape[0]):\n results.append(OrderedDict())\n for l, label in enumerate(labels):\n results[-1][label] = OrderedDict()\n results_mean[label] = OrderedDict()\n for m, metric in enumerate(metrics):\n results[-1][label][metric] = float(scores[i][l][m])\n results_mean[label][metric] = float(scores_mean[l][m])\n\n json_dict = OrderedDict()\n json_dict[\"name\"] = json_name\n json_dict[\"description\"] = json_description\n timestamp = datetime.today()\n json_dict[\"timestamp\"] = str(timestamp)\n json_dict[\"task\"] = json_task\n json_dict[\"author\"] = json_author\n json_dict[\"results\"] = {\"all\": results, \"mean\": results_mean}\n json_dict[\"id\"] = hashlib.md5(json.dumps(json_dict).encode(\"utf-8\")).hexdigest()[:12]\n if json_output_file is not None:\n json_output_file = open(json_output_file, \"w\")\n json.dump(json_dict, json_output_file, indent=4, separators=(\",\", \": \"))\n json_output_file.close()\n\n return json_dict\n\n\ndef evaluate_folder(folder_with_gts: str, folder_with_predictions: str, labels: tuple, **metric_kwargs):\n \"\"\"\n writes a summary.json to folder_with_predictions\n :param folder_with_gts: folder where the ground truth segmentations are saved. Must be nifti files.\n :param folder_with_predictions: folder where the predicted segmentations are saved. Must be nifti files.\n :param labels: tuple of int with the labels in the dataset. For example (0, 1, 2, 3) for Task001_BrainTumour.\n :return:\n \"\"\"\n files_gt = subfiles(folder_with_gts, suffix=\".nii.gz\", join=False)\n files_pred = subfiles(folder_with_predictions, suffix=\".nii.gz\", join=False)\n # assert all([i in files_pred for i in files_gt]), \"files missing in folder_with_predictions\"\n assert all([i in files_gt for i in files_pred]), \"files missing in folder_with_gts\"\n test_ref_pairs = [(join(folder_with_predictions, i), join(folder_with_gts, i)) for i in files_pred]\n res = aggregate_scores(test_ref_pairs, json_output_file=join(folder_with_predictions, \"summary.json\"),\n num_threads=8, labels=labels, **metric_kwargs)\n return res\n\n\ndef nnunet_evaluate_folder():\n import argparse\n parser = argparse.ArgumentParser(\"Evaluates the segmentations located in the folder pred. Output of this script is \"\n \"a json file. At the very bottom of the json file is going to be a 'mean' \"\n \"entry with average metrics across all cases\")\n parser.add_argument('-ref', required=True, type=str, help=\"Folder containing the reference segmentations in nifti \"\n \"format.\")\n parser.add_argument('-pred', required=True, type=str, help=\"Folder containing the predicted segmentations in nifti \"\n \"format. File names must match between the folders!\")\n parser.add_argument('-l', nargs='+', type=int, required=True, help=\"List of label IDs (integer values) that should \"\n \"be evaluated. Best practice is to use all int \"\n \"values present in the dataset, so for example \"\n \"for LiTS the labels are 0: background, 1: \"\n \"liver, 2: tumor. So this argument \"\n \"should be -l 1 2. You can if you want also \"\n \"evaluate the background label (0) but in \"\n \"this case that would not give any useful \"\n \"information.\")\n args = parser.parse_args()\n return evaluate_folder(args.ref, args.pred, args.l)" ]
[ [ "pandas.DataFrame", "numpy.load", "numpy.mean", "numpy.nanmean", "numpy.unique" ] ]
PanJinquan/pytorch-base-trainer
[ "37799c948f72b2f9d3771ff469e06cdbff4a1d07" ]
[ "basetrainer/metric/eval_tools/acc.py" ]
[ "# -*-coding: utf-8 -*-\n\"\"\"\n @Project: python-learning-notes\n @File : acc.py\n @Author : panjq\n @E-mail : [email protected]\n @Date : 2019-07-12 18:22:29\n\"\"\"\n\nimport matplotlib\n\n# matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nimport numpy as np\n\n\ndef plot_acc_curve(acc_list, threshold_list, line_names,title='Accuracy-Threshold'):\n '''\n 绘制roc曲线\n :param acc_list:\n :param threshold_list:\n :param roc_auc_list:\n :param line_names:曲线名称\n :return:\n '''\n # 绘图\n # plt.figure()\n lw = 2\n plt.figure(figsize=(10, 10))\n colors = [\"b\", \"r\", \"c\", \"m\", \"g\", \"lt_steps\", \"k\", \"w\"]\n xlim_max = 0\n for acc, th, color, line_name in zip(acc_list, threshold_list, colors, line_names):\n max_acc = max(acc)\n if xlim_max < max(th):\n xlim_max = max(th)\n plt.plot(th, acc, color=color, lw=lw,\n label='{} max Accuracy:{:.3f})'.format(line_name, max_acc)) # 假正率为横坐标,真正率为纵坐标做曲线\n # plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n # plt.plot([0, 1], [1, 0], color='navy', lw=lw, linestyle='--') # 绘制y=1-x的直线\n\n plt.xlim([0.0, xlim_max])\n\n plt.ylim([0.0, 1.05])\n # 设置横纵坐标的名称以及对应字体格式\n font = {'family': 'Times New Roman',\n 'weight': 'normal',\n 'size': 20,\n }\n plt.xlabel('Threshold', font)\n plt.ylabel('Accuracy ', font)\n\n plt.title(title)\n plt.legend(loc=\"lower right\") # \"upper right\"\n # plt.legend(loc=\"upper right\")#\"upper right\"\n\n plt.show()\n\n\ndef get_accuracy_list(y_true, y_pred, threshold_list, invert=False, plot_acc=True):\n if isinstance(y_pred, list):\n y_pred = np.asarray(y_pred)\n if isinstance(y_true, list):\n y_true = np.asarray(y_true)\n\n acc_list = []\n for th in threshold_list:\n if invert:\n pred_label = np.where(y_pred <= th, 1, 0)\n # pred_label = np.less(y_pred, th)\n else:\n pred_label = np.where(y_pred >= th, 1, 0)\n # pred_label = np.greater(y_pred, th)\n\n true_label = y_true\n accuracy = metrics.accuracy_score(true_label, pred_label)\n acc_list.append(accuracy)\n max_acc = max(acc_list)\n index = np.where(np.asarray(acc_list) == max_acc)[0]\n best_acc_index = max(index)\n best_threshold = threshold_list[best_acc_index]\n # print(\"acc_list :{}\".format(acc_list))\n # print(\"max accuracy:{},best_acc_index:>{},best_threshold:>{}\".format(max_acc, best_acc_index,best_threshold))\n if plot_acc:\n acc_list = [acc_list]\n threshold_list = [threshold_list]\n line_names = [\"\"]\n title = 'Accuracy-Threshold,BT:{}'.format(best_threshold)\n plot_acc_curve(acc_list, threshold_list, line_names,title=title)\n return max_acc,best_threshold\n\n\nif __name__ == \"__main__\":\n y_pred = [0, 0.2, 0.4, 0.6, 0.8, 0.8, 0.6, 0.4, 0.2, 0.0]\n y_true = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n get_accuracy_list(y_true, y_pred, threshold_list=[0.1, 0.2, 0.4, 0.5])\n" ]
[ [ "matplotlib.pyplot.xlim", "numpy.asarray", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.ylim", "matplotlib.pyplot.title", "matplotlib.pyplot.legend", "matplotlib.pyplot.figure", "sklearn.metrics.accuracy_score", "numpy.where", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
Miki-lin/YOLOXR
[ "16eb48c76e97c36e4f53e40ee74115799238eea9" ]
[ "tools/demo_obb_kld.py" ]
[ "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport argparse\nimport os\nimport time\nfrom loguru import logger\n\nimport cv2\n\nimport torch\n\nfrom yolox.data.data_augment import preproc\nfrom yolox.data.datasets import COCO_CLASSES\nfrom yolox.data.datasets.dota_classes import VOC_CLASSES\nfrom yolox.exp import get_exp\nfrom yolox.utils import fuse_model, get_model_info, postprocessobb_kld, visobb\n\nIMAGE_EXT = [\".jpg\", \".jpeg\", \".webp\", \".bmp\", \".png\"]\n\n\ndef make_parser():\n parser = argparse.ArgumentParser(\"YOLOX Demo!\")\n parser.add_argument(\n \"--demo\", default=\"image\", help=\"demo type, eg. image, video and webcam\"\n )\n parser.add_argument(\"-expn\", \"--experiment-name\", type=str, default=None)\n parser.add_argument(\"-n\", \"--name\", type=str, default=None, help=\"model name\")\n\n parser.add_argument(\n \"--path\",\n default=\"/media/data/miki/HRSC2016/Test/images\",\n # default=\"/media/data/ljp/UCAS_AOD/UCAS_AOD/Test\",\n help=\"path to images or video\"\n )\n parser.add_argument(\"--camid\", type=int, default=0, help=\"webcam demo camera id\")\n parser.add_argument(\n \"--save_result\",\n action=\"store_false\",\n help=\"whether to save the inference result of image/video\",\n )\n\n # exp file\n parser.add_argument(\n \"-f\",\n \"--exp_file\",\n default='../exps/example/yolox_voc/yolox_dota_s_obb_kld.py',\n type=str,\n help=\"pls input your expriment description file\",\n )\n parser.add_argument(\"-c\", \"--ckpt\",\n default='/media/data/miki/yolox-obb/outputs/yolox_dota_s_obb_kld/best_ckpt.pth',\n type=str, help=\"ckpt for eval\")\n parser.add_argument(\n \"--device\",\n default=\"gpu\",\n type=str,\n help=\"device to run our model, can either be cpu or gpu\",\n )\n parser.add_argument(\"--conf\", default=0.1, type=float, help=\"test conf\")\n parser.add_argument(\"--nms\", default=0.25, type=float, help=\"test nms threshold\")\n parser.add_argument(\"--tsize\", default=1024, type=int, help=\"test img size\")\n parser.add_argument(\n \"--fp16\",\n dest=\"fp16\",\n default=False,\n action=\"store_true\",\n help=\"Adopting mix precision evaluating.\",\n )\n parser.add_argument(\n \"--fuse\",\n dest=\"fuse\",\n default=False,\n action=\"store_true\",\n help=\"Fuse conv and bn for testing.\",\n )\n parser.add_argument(\n \"--trt\",\n dest=\"trt\",\n default=False,\n action=\"store_true\",\n help=\"Using TensorRT model for testing.\",\n )\n return parser\n\n\ndef get_image_list(path):\n image_names = []\n for maindir, subdir, file_name_list in os.walk(path):\n for filename in file_name_list:\n apath = os.path.join(maindir, filename)\n ext = os.path.splitext(apath)[1]\n if ext in IMAGE_EXT:\n image_names.append(apath)\n return image_names\n\n\nclass Predictor(object):\n def __init__(\n self,\n model,\n exp,\n cls_names=COCO_CLASSES,\n trt_file=None,\n decoder=None,\n device=\"cpu\",\n ):\n self.model = model\n self.cls_names = cls_names\n self.decoder = decoder\n self.num_classes = exp.num_classes\n self.confthre = exp.test_conf\n self.nmsthre = exp.nmsthre\n self.test_size = exp.test_size\n self.device = device\n # if trt_file is not None:\n # from torch2trt import TRTModule\n #\n # model_trt = TRTModule()\n # model_trt.load_state_dict(torch.load(trt_file))\n #\n # x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()\n # self.model(x)\n # self.model = model_trt\n self.rgb_means = (0.485, 0.456, 0.406)\n self.std = (0.229, 0.224, 0.225)\n\n def inference(self, img):\n img_info = {\"id\": 
0}\n if isinstance(img, str):\n img_info[\"file_name\"] = os.path.basename(img)\n img = cv2.imread(img)\n else:\n img_info[\"file_name\"] = None\n\n height, width = img.shape[:2]\n img_info[\"height\"] = height\n img_info[\"width\"] = width\n img_info[\"raw_img\"] = img\n\n img, ratio = preproc(img, self.test_size, self.rgb_means, self.std)\n img_info[\"ratio\"] = ratio\n img = torch.from_numpy(img).unsqueeze(0)\n if self.device == \"gpu\":\n img = img.cuda()\n\n with torch.no_grad():\n t0 = time.time()\n outputs = self.model(img)\n if self.decoder is not None:\n outputs = self.decoder(outputs, dtype=outputs.type())\n if self.device == \"gpu\": #add\n outputs = outputs.cpu() #add\n outputs = postprocessobb_kld(\n outputs, self.num_classes, self.confthre, self.nmsthre\n ) #(x1,y1,x2,y2,x3,y3,x4,y4, score, class_pred)\n logger.info(\"Infer time: {:.4f}s\".format(time.time() - t0))\n return outputs, img_info\n\n def visual(self, output, img_info, cls_conf=0.35):\n # (n, 10) # (x1,y1,x2,y2,x3,y3,x4,y4, class_conf*obj_conf, class_pred)\n ratio = img_info[\"ratio\"]\n img = img_info[\"raw_img\"]\n if output is None:\n return img\n output = output.cpu()\n\n bboxes = output[:, 0:8]\n\n # preprocessing: resize\n bboxes /= ratio\n\n cls = output[:, 9]\n scores = output[:, 8]\n\n vis_res = visobb(img, bboxes, scores, cls, cls_conf, self.cls_names)\n return vis_res\n\n\ndef image_demo(predictor, vis_folder, path, current_time, save_result):\n if os.path.isdir(path):\n files = get_image_list(path)\n else:\n files = [path]\n files.sort()\n for image_name in files:\n outputs, img_info = predictor.inference(image_name)\n result_image = predictor.visual(outputs[0], img_info, predictor.confthre)\n # output[0] shape (n, 9) (x1,y1,x2,y2,x3,y3,x4,y4, class_conf*obj_conf)\n if save_result:\n save_folder = os.path.join(\n vis_folder, time.strftime(\"%Y_%m_%d_%H_%M_%S\", current_time)\n )\n os.makedirs(save_folder, exist_ok=True)\n save_file_name = os.path.join(save_folder, os.path.basename(image_name))\n logger.info(\"Saving detection result in {}\".format(save_file_name))\n cv2.imwrite(save_file_name, result_image)\n ch = cv2.waitKey(0)\n if ch == 27 or ch == ord(\"q\") or ch == ord(\"Q\"):\n break\n\n\ndef imageflow_demo(predictor, vis_folder, current_time, args):\n cap = cv2.VideoCapture(args.path if args.demo == \"video\" else args.camid)\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float\n fps = cap.get(cv2.CAP_PROP_FPS)\n save_folder = os.path.join(\n vis_folder, time.strftime(\"%Y_%m_%d_%H_%M_%S\", current_time)\n )\n os.makedirs(save_folder, exist_ok=True)\n if args.demo == \"video\":\n save_path = os.path.join(save_folder, args.path.split(\"/\")[-1])\n else:\n save_path = os.path.join(save_folder, \"camera.mp4\")\n logger.info(f\"video save_path is {save_path}\")\n vid_writer = cv2.VideoWriter(\n save_path, cv2.VideoWriter_fourcc(*\"mp4v\"), fps, (int(width), int(height))\n )\n while True:\n ret_val, frame = cap.read()\n if ret_val:\n outputs, img_info = predictor.inference(frame)\n result_frame = predictor.visual(outputs[0], img_info, predictor.confthre)\n if args.save_result:\n vid_writer.write(result_frame)\n ch = cv2.waitKey(1)\n if ch == 27 or ch == ord(\"q\") or ch == ord(\"Q\"):\n break\n else:\n break\n\n\ndef main(exp, args):\n if not args.experiment_name:\n args.experiment_name = exp.exp_name\n\n file_name = os.path.join(exp.output_dir, args.experiment_name)\n os.makedirs(file_name, exist_ok=True)\n\n if args.save_result:\n vis_folder = 
os.path.join(file_name, \"vis_res\")\n os.makedirs(vis_folder, exist_ok=True)\n\n if args.trt:\n args.device = \"gpu\"\n\n logger.info(\"Args: {}\".format(args))\n\n if args.conf is not None:\n exp.test_conf = args.conf\n if args.nms is not None:\n exp.nmsthre = args.nms\n if args.tsize is not None:\n exp.test_size = (args.tsize, args.tsize)\n\n model = exp.get_model()\n logger.info(\"Model Summary: {}\".format(get_model_info(model, exp.test_size)))\n\n if args.device == \"gpu\":\n model.cuda()\n model.eval()\n\n if not args.trt:\n if args.ckpt is None:\n ckpt_file = os.path.join(file_name, \"best_ckpt.pth\")\n else:\n ckpt_file = args.ckpt\n logger.info(\"loading checkpoint\")\n ckpt = torch.load(ckpt_file, map_location=\"cpu\")\n # load the model state dict\n model.load_state_dict(ckpt[\"model\"])\n logger.info(\"loaded checkpoint done.\")\n\n if args.fuse:\n logger.info(\"\\tFusing model...\")\n model = fuse_model(model)\n\n if args.trt:\n assert not args.fuse, \"TensorRT model is not support model fusing!\"\n trt_file = os.path.join(file_name, \"model_trt.pth\")\n assert os.path.exists(\n trt_file\n ), \"TensorRT model is not found!\\n Run python3 tools/trt.py first!\"\n model.head.decode_in_inference = False\n decoder = model.head.decode_outputs\n logger.info(\"Using TensorRT to inference\")\n else:\n trt_file = None\n decoder = None\n\n predictor = Predictor(model, exp, VOC_CLASSES, trt_file, decoder, args.device)\n current_time = time.localtime()\n if args.demo == \"image\":\n image_demo(predictor, vis_folder, args.path, current_time, args.save_result)\n elif args.demo == \"video\" or args.demo == \"webcam\":\n imageflow_demo(predictor, vis_folder, current_time, args)\n\n\nif __name__ == \"__main__\":\n args = make_parser().parse_args()\n exp = get_exp(args.exp_file, args.name)\n\n main(exp, args)\n" ]
[ [ "torch.no_grad", "torch.load", "torch.from_numpy" ] ]
sumanth13131/COVID19-Pneumonia-Detection
[ "3bd4d0f8d4c115d14ed2237921e775bafae9642c" ]
[ "helper.py" ]
[ "# #Helper packages \nimport tensorflow as tf\nimport numpy as np\nimport cv2\n\n#decode the image\nimport base64\n\nclass Helper:\n def __init__(self) -> None:\n self.model = tf.keras.models.load_model('./models/Covid_Binary.h5')\n self.classes = ['COVID19 Pneumonia','Normal'] # covid== < 0.5 , normal== > 0.5\n\n def predict(self,bs4string) ->dict:\n '''\n :input -> base64 encoded string\n '''\n #decode image string and pre-processing\n img = base64.b64decode(bs4string)\n img = cv2.imdecode(np.fromstring(img,np.uint8), cv2.IMREAD_ANYCOLOR)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img,(150,150))\n img = img[np.newaxis, :, :, :]\n img = img/255.0\n prob = self.model.predict(img)[0][0] # output from model example: [[0.9891]]\n res = dict()\n if prob > 0.5 : \n res['cls'] = self.classes[1]\n res['acc'] = round(prob*100,2)\n else:\n res['cls'] = self.classes[0]\n res['acc'] = round((1-prob)*100,2)\n return res" ]
[ [ "tensorflow.keras.models.load_model", "numpy.fromstring" ] ]
huangxu96/Paddle
[ "5e59a8666ddde20867c6d976a3720f543b55bf83" ]
[ "python/paddle/fluid/framework.py" ]
[ "# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport collections\nfrom collections import defaultdict\nfrom collections import Iterable\nimport contextlib\nfrom .wrapped_decorator import signature_safe_contextmanager, wrap_decorator\nimport os\nimport re\nimport traceback\nimport six\nimport copy\n\nimport numpy as np\nimport subprocess\nimport multiprocessing\nimport sys\nimport logging\nfrom .. import compat as cpt\nfrom .proto import framework_pb2\n\nfrom . import core\nfrom . import unique_name\nimport paddle.version as fluid_version\nimport warnings\nimport functools\n\n__all__ = [\n 'Program',\n 'default_startup_program',\n 'default_main_program',\n 'program_guard',\n 'name_scope',\n 'cuda_places',\n 'cpu_places',\n 'xpu_places',\n 'cuda_pinned_places',\n 'in_dygraph_mode',\n 'is_compiled_with_cuda',\n 'is_compiled_with_xpu',\n 'Variable',\n 'load_op_library',\n 'require_version',\n 'device_guard',\n 'set_flags',\n 'get_flags',\n]\n\nEMPTY_VAR_NAME = core.kEmptyVarName()\nTEMP_VAR_NAME = core.kTempVarName()\nGRAD_VAR_SUFFIX = core.kGradVarSuffix()\nZERO_VAR_SUFFIX = core.kZeroVarSuffix()\nCONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()\n\n_dygraph_tracer_ = None\n_global_expected_place_ = None\n_current_device = None\nglobal_prog_seed = 0\n\n\ndef require_version(min_version, max_version=None):\n \"\"\"\n Check if the installed version of PaddlePaddle is in [min_version, max_version],\n if the installed version is lower than ``min_version`` or higher than ``max_version``,\n an exception will be thrown, NO returns if the installed version is satisfied.\n\n Args:\n min_version (str): the minimum version required (like '1.4.0').\n max_version (str, optional): the max version required (like '1.6.0'), default is None,\n meaning any version equal or higher than ``min_version`` is acceptable.\n\n Returns:\n None.\n\n Raises:\n TypeError: if the type of ``min_version`` is not str.\n TypeError: if the type of ``max_version`` is not str or type(None).\n ValueError: if the value of ``min_version`` is not in version format.\n ValueError: if the value of ``max_version`` is not in version format or None.\n Exception: if the installed version is lower than ``min_version`` or higher than ``max_version``.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n\n # any version >= 0.1.0 is acceptable.\n fluid.require_version('0.1.0')\n\n # if 0.1.0 <= version <= 10.0.0, it is acceptable.\n fluid.require_version(min_version='0.1.0', max_version='10.0.0')\n \"\"\"\n if not isinstance(min_version, str):\n raise TypeError(\n \"The type of 'min_version' in require_version must be str, but received %s.\"\n % (type(min_version)))\n\n if not isinstance(max_version, (str, type(None))):\n raise TypeError(\n \"The type of 'max_version' in require_version must be str or type(None), but received %s.\"\n % (type(max_version)))\n\n check_format = re.match(r'\\d+(\\.\\d+){0,3}', min_version)\n if check_format is None or check_format.group() != min_version:\n raise ValueError(\n \"The value of 'min_version' in require_version must be in format '\\\\d+(\\\\.\\\\d+){0,3}', \"\n \"like '1.5.2.0', but received %s\" % min_version)\n\n if max_version is not None:\n check_format = re.match(r'\\d+(\\.\\d+){0,3}', max_version)\n if check_format is None or check_format.group() != max_version:\n raise ValueError(\n \"The value of 'max_version' in require_version must be in format '\\\\d+(\\\\.\\\\d+){0,3}', \"\n \"like '1.5.2.0', but received %s\" % max_version)\n\n version_installed = [\n fluid_version.major, fluid_version.minor, fluid_version.patch,\n fluid_version.rc\n ]\n zero_version = ['0', '0', '0', '0']\n\n def version_cmp(ver_a, ver_b):\n for i in six.moves.range(len(ver_a)):\n if int(ver_a[i]) > int(ver_b[i]):\n return 1\n elif int(ver_a[i]) < int(ver_b[i]):\n return -1\n return 0\n\n if version_cmp(version_installed, zero_version) == 0:\n if max_version is not None:\n warnings.warn(\n \"PaddlePaddle version in [%s, %s] required, but %s installed. \"\n \"Maybe you are using a develop version, \"\n \"please make sure the version is good with your code.\" %\n (min_version, max_version, fluid_version.full_version))\n else:\n warnings.warn(\n \"PaddlePaddle version %s or higher is required, but %s installed, \"\n \"Maybe you are using a develop version, \"\n \"please make sure the version is good with your code.\" %\n (min_version, fluid_version.full_version))\n return\n\n min_version_split = min_version.split('.')\n min_version_to_check = min_version_split + zero_version[len(\n min_version_split):]\n\n if max_version is not None:\n max_version_split = max_version.split('.')\n max_version_to_check = max_version_split + zero_version[len(\n max_version_split):]\n\n if version_cmp(version_installed,\n max_version_to_check) > 0 or version_cmp(\n version_installed, min_version_to_check) < 0:\n raise Exception(\n \"VersionError: PaddlePaddle version in [%s, %s] required, but %s installed.\"\n % (min_version, max_version, fluid_version.full_version))\n else:\n if version_cmp(version_installed, min_version_to_check) < 0:\n raise Exception(\n \"VersionError: PaddlePaddle version %s or higher is required, but %s installed, \"\n \"please upgrade your PaddlePaddle to %s or other higher version.\"\n % (min_version, fluid_version.full_version, min_version))\n\n\ndef in_dygraph_mode():\n \"\"\"\n\n .. note::\n Dynamic graph mode is turn ON by default since paddle 2.0.0\n\n This API checks whether paddle runs in dynamic graph mode.\n\n You can turn ON static graph mode by `enable_static <../dygraph/base/disable_dygraph_en.html>`_ ,\n and turn OFF static graph mode by `disable_static <../dygraph/base/enable_dygraph_en.html>`_ .\n\n Returns:\n bool: Whether paddle runs in dynamic graph mode.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n print(paddle.in_dynamic_mode()) # True, dynamic mode is turned ON by default since paddle 2.0.0\n\n paddle.enable_static()\n print(paddle.in_dynamic_mode()) # False, now we are in static mode\n\n paddle.disable_static()\n print(paddle.in_dynamic_mode()) # True, now we are in dynamic mode\n\n \"\"\"\n return _dygraph_tracer_ is not None\n\n\ndef _dygraph_not_support_(func):\n def __impl__(*args, **kwargs):\n assert not in_dygraph_mode(\n ), \"We don't support %s in imperative mode\" % func.__name__\n return func(*args, **kwargs)\n\n return __impl__\n\n\ndef _dygraph_only_(func):\n def __impl__(*args, **kwargs):\n assert in_dygraph_mode(\n ), \"We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode.\" % func.__name__\n return func(*args, **kwargs)\n\n return __impl__\n\n\ndef _static_only_(func):\n def __impl__(*args, **kwargs):\n assert not in_dygraph_mode(\n ), \"In PaddlePaddle 2.x, we turn on dynamic graph mode by default, and '%s()' is only supported in static graph mode. So if you want to use this api, please call 'paddle.enable_static()' before this api to enter static graph mode.\" % func.__name__\n return func(*args, **kwargs)\n\n return __impl__\n\n\n# NOTE(zhiqiu): This decorator is used for the APIs of Variable which are only\n# used to make Variable and VarBase have the same interfaces, like numpy. Since VarBase is not exposed in our\n# official documents, we want to keep VarBase and Variable logically consistent. While, actually,\n# in our implementation, there are some APIs not supported, like numpy, because Variable contains the desc.\n# So, those APIs are listed under class Variable to generate docs only.\n# TODO(zhiqiu): We should make VarBase consistent with Variable in future, for example, by inheriting\n# the same base class.\ndef _fake_interface_only_(func):\n def __impl__(*args, **kwargs):\n raise AssertionError(\n \"'%s' should be called by imperative Variable in imperative mode, please run it in dygraph \"\n \"mode. You can turn off paddle.enable_static() if you are in static mode, or turn off \"\n \"ProgramTranslator if you are using @paddle.jit.to_static.
If you have to run ProgramTranslator, \"\n \"please use another API to replace '%s'\" % (func.__name__,\n func.__name__))\n\n return __impl__\n\n\n# NOTE(chenweihang): There is an argument name typo (stat_dict, correct name is state_dict)\n# in fluid api Layer.set_dict, Optimizer.load; in order to correct the argument without\n# introducing compatibility issues, add this decorator\n# NOTE(chenweihang): not using `wrap_decorator` here is because `wrap_decorator` will\n# move kwargs to args, which doesn't work in this decorator case\ndef deprecate_stat_dict(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if 'stat_dict' in kwargs:\n warnings.warn(\n \"The argument `stat_dict` has been deprecated, please change it to `state_dict`.\",\n DeprecationWarning)\n kwargs['state_dict'] = kwargs['stat_dict']\n kwargs.pop('stat_dict')\n return func(*args, **kwargs)\n\n return wrapper\n\n\ndygraph_not_support = wrap_decorator(_dygraph_not_support_)\ndygraph_only = wrap_decorator(_dygraph_only_)\nstatic_only = wrap_decorator(_static_only_)\nfake_interface_only = wrap_decorator(_fake_interface_only_)\n\n\ndef _dygraph_tracer():\n return _dygraph_tracer_\n\n\ndef _current_expected_place():\n global _global_expected_place_\n if _global_expected_place_ is None:\n if core.is_compiled_with_cuda():\n try:\n device_count = core.get_cuda_device_count()\n except Exception as e:\n device_count = 0\n if device_count > 0:\n _global_expected_place_ = core.CUDAPlace(0)\n else:\n warnings.warn(\n \"You are using GPU version Paddle, but your CUDA device is not set properly. CPU device will be used by default.\"\n )\n _global_expected_place_ = core.CPUPlace()\n else:\n _global_expected_place_ = core.CPUPlace()\n\n return _global_expected_place_\n\n\ndef _set_dygraph_tracer_expected_place(place):\n global _dygraph_tracer_\n if _dygraph_tracer_ is not None:\n _dygraph_tracer_._expected_place = place\n\n\ndef _set_expected_place(place):\n global _global_expected_place_\n _global_expected_place_ = place\n _set_dygraph_tracer_expected_place(place)\n\n\n# TODO(zhiqiu): remove this function.\ndef _var_base_to_np(var_base):\n \"\"\"\t\n convert VarBase to numpy\t\n\n Args:\t\n var_base(VarBase) : the VarBase to convert\t\n Returns (np.ndarray): the np.ndarray containing the value of VarBase\t\n \"\"\"\n\n warnings.warn(\n \"paddle.fluid.framework._var_base_to_np is deprecated, please use var_base.numpy() instead of _var_base_to_np(var_base).\"\n )\n\n return var_base.numpy()\n\n\ndef _cpu_num():\n if \"CPU_NUM\" not in os.environ.keys():\n if multiprocessing.cpu_count() > 1:\n sys.stderr.write(\n '!!! The CPU_NUM is not specified, you should set CPU_NUM in the environment variable list.\\n'\n 'CPU_NUM indicates how many CPUPlace are used in the current task.\\n'\n 'And if this parameter is set as N (equal to the number of physical CPU cores) the program may be faster.\\n\\n'\n 'export CPU_NUM={} # for example, set CPU_NUM as number of physical CPU core which is {}.\\n\\n'\n '!!!
The default number of CPU_NUM=1.\\n'.format(\n multiprocessing.cpu_count(), multiprocessing.cpu_count()))\n os.environ['CPU_NUM'] = str(1)\n cpu_num = os.environ.get('CPU_NUM')\n return int(cpu_num)\n\n\ndef _cuda_ids():\n gpus_env = os.getenv(\"FLAGS_selected_gpus\")\n if gpus_env:\n device_ids = [int(s) for s in gpus_env.split(\",\")]\n else:\n device_ids = six.moves.range(core.get_cuda_device_count())\n return device_ids\n\n\ndef _xpu_ids():\n xpus_env = os.getenv(\"FLAGS_selected_xpus\")\n if xpus_env:\n device_ids = [int(s) for s in xpus_env.split(\",\")]\n else:\n device_ids = six.moves.range(core.get_xpu_device_count())\n return device_ids\n\n\ndef is_compiled_with_xpu():\n \"\"\"\n Whether this whl package can be used to run the model on XPU.\n\n Returns (bool): support xpu or not.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n support_xpu = fluid.is_compiled_with_xpu()\n \"\"\"\n return core.is_compiled_with_xpu()\n\n\ndef is_compiled_with_cuda():\n \"\"\"\n Whether this whl package can be used to run the model on GPU.\n\n Returns (bool): `True` if CUDA is currently available, otherwise `False`.\n\n Examples:\n .. code-block:: python\n\n import paddle\n support_gpu = paddle.is_compiled_with_cuda()\n \"\"\"\n return core.is_compiled_with_cuda()\n\n\ndef cuda_places(device_ids=None):\n \"\"\"\n **Note**:\n For multi-card tasks, please use `FLAGS_selected_gpus` environment variable to set the visible GPU device.\n The next version will fix the problem with `CUDA_VISIBLE_DEVICES` environment variable.\n\n This function creates a list of :code:`paddle.CUDAPlace` objects.\n\n If :code:`device_ids` is None, environment variable of\n :code:`FLAGS_selected_gpus` would be checked first. For example, if\n :code:`FLAGS_selected_gpus=0,1,2`, the returned list would\n be [paddle.CUDAPlace(0), paddle.CUDAPlace(1), paddle.CUDAPlace(2)].\n If :code:`FLAGS_selected_gpus` is not set, all visible\n gpu places would be returned according to the :code:`CUDA_VISIBLE_DEVICES` environment variable.\n\n If :code:`device_ids` is not None, it should be the device\n ids of GPUs. For example, if :code:`device_ids=[0,1,2]`,\n the returned list would be \n [paddle.CUDAPlace(0), paddle.CUDAPlace(1), paddle.CUDAPlace(2)].\n\n Parameters:\n device_ids (list or tuple of int, optional): list of GPU device ids.\n\n Returns:\n list of paddle.CUDAPlace: Created GPU place list.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n cuda_places = static.cuda_places()\n\n \"\"\"\n assert core.is_compiled_with_cuda(), \\\n \"Not compiled with CUDA\"\n if device_ids is None:\n device_ids = _cuda_ids()\n elif not isinstance(device_ids, (list, tuple)):\n device_ids = [device_ids]\n return [core.CUDAPlace(dev_id) for dev_id in device_ids]\n\n\ndef xpu_places(device_ids=None):\n \"\"\"\n **Note**:\n For multi-card tasks, please use `FLAGS_selected_xpus` environment variable to set the visible XPU device.\n This function creates a list of :code:`paddle.XPUPlace` objects.\n If :code:`device_ids` is None, environment variable of\n :code:`FLAGS_selected_xpus` would be checked first. For example, if\n :code:`FLAGS_selected_xpus=0,1,2`, the returned list would\n be [paddle.XPUPlace(0), paddle.XPUPlace(1), paddle.XPUPlace(2)].\n If :code:`FLAGS_selected_xpus` is not set, all visible\n xpu places would be returned.\n If :code:`device_ids` is not None, it should be the device\n ids of XPUs. 
For example, if :code:`device_ids=[0,1,2]`,\n the returned list would be \n [paddle.XPUPlace(0), paddle.XPUPlace(1), paddle.XPUPlace(2)].\n \n Parameters:\n device_ids (list or tuple of int, optional): list of XPU device ids.\n Returns:\n list of paddle.XPUPlace: Created XPU place list.\n Examples:\n .. code-block:: python\n \n import paddle\n import paddle.static as static\n \n paddle.enable_static()\n xpu_places = static.xpu_places()\n \"\"\"\n assert core.is_compiled_with_xpu(), \\\n \"Not compiled with XPU\"\n if device_ids is None:\n device_ids = _xpu_ids()\n elif not isinstance(device_ids, (list, tuple)):\n device_ids = [device_ids]\n return [core.XPUPlace(dev_id) for dev_id in device_ids]\n\n\ndef cpu_places(device_count=None):\n \"\"\"\n This function creates a list of :code:`paddle.CPUPlace` objects, and returns the created list.\n\n If :code:`device_count` is None, the device count would\n be determined by environment variable :code:`CPU_NUM`. \n If :code:`CPU_NUM` is not set, the default value is 1,\n i.e. CPU_NUM=1.\n :code:`CPU_NUM` indicates the number of devices used in the current task.\n The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.\n\n Parameters:\n device_count (int, optional): device number. Default: None.\n\n Returns:\n list of paddle.CPUPlace: Created list of CPU places.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n cpu_places = static.cpu_places()\n \"\"\"\n\n if device_count is None:\n device_count = _cpu_num()\n return [core.CPUPlace()] * device_count\n\n\ndef cuda_pinned_places(device_count=None):\n \"\"\"\n This function creates a list of :code:`fluid.CUDAPinnedPlace` objects.\n\n If :code:`device_count` is None, the device count would\n be determined by environment variable :code:`CPU_NUM`. \n If :code:`CPU_NUM` is not set, the default value is 1,\n i.e. CPU_NUM=1.\n :code:`CPU_NUM` indicates the number of devices used in the current task.\n The running of the program can be accelerated if :code:`CPU_NUM` is the same as the number of physical cores.\n\n Parameters:\n device_count (int, optional): device number. Default: None.\n\n Returns:\n list of fluid.CUDAPinnedPlace: Created list of CUDA pinned places.\n\n Examples:\n .. 
code-block:: python

            import paddle.fluid as fluid
            cuda_pinned_places_cpu_num = fluid.cuda_pinned_places()
            # or
            cuda_pinned_places = fluid.cuda_pinned_places(1)

    """
    assert core.is_compiled_with_cuda(), \
        "Not compiled with CUDA"
    if device_count is None:
        device_count = len(_cuda_ids())
    return [core.CUDAPinnedPlace()] * device_count


class NameScope(object):
    def __init__(self, name="", parent=None):
        self._children = dict()
        self._name = name
        self._parent = parent

    def child(self, prefix):
        if prefix not in self._children:
            new_child = NameScope(prefix, self)
            self._children[prefix] = [new_child]
        else:
            new_child = NameScope(prefix + "_%d" % len(self._children[prefix]),
                                  self)
            self._children[prefix].append(new_child)
        return new_child

    def parent(self):
        return self._parent

    def name(self):
        return self._name


_name_scope = NameScope()


@signature_safe_contextmanager
def name_scope(prefix=None):
    """
    :api_attr: Static Graph

    Generate a hierarchical name prefix for the operators in Static Graph.

    Note:
        This should only be used for debugging and visualization purposes.
        Don't use it for serious analysis such as graph/program transformations.
        Don't use it in dygraph mode, since it will cause a memory leak.

    Args:
        prefix(str, optional): prefix. Default is None.

    Examples:
        .. code-block:: python

          import paddle
          paddle.enable_static()
          with paddle.static.name_scope("s1"):
             a = paddle.static.data(name='data', shape=[None, 1], dtype='int32')
             b = a + 1
             with paddle.static.name_scope("s2"):
                c = b * 1
             with paddle.static.name_scope("s3"):
                d = c / 1
          with paddle.static.name_scope("s1"):
                f = paddle.tensor.pow(d, 2.0)
          with paddle.static.name_scope("s4"):
                g = f - 1

          # Ops are created in the default main program.
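          # Note: entering name_scope("s1") a second time does not reuse the
          # first scope; NameScope.child() appends an index suffix, so the
          # second "s1" is recorded as "s1_1" (see the pow op check below).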
\n for op in paddle.static.default_main_program().block(0).ops:\n # elementwise_add is created in /s1/\n if op.type == 'elementwise_add':\n assert op.desc.attr(\"op_namescope\") == '/s1/'\n # elementwise_mul is created in '/s1/s2'\n elif op.type == 'elementwise_mul':\n assert op.desc.attr(\"op_namescope\") == '/s1/s2/'\n # elementwise_div is created in '/s1/s3'\n elif op.type == 'elementwise_div':\n assert op.desc.attr(\"op_namescope\") == '/s1/s3/'\n # elementwise_sum is created in '/s4'\n elif op.type == 'elementwise_sub':\n assert op.desc.attr(\"op_namescope\") == '/s4/'\n # pow is created in /s1_1/\n elif op.type == 'pow':\n assert op.desc.attr(\"op_namescope\") == '/s1_1/'\n \"\"\"\n # TODO(panyx0718): Only [0-9a-z].\n # in dygraph we don't need namescope since it will cause mem leak\n if in_dygraph_mode():\n yield\n else:\n assert prefix, \"namescope prefix can not be empty.\"\n global _name_scope\n _name_scope = _name_scope.child(prefix)\n try:\n yield\n finally:\n _name_scope = _name_scope.parent()\n\n\ndef _full_name_scope():\n global _name_scope\n scope = _name_scope\n name = \"\"\n while scope:\n name = scope.name() + \"/\" + name\n scope = scope.parent()\n return name\n\n\ndef generate_control_dev_var_name():\n import random\n return CONTROL_DEP_VAR_PREFIX + \"@\" + str(random.random())\n\n\ndef grad_var_name(var_name):\n \"\"\"\n Returns:\n str: gradient name for a certain var name\n \"\"\"\n return var_name + GRAD_VAR_SUFFIX\n\n\ndef convert_np_dtype_to_dtype_(np_dtype):\n \"\"\"\n Convert the data type in numpy to the data type in Paddle\n\n Args:\n np_dtype(np.dtype): the data type in numpy.\n\n Returns:\n core.VarDesc.VarType: the data type in Paddle.\n\n \"\"\"\n dtype = np.dtype(np_dtype)\n if dtype == np.float32:\n return core.VarDesc.VarType.FP32\n elif dtype == np.float64:\n return core.VarDesc.VarType.FP64\n elif dtype == np.float16:\n return core.VarDesc.VarType.FP16\n elif dtype == np.int32:\n return core.VarDesc.VarType.INT32\n elif dtype == np.int16:\n return core.VarDesc.VarType.INT16\n elif dtype == np.int64:\n return core.VarDesc.VarType.INT64\n elif dtype == np.bool:\n return core.VarDesc.VarType.BOOL\n elif dtype == np.uint16:\n # since there is still no support for bfloat16 in NumPy,\n # uint16 is used for casting bfloat16\n return core.VarDesc.VarType.BF16\n elif dtype == np.uint8:\n return core.VarDesc.VarType.UINT8\n elif dtype == np.int8:\n return core.VarDesc.VarType.INT8\n elif dtype == np.complex64:\n return core.VarDesc.VarType.COMPLEX64\n elif dtype == np.complex128:\n return core.VarDesc.VarType.COMPLEX128\n else:\n raise ValueError(\"Not supported numpy dtype %s\" % dtype)\n\n\ndef dtype_is_floating(dtype):\n \"\"\"\n Check the data type is floating or not.\n Args:\n dtype(np.dtype|core.VarDesc.VarType): data type.\n Could be numpy format or Paddle format\n\n Returns(bool): True if data type is a float value\n\n \"\"\"\n if not isinstance(dtype, core.VarDesc.VarType):\n dtype = convert_np_dtype_to_dtype_(dtype)\n\n return dtype in [\n core.VarDesc.VarType.FP16, core.VarDesc.VarType.FP32,\n core.VarDesc.VarType.FP64\n ]\n\n\ndef _debug_string_(proto, throw_on_error=True):\n \"\"\"\n Get the debug string of a protobuf message. 
The message could be not\n initialized.\n Args:\n proto(google.protobuf.message.Message): The protobuf message\n throw_on_error(bool): True if raise an error when the protobuf message\n is not initialized.\n\n Returns(str): The debug string of the protobuf message\n\n \"\"\"\n error_fields = list()\n if not proto.IsInitialized(error_fields) and throw_on_error:\n raise ValueError(\"{0} are not initialized.\\nThe message is {1}:\\n\".\n format(error_fields, proto))\n return proto.__str__()\n\n\ndef _varbase_creator(type=core.VarDesc.VarType.LOD_TENSOR,\n name=None,\n shape=None,\n dtype=None,\n persistable=None,\n **kwargs):\n if dtype is not None:\n if not isinstance(dtype, core.VarDesc.VarType):\n dtype = convert_np_dtype_to_dtype_(dtype)\n\n return core.VarBase(dtype if dtype else core.VarDesc.VarType.FP32,\n list(shape) if shape else [], name, type\n if type else core.VarDesc.VarType.LOD_TENSOR, True\n if persistable else False)\n\n\nclass VariableMetaClass(type):\n @classmethod\n def __instancecheck__(cls, instance):\n t = type(instance)\n if in_dygraph_mode():\n return issubclass(t, core.VarBase)\n else:\n return issubclass(t, Variable)\n\n\nclass ParameterMetaClass(VariableMetaClass):\n @classmethod\n def __instancecheck__(cls, instance):\n t = type(instance)\n if in_dygraph_mode():\n return issubclass(t, ParamBase)\n else:\n return issubclass(t, Parameter)\n\n\ndef _getitem_impl_(var, item):\n \"\"\"\n Slice the variable.\n\n Args:\n item(int/slice/tuple) : the index.\n\n Returns:\n Sliced variable\n \"\"\"\n\n if not isinstance(item, tuple):\n item = [item]\n\n decrease_axis = []\n slice_axis = []\n slice_start = []\n slice_end = []\n slice_step = []\n use_strided_slice = False\n reverse_axis = []\n target_block = default_main_program().current_block()\n\n def fill_constant(shape, value, force_cpu=False, out=None):\n var.block.append_op(\n type='fill_constant',\n inputs={},\n outputs={'Out': [out]},\n attrs={\n 'shape': shape,\n 'dtype': out.dtype,\n 'value': float(value),\n 'force_cpu': force_cpu\n })\n out.stop_gradient = True\n return out\n\n for dim, slice_item in enumerate(item):\n if isinstance(slice_item, slice):\n start = slice_item.start\n end = slice_item.stop\n step = slice_item.step\n\n if start is None and end is None and step is None:\n continue\n\n if step is None:\n step = 1\n\n if start is None and end is None:\n assert (step == -1)\n reverse_axis.append(dim)\n continue\n\n if start is None:\n start = 0\n\n if end is None:\n end = 10000000\n\n if step != 1:\n use_strided_slice = True\n\n slice_axis.append(dim)\n slice_start.append(start)\n slice_end.append(end)\n slice_step.append(step)\n else:\n decrease_axis.append(dim)\n slice_axis.append(dim)\n slice_start.append(slice_item)\n slice_step.append(1)\n if isinstance(slice_item, Variable):\n temp_1 = var.block.create_var(dtype=slice_item.dtype)\n fill_constant([1], 1, force_cpu=True, out=temp_1)\n temp_end = target_block.create_var(dtype=slice_item.dtype)\n target_block.append_op(\n type='elementwise_add',\n inputs={'X': slice_item,\n 'Y': temp_1},\n outputs={'Out': temp_end},\n attrs={'axis': -1})\n slice_end.append(temp_end)\n else:\n slice_end.append(slice_item + 1\n if slice_item != -1 else 10000000)\n\n def contain_var(one_list):\n for ele in one_list:\n if isinstance(ele, Variable):\n return True\n return False\n\n def get_new_list_tensor(old_list):\n new_list_tensor = []\n for dim in old_list:\n if isinstance(dim, Variable):\n dim.stop_gradient = True\n new_list_tensor.append(dim)\n else:\n assert 
(isinstance(dim, int))\n temp_out = var.block.create_var(dtype='int64')\n fill_constant([1], dim, force_cpu=True, out=temp_out)\n new_list_tensor.append(temp_out)\n return new_list_tensor\n\n inputs = {'Input': [var]}\n attrs = {\n 'axes': slice_axis,\n 'starts': [],\n 'ends': [],\n 'decrease_axis': decrease_axis\n }\n if (use_strided_slice == True):\n attrs['strides'] = []\n infer_flags = list(1 for i in range(len(slice_axis)))\n\n # starts\n if contain_var(slice_start):\n inputs['StartsTensorList'] = get_new_list_tensor(slice_start)\n for i, dim in enumerate(slice_start):\n if isinstance(dim, Variable):\n attrs['starts'].append(-1)\n infer_flags[i] = -1\n else:\n attrs['starts'].append(dim)\n else:\n attrs['starts'] = slice_start\n\n # ends\n if contain_var(slice_end):\n inputs['EndsTensorList'] = get_new_list_tensor(slice_end)\n for i, dim in enumerate(slice_end):\n if isinstance(dim, Variable):\n attrs['ends'].append(-1)\n infer_flags[i] = -1\n else:\n attrs['ends'].append(dim)\n else:\n attrs['ends'] = slice_end\n\n # strides\n if use_strided_slice == True:\n if contain_var(slice_step):\n inputs['StridesTensorList'] = get_new_list_tensor(slice_step)\n for i, dim in enumerate(slice_step):\n if isinstance(dim, Variable):\n attrs['strides'].append(-1)\n infer_flags[i] = -1\n else:\n attrs['strides'].append(dim)\n else:\n attrs['strides'] = slice_step\n # infer_flags\n attrs['infer_flags'] = infer_flags\n\n out = var\n if use_strided_slice == False and len(slice_axis) > 0:\n # append slice_op here\n slice_out_var = target_block.create_var(\n name=unique_name.generate_with_ignorable_key(var.name + \"_slice\"),\n dtype=var.dtype)\n\n target_block.append_op(\n type=\"slice\",\n inputs=inputs,\n outputs={'Out': [slice_out_var]},\n attrs=attrs)\n\n out = slice_out_var\n elif use_strided_slice == True and len(slice_axis) > 0:\n strided_slice_out_var = target_block.create_var(\n name=unique_name.generate_with_ignorable_key(var.name +\n \"_strided_slice\"),\n dtype=var.dtype)\n target_block.append_op(\n type=\"strided_slice\",\n inputs=inputs,\n outputs={'Out': [strided_slice_out_var]},\n attrs=attrs)\n\n out = strided_slice_out_var\n\n if len(reverse_axis) > 0:\n reverse_out_var = target_block.create_var(\n name=unique_name.generate_with_ignorable_key(var.name +\n \"_slice_reverse\"),\n dtype=var.dtype)\n target_block.append_op(\n type=\"reverse\",\n inputs={'X': out},\n outputs={'Out': [reverse_out_var]},\n attrs={'axis': reverse_axis})\n\n out = reverse_out_var\n\n return out\n\n\[email protected]_metaclass(VariableMetaClass)\nclass Variable(object):\n \"\"\"\n **Notes**:\n **The constructor of Variable should not be invoked directly.**\n\n **In Static Graph Mode: Please use** `Block.create_var` **to create a Static variable which has no data until being feed.**\n\n **In Dygraph Mode: Please use** :ref:`api_fluid_dygraph_to_variable` **to create a dygraph variable with real data**\n\n In Fluid, every input and output of an OP is a variable. In most\n cases, variables are used for holding different kinds of data or training\n labels. A variable belongs to a :ref:`api_guide_Block_en` . All variable has its own name and\n two variables in different :ref:`api_guide_Block_en` could have the same name.\n\n There are many kinds of variables. Each kind of them has its own attributes\n and usages. 
Please refer to the `framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_ for details.\n\n Most of a Variable's member variables can be set to be None. It mean\n it is not available or will be specified later.\n\n Examples:\n In Static Graph Mode:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n In `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ Mode:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n with fluid.dygraph.guard():\n new_variable = fluid.dygraph.to_variable(np.arange(10))\n\n \"\"\"\n\n def __init__(self,\n block,\n type=core.VarDesc.VarType.LOD_TENSOR,\n name=None,\n shape=None,\n dtype=None,\n lod_level=None,\n capacity=None,\n persistable=None,\n error_clip=None,\n stop_gradient=False,\n is_data=False,\n need_check_feed=False,\n belong_to_optimizer=False,\n **kwargs):\n self.block = block\n if name is None:\n name = unique_name.generate('_generated_var')\n\n if dtype is not None:\n if not isinstance(dtype, core.VarDesc.VarType):\n dtype = convert_np_dtype_to_dtype_(dtype)\n\n self.belong_to_optimizer = belong_to_optimizer\n\n self.error_clip = error_clip\n\n is_new_var = False\n name = cpt.to_text(name)\n self.desc = self.block.desc.find_var(cpt.to_bytes(name))\n\n if self.desc is None:\n self.desc = self.block.desc.var(cpt.to_bytes(name))\n is_new_var = True\n\n if is_new_var:\n self.desc.set_type(type)\n elif self.desc.type() != type:\n raise ValueError(\"Variable '{0}' has been created before. The \"\n \"previous type is {1}, the new type is {2}. They\"\n \" are not matched\".format(self.name,\n self.desc.type(), type))\n\n if shape is not None:\n if is_new_var:\n self.desc.set_shape(shape)\n else:\n old_shape = self.shape\n shape = tuple(shape)\n if shape != old_shape:\n raise ValueError(\n \"Variable '{0}' has been created before. The previous \"\n \"shape is {1}, the new shape is {2}. They are not \"\n \"matched.\".format(self.name, old_shape, shape))\n if dtype is not None:\n if is_new_var:\n self.desc.set_dtype(dtype)\n else:\n old_dtype = self.dtype\n if dtype != old_dtype:\n raise ValueError(\"Variable '{0}' has been created before. \"\n \"The previous data type is {1}, the new \"\n \"data type is {2}. They are not \"\n \"matched.\".format(self.name, old_dtype,\n dtype))\n\n if lod_level is not None:\n if is_new_var:\n self.desc.set_lod_level(lod_level)\n else:\n if lod_level != self.lod_level:\n raise ValueError(\"Variable '{0}' has been created before. \"\n \"The previous lod_level is {1}, the new \"\n \"lod_level is {2}. They are not \"\n \"matched\".format(self.name, self.lod_level,\n lod_level))\n if persistable is not None:\n if is_new_var:\n self.desc.set_persistable(persistable)\n else:\n if persistable != self.persistable:\n raise ValueError(\n \"Variable '{0}' has been created before.\"\n \"The previous persistable is {1}, the new \"\n \"persistable is {2}. 
They are not matched\".format(\n self.name, self.persistable, persistable))\n\n if need_check_feed and is_new_var:\n self.desc.set_need_check_feed(need_check_feed)\n\n if capacity is not None:\n if is_new_var:\n self.desc.set_capacity(capacity)\n else:\n # TODO(abhinavarora) : Compare with set capacity once,\n # get_capacity is implemented\n pass\n\n self.block.vars[name] = self\n self.op = None\n self._stop_gradient = stop_gradient\n self.is_data = is_data\n\n @fake_interface_only\n def detach(self):\n \"\"\"\n **Notes**:\n **This API is ONLY available in Dygraph mode**\n\n Returns a new Variable, detached from the current graph.\n\n Returns:\n ( :ref:`api_guide_Variable_en` | dtype is same as current Variable): The detached Variable.\n\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph.base import to_variable\n from paddle.fluid.dygraph import Linear\n import numpy as np\n\n data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')\n with fluid.dygraph.guard():\n linear = Linear(32, 64)\n data = to_variable(data)\n x = linear(data)\n y = x.detach()\n\n \"\"\"\n pass\n\n @fake_interface_only\n def numpy(self):\n \"\"\"\n **Notes**:\n **This API is ONLY available in Dygraph mode**\n\n Returns a numpy array shows the value of current :ref:`api_guide_Variable_en`\n\n Returns:\n ndarray: The numpy value of current Variable.\n\n Returns type:\n ndarray: dtype is same as current Variable\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph.base import to_variable\n from paddle.fluid.dygraph import Linear\n import numpy as np\n\n data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32')\n with fluid.dygraph.guard():\n linear = Linear(32, 64)\n data = to_variable(data)\n x = linear(data)\n print(x.numpy())\n\n \"\"\"\n pass\n\n @fake_interface_only\n def set_value(self, value):\n \"\"\"\n **Notes**:\n **This API is ONLY available in Dygraph mode**\n\n Set a new value for this Variable.\n\n Args:\n value (Variable|np.ndarray): the new value.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n from paddle.fluid.dygraph.base import to_variable\n from paddle.fluid.dygraph import Linear\n import numpy as np\n\n data = np.ones([3, 1024], dtype='float32')\n with fluid.dygraph.guard():\n linear = fluid.dygraph.Linear(1024, 4)\n t = to_variable(data)\n linear(t) # call with default weight\n custom_weight = np.random.randn(1024, 4).astype(\"float32\")\n linear.weight.set_value(custom_weight) # change existing weight\n out = linear(t) # call with different weight\n\n \"\"\"\n pass\n\n @fake_interface_only\n def backward(self, retain_graph=False):\n \"\"\"\n **Notes**:\n **This API is ONLY available in Dygraph mode**\n\n Run backward of current Graph which starts from current Tensor.\n\n Args:\n retain_graph(bool, optional): If False, the graph used to compute grads will be freed. If you would\n like to add more ops to the built graph after calling this method( :code:`backward` ), set the parameter\n :code:`retain_graph` to True, then the grads will be retained. Thus, seting it to False is much more memory-efficient.\n Defaults to False.\n\n Returns:\n NoneType: None\n\n Examples:\n .. 
code-block:: python\n\n import numpy as np\n import paddle\n paddle.disable_static()\n\n x = np.ones([2, 2], np.float32)\n inputs = []\n for _ in range(10):\n tmp = paddle.to_tensor(x)\n # if we don't set tmp's stop_gradient as False then, all path to loss will has no gradient since\n # there is no one need gradient on it.\n tmp.stop_gradient=False\n inputs.append(tmp)\n ret = paddle.add_n(inputs)\n loss = paddle.sum(ret)\n loss.backward()\n\n \"\"\"\n pass\n\n @fake_interface_only\n def gradient(self):\n \"\"\"\n **Notes**:\n **This API is ONLY available in Dygraph mode**\n\n Get the Gradient of Current Variable\n\n Returns:\n ndarray or tuple of ndarray: if Variable's type is LoDTensor, return numpy value of the gradient of current Variable, if Variable's type is SelectedRows, return tuple of ndarray, first element of tuple is numpy value of the gradient of current Variable, second element of tuple is numpy value of the rows of current Variable.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n # example1: return ndarray\n x = np.ones([2, 2], np.float32)\n with fluid.dygraph.guard():\n inputs2 = []\n for _ in range(10):\n tmp = fluid.dygraph.base.to_variable(x)\n tmp.stop_gradient=False\n inputs2.append(tmp)\n ret2 = fluid.layers.sums(inputs2)\n loss2 = fluid.layers.reduce_sum(ret2)\n loss2.backward()\n print(loss2.gradient())\n\n # example2: return tuple of ndarray\n with fluid.dygraph.guard():\n embedding = fluid.dygraph.Embedding(\n size=[20, 32],\n param_attr='emb.w',\n is_sparse=True)\n x_data = np.arange(12).reshape(4, 3).astype('int64')\n x_data = x_data.reshape((-1, 3, 1))\n x = fluid.dygraph.base.to_variable(x_data)\n out = embedding(x)\n out.backward()\n print(embedding.weight.gradient())\n\n \"\"\"\n pass\n\n @fake_interface_only\n def clear_gradient(self):\n \"\"\"\n **Notes**:\n **1. This API is ONLY available in Dygraph mode**\n\n **2. Use it only Variable has gradient, normally we use this for Parameters since other temporal Variable will be deleted by Python's GC**\n\n Clear (set to ``0`` ) the Gradient of Current Variable\n\n Returns: None\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n x = np.ones([2, 2], np.float32)\n with fluid.dygraph.guard():\n inputs2 = []\n for _ in range(10):\n tmp = fluid.dygraph.base.to_variable(x)\n tmp.stop_gradient=False\n inputs2.append(tmp)\n ret2 = fluid.layers.sums(inputs2)\n loss2 = fluid.layers.reduce_sum(ret2)\n loss2.backward()\n print(loss2.gradient())\n loss2.clear_gradient()\n print(\"After clear {}\".format(loss2.gradient()))\n\n \"\"\"\n pass\n\n def __str__(self):\n return self._to_readable_code()\n\n def _to_readable_code(self):\n \"\"\"\n Get readable debug string of Variable.\n\n .. note::\n If you want to get the debug string in protobuf format,\n please use :code:`to_string` method.\n\n Returns:\n string: The formatted Variable string.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n cur_program = static.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(new_variable._to_readable_code())\n \"\"\"\n # VarType.LOD_TENSOR -> LOD_TENSOR\n type_str = str(self.type).split('.')[1]\n if self.type == core.VarDesc.VarType.SELECTED_ROWS or self.type == core.VarDesc.VarType.LOD_TENSOR:\n dtype_str = str(self.dtype).split('.')[1]\n var_str = \"{name} : {type}.shape{shape}.dtype({dtype}).stop_gradient({stop_gradient})\".\\\n format(name=self.name, type=type_str, shape=self.shape,\n dtype=dtype_str, stop_gradient=self.stop_gradient)\n else:\n var_str = \"{name} : {type})\".\\\n format(name=self.name, type=type_str)\n\n if type(self) == Parameter:\n if self.trainable:\n var_str = \"trainable param \" + var_str\n else:\n var_str = \"param \" + var_str\n else:\n var_str = \"var \" + var_str\n\n if self.persistable:\n var_str = \"persist \" + var_str\n\n return var_str\n\n def to_string(self, throw_on_error, with_details=False):\n \"\"\"\n Get debug string.\n\n Args:\n\n throw_on_error (bool): True if raise an exception when self is not initialized.\n\n with_details (bool): more details about variables and parameters (e.g. trainable, optimize_attr, ...) will be printed when with_details is True. Default value is False;\n\n Returns:\n str: The debug string.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import paddle\n\n paddle.enable_static()\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(new_variable.to_string(True))\n print(\"=============with detail===============\")\n print(new_variable.to_string(True, True))\n \"\"\"\n assert isinstance(throw_on_error, bool) and isinstance(with_details,\n bool)\n protostr = self.desc.serialize_to_string()\n proto = framework_pb2.VarDesc.FromString(six.binary_type(protostr))\n res_str = _debug_string_(proto, throw_on_error)\n if with_details:\n additional_attr = (\"error_clip\", \"stop_gradient\")\n for attr_name in additional_attr:\n res_str += \"%s: %s\\n\" % (attr_name,\n cpt.to_text(getattr(self, attr_name)))\n\n return res_str\n\n __repr__ = __str__\n\n @property\n def stop_gradient(self):\n \"\"\"\n Indicating if we stop gradient from current Variable\n\n **Notes: This Property has default value as** ``True`` **in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, while Parameter's default value is False. However, in Static Graph Mode all Variable's default stop_gradient value is** ``False``\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n with fluid.dygraph.guard():\n value0 = np.arange(26).reshape(2, 13).astype(\"float32\")\n value1 = np.arange(6).reshape(2, 3).astype(\"float32\")\n value2 = np.arange(10).reshape(2, 5).astype(\"float32\")\n linear = fluid.Linear(13, 5, dtype=\"float32\")\n linear2 = fluid.Linear(3, 3, dtype=\"float32\")\n a = fluid.dygraph.to_variable(value0)\n b = fluid.dygraph.to_variable(value1)\n c = fluid.dygraph.to_variable(value2)\n out1 = linear(a)\n out2 = linear2(b)\n out1.stop_gradient = True\n out = fluid.layers.concat(input=[out1, out2, c], axis=1)\n out.backward()\n\n assert linear.weight.gradient() is None\n assert (out1.gradient() == 0).all()\n \"\"\"\n return self._stop_gradient\n\n @stop_gradient.setter\n def stop_gradient(self, s):\n self._stop_gradient = s\n\n @property\n def persistable(self):\n \"\"\"\n Indicating if we current Variable should be long-term alive\n\n\n **Notes: This Property will be deprecated and this API is just to help user understand concept**\n\n **1. All Variable's persistable is** ``False`` **except Parameters.**\n\n **2. In** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, this property should not be changed**\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"persistable of current Var is: {}\".format(new_variable.persistable))\n \"\"\"\n return self.desc.persistable()\n\n @persistable.setter\n def persistable(self, p):\n self.desc.set_persistable(p)\n\n @property\n def name(self):\n \"\"\"\n Indicating name of current Variable\n\n **Notes: If it has two or more Varaible share the same name in the same** :ref:`api_guide_Block_en` **, it means these Variable will share content in no-** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode. This is how we achieve Parameter sharing**\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"name of current Var is: {}\".format(new_variable.name))\n \"\"\"\n return cpt.to_text(self.desc.name())\n\n @property\n def grad_name(self):\n \"\"\"\n Indicating name of the gradient Variable of current Variable.\n\n **Notes: This is a read-only property. It simply returns name of\n gradient Variable from a naming convention but doesn't guarantee\n the gradient exists.**\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n x = fluid.data(name=\"x\", shape=[-1, 23, 48], dtype='float32')\n print(x.grad_name) # output is \"x@GRAD\"\n\n \"\"\"\n return self.name + \"@GRAD\"\n\n @name.setter\n def name(self, new_name):\n self.desc.set_name(new_name)\n\n @property\n def shape(self):\n \"\"\"\n Indicating shape of current Variable\n\n **Notes: This is a read-only property**\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"shape of current Var is: {}\".format(new_variable.shape))\n\n \"\"\"\n # convert to tuple, make it as same as numpy API.\n return tuple(self.desc.shape())\n\n @property\n def dtype(self):\n \"\"\"\n Indicating data type of current Variable\n\n **Notes: This is a read-only property**\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"Dtype of current Var is: {}\".format(new_variable.dtype))\n \"\"\"\n return self.desc.dtype()\n\n @property\n def lod_level(self):\n \"\"\"\n Indicating ``LoD`` info of current Variable, please refer to :ref:`api_fluid_LoDTensor_en` to check the meaning\n of ``LoD``\n\n **Notes**:\n\n **1. This is a read-only property**\n\n **2. Don't support this property in** `Dygraph <../../user_guides/howto/dygraph/DyGraph.html>`_ **mode, it's value should be** ``0(int)``\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"LoD Level of current Var is: {}\".format(new_variable.lod_level))\n \"\"\"\n if self.type == core.VarDesc.VarType.SELECTED_ROWS:\n raise Exception(\"SelectedRows DO NOT supprt lod\")\n\n return self.desc.lod_level()\n\n @property\n def type(self):\n \"\"\"\n Indicating Type of current Variable\n\n **Notes: This is a read-only property**\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n new_variable = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n print(\"Type of current Var is: {}\".format(new_variable.type))\n \"\"\"\n return self.desc.type()\n\n def clone(self):\n \"\"\"\n Returns a new static Variable, which is the clone of the original static\n Variable. It remains in the current graph, that is, the cloned Variable \n provides gradient propagation. Calling ``out = tensor.clone()`` is same\n as ``out = assign(tensor)`` .\n\n Returns:\n Variable: The cloned Variable.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n\n paddle.enable_static()\n\n # create a static Variable\n x = paddle.static.data(name='x', shape=[3, 2, 1])\n # create a cloned Variable\n y = x.clone()\n\n \"\"\"\n output = self.block.create_var(\n name=unique_name.generate_with_ignorable_key(self.name + \"_clone\"),\n dtype=self.dtype,\n type=self.type,\n persistable=self.persistable,\n stop_gradient=self.stop_gradient)\n\n self.block.append_op(\n type='assign', inputs={'X': [self]}, outputs={'Out': [output]})\n return output\n\n def _set_error_clip(self, error_clip):\n \"\"\"\n Set the error_clip.\n\n Args:\n error_clip(BaseErrorClipAttr) : The new error_clip.\n\n Returns:\n None\n \"\"\"\n self.error_clip = error_clip\n\n def _set_info(self, key, value):\n \"\"\"\n Set key-value information for this variable.\n\n Args:\n key(str): Key for this information.\n value(object): The value associated to the key.\n\n Returns: \n None\n \"\"\"\n if not hasattr(self, \"_info\"):\n self._info = {}\n self._info[key] = value\n\n def _get_info(self, key):\n \"\"\"\n Get the information of this variable corresponding to key.\n\n Args:\n key(str): Key for this information.\n\n Returns: \n object\n \"\"\"\n if hasattr(self, \"_info\") and key in self._info:\n return self._info[key]\n return None\n\n def _slice_indices(self, slice, length):\n \"\"\"\n Reference implementation for the slice.indices method.\n \"\"\"\n # Compute step and length as integers.\n step = 1 if slice.step is None else slice.step\n\n # Raise ValueError for negative length or zero step.\n if length < 0:\n raise ValueError(\"length should not be negative\")\n if step == 0:\n raise ValueError(\"slice step can not be zero\")\n\n # Find lower and upper bounds for start and stop.\n lower = -1 if step < 0 else 0\n upper = length - 1 if step < 0 else length\n\n # Compute start.\n if slice.start is None:\n start = upper if step < 0 else lower\n else:\n start = slice.start\n start = max(start + length, lower) if start < 0 else min(start,\n upper)\n\n # Compute stop.\n if slice.stop is None:\n stop = lower if step < 0 else upper\n else:\n stop = slice.stop\n stop = max(stop + length, lower) if stop < 0 else min(stop, upper)\n\n return start, stop, step\n\n def _detectEllipsis(self, item):\n has_ellipsis = False\n start = 0\n end = len(self.shape)\n for index, o in enumerate(item):\n if o is Ellipsis:\n if has_ellipsis:\n raise ValueError(\"Index can have one ellipsis only.\")\n has_ellipsis = True\n start = index\n else:\n if has_ellipsis:\n end = index\n return has_ellipsis, start, end\n\n def _reconstructSliceinfo(self, item):\n has_ellipsis, start, end = self._detectEllipsis(item)\n if has_ellipsis:\n newitem = []\n for i in range(start):\n newitem.append(item[i])\n for i in range(start, end):\n newitem.append(slice(None, None, None))\n for i in range(end, len(item)):\n newitem.append(item[i])\n return newitem\n else:\n return None\n\n def _detectContinuesSlice(self, item):\n starts = []\n ends = []\n for index, o in enumerate(item):\n if isinstance(o, int):\n start = int(o)\n if (index > 0 and index >= self.shape[index]) \\\n or (index < 0 and (index + self.shape[index]) < 0):\n raise IndexError(\"invalid index\")\n start = max(start + self.shape[index], 0) if start < 0 else min(\n start, self.shape[index])\n starts.append(start)\n ends.append(start + 1)\n elif isinstance(o, slice):\n start, stop, step = self._slice_indices(o, self.shape[index])\n if step == 1 or step == -1:\n starts.append(start)\n ends.append(stop)\n else:\n return False, None\n 
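            # For example, for a 1-D var x, x[0:2] yields starts=[0], ends=[2]
            # (one continuous region), while x[::2] has step 2 and hits the
            # branch above, returning (False, None) so the caller can fall
            # back to the strided path.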
else:\n raise IndexError(\"Valid index accept int or slice or ellipsis\")\n return True, [starts, ends]\n\n def _cloneVar(self, copy=False):\n if not copy:\n return self.block.create_var(\n name=unique_name.generate_with_ignorable_key(self.name),\n dtype=self.dtype)\n else:\n return self\n\n def _sliceVar(self, axes, starts, ends):\n new_var = self._cloneVar()\n self.block.append_op(\n type=\"slice\",\n inputs={'Input': [self]},\n outputs={'Out': [new_var]},\n attrs={'axes': axes,\n 'starts': starts,\n 'ends': ends})\n return new_var\n\n def _concatVar(self, inputs, axis):\n new_var = self._cloneVar()\n self.block.append_op(\n type=\"concat\",\n inputs={'X': inputs},\n outputs={'Out': [new_var]},\n attrs={'axis': axis, })\n return new_var\n\n def _sliceAndConcatVar(self, item, axis):\n if isinstance(item, slice):\n if self.shape[axis] < 0:\n return self._cloneVar(True)\n start, stop, step = self._slice_indices(item, self.shape[axis])\n if step == 1:\n return self._sliceVar([axis], [start], [stop])\n else:\n vars = []\n if step > 0:\n while start < stop:\n vars.append(\n self._sliceVar([axis], [start], [start + 1]))\n start += step\n else:\n while start > stop:\n vars.append(\n self._sliceVar([axis], [start], [start + 1]))\n start += step\n return self._concatVar(vars, axis)\n elif isinstance(item, int):\n if self.shape[axis] < 0:\n return self._cloneVar(True)\n index = int(item)\n if (index > 0 and index >= self.shape[axis]) \\\n or (index < 0 and (index + self.shape[axis]) < 0):\n raise IndexError(\"invalid index\")\n return self._sliceVar([axis], [index], [index + 1])\n else:\n raise IndexError(\"Valid index accept int or slice or tuple\")\n\n def __getitem__(self, item):\n return _getitem_impl_(self, item)\n\n def __setitem__(self, item, value):\n inputs = {'Input': self}\n\n # 1. Parse item\n if not isinstance(item, tuple):\n item = [item]\n\n axes = []\n starts = []\n ends = []\n steps = []\n\n max_integer = sys.maxsize\n\n def replace_ellipsis(item):\n # Use slice(None) to replace Ellipsis.\n # For var, var.shape = [3,4,5,6]\n #\n # var[..., 1:2] -> var[:, :, :, 1:2]\n # var[0, ...] 
-> var[0]\n # var[0, ..., 1:2] -> var[0, :, :, 1:2]\n\n item = list(item)\n\n # Remove Variable to skip bug when counting Ellipsis\n item_remove_var = [\n ele for ele in item if not isinstance(ele, Variable)\n ]\n ell_count = item_remove_var.count(Ellipsis)\n if ell_count == 0:\n return item\n elif ell_count > 1:\n raise IndexError(\n \"An index can only have a single ellipsis ('...')\")\n\n ell_idx = item.index(Ellipsis)\n\n if ell_idx == len(item) - 1:\n return item[:-1]\n else:\n item[ell_idx:ell_idx + 1] = [slice(None)] * (\n len(self.shape) - len(item) + 1)\n\n return item\n\n item = replace_ellipsis(item)\n\n for dim, slice_item in enumerate(item):\n if isinstance(slice_item, slice):\n start = slice_item.start\n end = slice_item.stop\n step = slice_item.step\n\n if start is None and end is None and step is None:\n continue\n\n step = 1 if step is None else step\n\n # TODO: support cases when step < 1\n if not isinstance(step, Variable) and step == 0:\n raise ValueError(\n \"When assign a value to a paddle.Tensor, step can not be 0, \"\n \"but received step is {}.\".format(step))\n\n if isinstance(step, Variable) and (start is None or\n end is None):\n raise ValueError(\n \"When assign a value to a paddle.Tensor, it's not supported that \"\n \"the start or end is None when the type of step is paddle.Tensor.\"\n )\n\n if start is None:\n start = 0 if step > 0 else max_integer\n\n if end is None:\n end = max_integer if step > 0 else (0 - max_integer)\n else:\n start = slice_item\n end = slice_item + 1 if slice_item != -1 else max_integer\n step = 1\n axes.append(dim)\n starts.append(start)\n ends.append(end)\n steps.append(step)\n\n attrs = {'axes': axes, 'starts': starts, 'ends': ends, 'steps': steps}\n\n from .layers import utils\n if utils._contain_var(starts):\n inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts)\n del attrs['starts']\n if utils._contain_var(ends):\n inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)\n del attrs['ends']\n if utils._contain_var(steps):\n inputs['StepsTensorList'] = utils._convert_to_tensor_list(steps)\n del attrs['steps']\n\n # 2. 
Parse value\n dtype = self.dtype\n attrs['dtype'] = dtype\n\n from .data_feeder import convert_dtype\n # 2.1 value is an integer of float\n if isinstance(value, (int, float)):\n value = np.array([value]).astype(convert_dtype(dtype))\n\n # 2.2 value is a np.ndarray\n if isinstance(value, np.ndarray):\n shape = list(value.shape)\n if dtype == core.VarDesc.VarType.BOOL:\n value_name = \"bool_values\"\n values = [bool(v) for v in value.flat]\n elif dtype == core.VarDesc.VarType.FP32:\n value_name = \"fp32_values\"\n values = [float(v) for v in value.flat]\n elif dtype == core.VarDesc.VarType.FP64:\n value_name = \"fp64_values\"\n values = [float(v) for v in value.flat]\n elif dtype == core.VarDesc.VarType.INT32:\n value_name = \"int32_values\"\n values = [int(v) for v in value.flat]\n elif dtype == core.VarDesc.VarType.INT64:\n value_name = \"int64_values\"\n values = [int(v) for v in value.flat]\n else:\n raise TypeError(\n \"When assign a numpy.ndarray, integer or float to a paddle.Tensor, \"\n \"the data type of the paddle.Tensor must be bool, float32, int32 or int64, but \"\n \"received %s.\" % convert_dtype(dtype))\n attrs[value_name] = values\n attrs[\"shape\"] = shape\n\n elif isinstance(value, Variable):\n inputs[\"ValueTensor\"] = value\n else:\n raise TypeError(\n \"Only support to assign an integer, float, numpy.ndarray or \"\n \"paddle.Tensor to a paddle.Tensor, but received {}\".format(\n type(value)))\n\n self.block.append_op(\n type=\"set_value\", inputs=inputs, outputs={'Out': self}, attrs=attrs)\n\n return self\n\n\ndef get_all_op_protos():\n \"\"\"\n Get all registered op proto from PaddlePaddle C++ end.\n\n Returns:\n list: list of OpProto.\n \"\"\"\n protostrs = core.get_all_op_protos()\n ret_values = []\n for pbstr in protostrs:\n op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))\n ret_values.append(op_proto)\n return ret_values\n\n\nclass OpProtoHolder(object):\n \"\"\"\n A global variable to hold all OpProtos from C++ as a map\n \"\"\"\n\n @classmethod\n def instance(cls):\n if not hasattr(cls, '_instance'):\n cls._instance = cls()\n return cls._instance\n\n def __init__(self):\n assert not hasattr(\n self.__class__,\n '_instance'), 'Please use `instance()` to get OpProtoHolder object!'\n op_protos = get_all_op_protos()\n self.op_proto_map = {}\n for proto in op_protos:\n self.op_proto_map[proto.type] = proto\n\n def get_op_proto(self, type):\n \"\"\"\n Get OpProto by a type string.\n Args:\n type(str): The type that operator registered in C++ side.\n\n Returns(framework_pb2.OpProto): The OpProto\n\n \"\"\"\n if type not in self.op_proto_map:\n raise ValueError(\"Operator \\\"%s\\\" has not been registered.\" % type)\n return self.op_proto_map[type]\n\n def update_op_proto(self):\n op_protos = get_all_op_protos()\n custom_op_names = []\n for proto in op_protos:\n if proto.type not in self.op_proto_map:\n self.op_proto_map[proto.type] = proto\n custom_op_names.append(proto.type)\n\n return custom_op_names\n\n @staticmethod\n def generated_op_attr_names():\n return {\n core.op_proto_and_checker_maker.kOpRoleAttrName(),\n core.op_proto_and_checker_maker.kOpRoleVarAttrName(),\n core.op_proto_and_checker_maker.kOpNameScopeAttrName(),\n core.op_proto_and_checker_maker.kOpCreationCallstackAttrName(),\n core.op_proto_and_checker_maker.kOpDeviceAttrName()\n }\n\n\nclass Operator(object):\n \"\"\"\n In Fluid, all the operation are represented by Operator, and Operator\n is regarded as a build in an instruction of a Block. 
Users can use the\n build in instructions to describe their neural network.\n\n Args:\n block(Block): The block has the current operator.\n desc(core.OpDesc): The protobuf description of Operator.\n type(str): The type of operator. Default None.\n inputs(dict): The input of this Operator. it is a dictionary, for every\n element, key is the input parameter name, and value is a list of\n variables. Default None.\n outputs(dict): The output of this Operator. it is a dictionary, for\n every element, key is the input parameter name, and value is a list\n of variables. Default None.\n attrs(dict): The attributes of this Operator. it is a dictionary, for\n every element, key is attribute name, and value is the attribute value.\n The attribute type should be as same as the type registered in C++ side.\n Default None.\n\n Returns:\n Operator: The initialized Operator.\n\n Raises:\n ValueError: If the passed input, output and attrs doesn't match the\n initializing Operator's that registered in C++ side.\n\n Notes:\n The constructor of operator should not be invoked directly. Use\n Block.append_op or Block._prepend_op instead.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n # var1 += var2 + var3\n cur_block.append_op(type=\"sum\",\n inputs={\"X\": [var1, var2, var3]},\n outputs={\"Out\": [var1]})\n \"\"\"\n OP_WITHOUT_KERNEL_SET = {\n 'feed', 'fetch', 'recurrent', 'go', 'rnn_memory_helper_grad',\n 'conditional_block', 'while', 'send', 'recv', 'listen_and_serv',\n 'fl_listen_and_serv', 'ncclInit', 'select', 'checkpoint_notify',\n 'gen_bkcl_id', 'c_gen_bkcl_id', 'gen_nccl_id', 'c_gen_nccl_id',\n 'c_comm_init', 'c_sync_calc_stream', 'c_sync_comm_stream',\n 'queue_generator', 'dequeue', 'enqueue', 'heter_listen_and_serv',\n 'c_wait_comm', 'c_wait_compute'\n }\n\n def __init__(self,\n block,\n desc,\n type=None,\n inputs=None,\n outputs=None,\n attrs=None):\n if in_dygraph_mode():\n if type is None:\n raise ValueError(\n \"`type` to initialized an Operator can not be None.\")\n self._type = type\n self.attrs = attrs if attrs else {}\n else:\n self.block = block\n self.desc = desc\n # note: not add self.attrs here:\n # https://github.com/PaddlePaddle/Paddle/pull/12583#pullrequestreview-145093173\n op_attrs = attrs\n if op_attrs is None:\n op_attrs = dict()\n del attrs\n\n op_maker = core.op_proto_and_checker_maker\n\n if op_maker.kOpRoleAttrName() not in op_attrs:\n op_attrs[op_maker.kOpRoleAttrName(\n )] = self.block.program._op_role\n\n role_var_name = op_maker.kOpRoleVarAttrName()\n if len(self.block.program.\n _op_role_var) != 0 and role_var_name not in op_attrs:\n op_attrs[role_var_name] = self.block.program._op_role_var\n\n if role_var_name in op_attrs and len(op_attrs[role_var_name]) == 0:\n del op_attrs[role_var_name]\n\n if len(self.desc.type()) != 0:\n return\n if type is None:\n raise ValueError(\n \"`type` to initialized an Operator can not be None.\")\n else:\n callstack_var_name = op_maker.kOpCreationCallstackAttrName()\n op_attrs[callstack_var_name] = []\n for frame in traceback.extract_stack():\n op_attrs[callstack_var_name].append(\n ' File \"{}\", line {}, in {}'.format(frame[0], frame[1],\n frame[2]))\n op_attrs[callstack_var_name].append(' {}'.format(frame[\n 3]))\n\n self.desc.set_type(type)\n proto = OpProtoHolder.instance().get_op_proto(type)\n\n namescope_var_name = op_maker.kOpNameScopeAttrName()\n op_attrs[namescope_var_name] = _full_name_scope()\n\n # set device for op with 
kernels; warn for ops without kernels.
        # When force_cpu and device_guard are used at the same time, a warning will be given.
        # TODO(zhangting2020): when force_cpu is removed, clear the warning below.
        if _current_device is not None:
            if self._has_kernel(type):
                op_device = op_maker.kOpDeviceAttrName()
                op_attrs[op_device] = _current_device
            else:
                warnings.warn("The Op(%s) does not support setting a device."
                              % type)
            if 'force_cpu' in op_attrs:
                if (type == 'less_than' and
                        op_attrs['force_cpu'] is not None
                    ) or op_attrs['force_cpu'] != False:
                    warnings.warn(
                        "The Attr(force_cpu) of Op(%s) will be deprecated in the future, "
                        "please use 'device_guard' instead. 'device_guard' has higher priority when they are "
                        "used at the same time." % type)

        def find_name(var_list, name):
            for var_name in var_list:
                if var_list[var_name] is not None and var_name == name:
                    return True
            return False

        if inputs is not None:
            for in_proto in proto.inputs:
                found = find_name(inputs, in_proto.name)
                assert found or in_proto.dispensable, "Input {} not found".format(
                    in_proto.name)
                if found:
                    in_args = inputs[in_proto.name]
                    if not isinstance(in_args, (list, tuple)):
                        in_args = [in_args]
                    if not in_proto.duplicable and len(in_args) > 1:
                        raise ValueError(
                            "Input %s expects only one input, but %d are given."
                            % (in_proto.name, len(in_args)))
                    in_arg_names = []
                    for index, arg in enumerate(in_args):
                        if isinstance(arg, six.string_types):
                            in_arg_names.append(arg)
                        elif isinstance(arg, six.binary_type):
                            in_arg_names.append(arg.decode())
                        elif isinstance(arg, (Variable, core.VarBase)):
                            in_arg_names.append(cpt.to_text(arg.name))
                        else:
                            raise TypeError(
                                "The type of '%s' in operator %s should be "
                                "one of [basestring(), str, Variable] in python2, "
                                "or one of [str, bytes, Variable] in python3, "
                                "but received: %s" %
                                (in_proto.name, type, arg))
                    self.desc.set_input(in_proto.name, in_arg_names)
                else:
                    self.desc.set_input(in_proto.name, [])

        if outputs is not None:
            for m in proto.outputs:
                if (m.name not in outputs) and m.dispensable:
                    continue
                if not ((m.name in outputs) or m.dispensable):
                    raise ValueError(("Incorrect setting for output(s) of "
                                      "operator \"%s\", should set: [%s].")
                                     % (type, m.name))
            for out_proto in proto.outputs:
                if out_proto.name not in outputs:
                    continue
                out_args = outputs[out_proto.name]
                if not isinstance(out_args, list):
                    out_args = [out_args]
                if not out_proto.duplicable and len(out_args) > 1:
                    raise ValueError(
                        "Output %s expects only one output, but %d are given."
                        % (out_proto.name, len(out_args)))
                out_arg_names = []
                for arg in out_args:
                    if isinstance(arg, six.string_types):
                        out_arg_names.append(arg)
                    else:
                        out_arg_names.append(cpt.to_text(arg.name))
                    # TODO(minqiyang): could we remove variable's op in static mode?
                    if not in_dygraph_mode():
                        if isinstance(arg, six.string_types):
                            block.var(arg).op = self
                        else:
                            arg.op = self
                self.desc.set_output(out_proto.name, out_arg_names)

        if op_attrs is not None:
            if not isinstance(op_attrs, dict):
                raise TypeError("'attrs' should be a dict.")
            for attr in proto.attrs:
                attr_name = attr.name
                if (attr_name not in op_attrs) or (
                        op_attrs[attr_name] is None):
                    continue
                attr_val = op_attrs[attr_name]
                self._update_desc_attr(attr_name, attr_val)

        self.desc.check_attrs()
        if self._has_kernel(type):
            self.desc.infer_var_type(self.block.desc)
            self.desc.infer_shape(self.block.desc)

    def 
_has_kernel(self, op_type):\n return op_type not in self.OP_WITHOUT_KERNEL_SET\n\n def to_string(self, throw_on_error):\n \"\"\"\n Get debug string.\n\n Args:\n throw_on_error(bool): Whether to raise exception if self is not\n initialized.\n\n Returns:\n str: The debug string.\n\n \"\"\"\n protostr = self.desc.serialize_to_string()\n proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr))\n return _debug_string_(proto, throw_on_error)\n\n def _to_readable_code(self, skip_op_callstack=True):\n \"\"\"\n Get readable debug string of Operator.\n\n .. note::\n If you want to get the debug string in protobuf format,\n please use :code:`to_string` method.\n\n Args:\n skip_op_callstack(bool): whether to skip parsing Operator's attribute\n op_callstack, default value is True\n\n Returns:\n string: The formatted Operator string.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n cur_program = fluid.Program()\n cur_block = cur_program.current_block()\n var = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n new_op = cur_block.append_op(type=\"abs\",\n inputs={\"X\": [var]},\n outputs={\"Out\": [var]})\n print(new_op._to_readable_code())\n \"\"\"\n assert isinstance(\n skip_op_callstack, bool\n ), \"skip_op_callstack parameter's type is error, expect bool, received %s\".format(\n type(skip_op_callstack))\n outputs_str = \"{\"\n for i in range(0, len(self.output_names)):\n outputs_str += \"{name}=\".format(name=self.output_names[i])\n o = self.output(self.output_names[i])\n outputs_str += \"{value}\".format(value=o)\n if i != len(self.output_names) - 1:\n outputs_str += \", \"\n outputs_str += \"}\"\n\n inputs_str = \"{\"\n for i in range(0, len(self.input_names)):\n inputs_str += \"{name}=\".format(name=self.input_names[i])\n o = self.input(self.input_names[i])\n inputs_str += \"{value}\".format(value=o)\n\n if i != len(self.input_names) - 1:\n inputs_str += \", \"\n inputs_str += \"}\"\n\n attr_names = sorted(self.attr_names)\n attrs_str = \"\"\n for i in range(0, len(attr_names)):\n name = attr_names[i]\n if skip_op_callstack and name == \"op_callstack\":\n continue\n\n attr_type = self.desc.attr_type(name)\n if attr_type == core.AttrType.BLOCK:\n a = \"{name} = block[{value}]\".format(\n name=name, type=attr_type, value=self._block_attr_id(name))\n attrs_str += a\n if i != len(attr_names) - 1:\n attrs_str += \", \"\n continue\n\n if attr_type == core.AttrType.BLOCKS:\n a = \"{name} = blocks{value}\".format(\n name=name,\n type=attr_type,\n value=self._blocks_attr_ids(name))\n attrs_str += a\n if i != len(attr_names) - 1:\n attrs_str += \", \"\n continue\n\n a = \"{name} = {value}\".format(\n name=name, type=attr_type, value=self.desc.attr(name))\n attrs_str += a\n if i != len(attr_names) - 1:\n attrs_str += \", \"\n\n if outputs_str != \"{}\":\n op_str = \"{outputs} = {op_type}(inputs={inputs}, {attrs})\".\\\n format(outputs=outputs_str, op_type=self.type,\n inputs=inputs_str, attrs=attrs_str)\n else:\n op_str = \"{op_type}(inputs={inputs}, {attrs})\".\\\n format(op_type=self.type, inputs=inputs_str, attrs=attrs_str)\n return op_str\n\n def __str__(self):\n return self._to_readable_code()\n\n __repr__ = __str__\n\n @property\n def type(self):\n return self.desc.type()\n\n def input(self, name):\n r\"\"\"\n Get the input arguments according to the input parameter name.\n\n Args:\n name(str): The input parameter name.\n\n Returns:\n list: return the list of argument names that associated with \\\n the specific parameter name.\n \"\"\"\n 
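        # For example, for an op built via
        #   block.append_op(type="sum", inputs={"X": [a, b]}, outputs={"Out": [out]})
        # op.input("X") would return the argument names of a and b,
        # e.g. ["a.name", "b.name"] (the names here are illustrative).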
return self.desc.input(name)\n\n def _rename_input(self, old_name, new_name):\n \"\"\"\n Rename the `old_name` to `new_name`.\n\n Args:\n old_name(str): The old name of the Operator's input.\n new_name(str): The new name of the Operator's input.\n\n Returns:\n None\n \"\"\"\n self.desc._rename_input(old_name, new_name)\n\n def _rename_output(self, old_name, new_name):\n \"\"\"\n Rename the `old_name` to `new_name`.\n\n Args:\n old_name(str): The old name of the Operator's output.\n new_name(str): The new name of the Operator's output.\n\n Returns:\n None\n \"\"\"\n self.desc._rename_output(old_name, new_name)\n\n @property\n def input_names(self):\n return self.desc.input_names()\n\n @property\n def input_arg_names(self):\n return self.desc.input_arg_names()\n\n @property\n def output_arg_names(self):\n return self.desc.output_arg_names()\n\n def output(self, name):\n r\"\"\"\n Get output arguments by the output parameter name.\n\n Args:\n name(str): The output parameter name.\n\n Returns:\n list: return the list of argument names associated with \\\n the specific parameter name.\n \"\"\"\n return self.desc.output(name)\n\n @property\n def output_names(self):\n return self.desc.output_names()\n\n @property\n def idx(self):\n for i, op in enumerate(self.block.ops):\n if op == self:\n return i\n raise ValueError(\n \"Can't find op itself in it's block. It could be a bug of Paddle.\")\n\n def has_attr(self, name):\n \"\"\"\n Whether this Operator has the attribute with name or not.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n bool: True if has this attribute.\n\n \"\"\"\n return self.desc.has_attr(name)\n\n def attr_type(self, name):\n \"\"\"\n Get the type of attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n core.AttrType: the attribute type.\n \"\"\"\n return self.desc.attr_type(name)\n\n def _set_attr(self, name, val):\n \"\"\"\n Set the value of attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n val(bool|int|str|float|list): the value of the attribute.\n\n Raises:\n ValueError: If the type of value doesn't match with desc.attr_type(name).\n \"\"\"\n self._update_desc_attr(name, val)\n\n def _remove_attr(self, name):\n self.desc.remove_attr(name)\n\n def _update_desc_attr(self, name, val):\n \"\"\"\n Update the value of desc's attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n val(bool|int|str|float|list): the value of the attribute.\n\n Raises:\n ValueError: If the type of value doesn't match with desc.attr_type(name).\n \"\"\"\n if isinstance(val, Block):\n self.desc.set_block_attr(name, val.desc)\n elif isinstance(val, list) and val and all(\n isinstance(v, Block) for v in val):\n self.desc.set_blocks_attr(name, [v.desc for v in val])\n elif isinstance(val, core.BlockDesc) or \\\n isinstance(val, core.ProgramDesc):\n self.desc.set_serialized_attr(name, val.serialize_to_string())\n else:\n self.desc._set_attr(name, val)\n\n @property\n def attr_names(self):\n return self.desc.attr_names()\n\n def attr(self, name):\n \"\"\"\n Get the attribute by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n bool|int|str|float|list: The attribute value. 
The return value\n can be any valid attribute type.\n \"\"\"\n return self.desc.attr(name)\n\n def _block_attr_id(self, name):\n \"\"\"\n Get the block attribute's id by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n int: the block index.\n \"\"\"\n return self.desc._block_attr_id(name)\n\n def _block_attr(self, name):\n \"\"\"\n Get the block attribute by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n block: the block attribute.\n \"\"\"\n\n id = self._block_attr_id(name)\n assert (id >= 0 and id < len(self.block.program.blocks))\n return self.block.program.blocks[id]\n\n def _blocks_attr(self, name):\n \"\"\"\n Get the blocks attribute by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n list: list of the blocks attribute.\n \"\"\"\n attrs = []\n for i in self._blocks_attr_ids(name):\n assert (i >= 0 and i < len(self.block.program.blocks))\n attrs.append(self.block.program.blocks[i])\n\n return attrs\n\n def _blocks_attr_ids(self, name):\n \"\"\"\n Get the blocks attribute's ids by name.\n\n Args:\n name(str): the attribute name.\n\n Returns:\n list: list of the blocks ids.\n \"\"\"\n\n return self.desc._blocks_attr_ids(name)\n\n def all_attrs(self):\n \"\"\"\n Get the attribute dict.\n\n Returns:\n dict: The Operator's attribute dict, name->attr.\n \"\"\"\n attr_names = self.attr_names\n attr_map = {}\n for n in attr_names:\n attr_type = self.desc.attr_type(n)\n if attr_type == core.AttrType.BLOCK:\n attr_map[n] = self._block_attr(n)\n continue\n\n if attr_type == core.AttrType.BLOCKS:\n attr_map[n] = self._blocks_attr(n)\n continue\n\n attr_map[n] = self.attr(n)\n\n return attr_map\n\n def _is_optimize_op(self):\n op_maker = core.op_proto_and_checker_maker\n OPTIMIZE = core.op_proto_and_checker_maker.OpRole.Optimize\n\n if not self.desc.has_attr(op_maker.kOpRoleAttrName()):\n return False\n\n op_role = self.desc.attr(op_maker.kOpRoleAttrName())\n if op_role & int(OPTIMIZE):\n return True\n\n return False\n\n def _is_backward_op(self):\n op_maker = core.op_proto_and_checker_maker\n BACKWARD = core.op_proto_and_checker_maker.OpRole.Backward\n\n if not self.desc.has_attr(op_maker.kOpRoleAttrName()):\n return False\n\n op_role = self.desc.attr(op_maker.kOpRoleAttrName())\n if op_role & int(BACKWARD):\n return True\n\n return False\n\n\nclass Block(object):\n \"\"\"\n In Fluid, a Program is consistence of multi-Block, and Block stores\n VarDesc and OpDesc. In a specific Block, a VarDesc have a unique name.\n One block could have some child blocks, and child block's name scopes\n should inherit the parent's so that OpDesc in child block can reference\n a VarDesc that is stored in the parent block.\n Please reference the framework.proto for details.\n\n Args:\n program(Program): The Program that the Block belongs to.\n idx(int): The block's id in the Program.\n\n Notes:\n The constructor of Block should not be invoked directly. Please\n use `Program._create_block()` to create a block.\n\n Examples:\n .. 
code-block:: python

                import paddle.fluid as fluid

                cur_program = fluid.Program()
                cur_block = cur_program.current_block()
                var = cur_block.create_var(name="X",
                                           shape=[-1, 23, 48],
                                           dtype='float32')
                cur_block.append_op(type="abs",
                                    inputs={"X": [var]},
                                    outputs={"Out": [var]})
    """

    def __init__(self, program, idx):
        self.desc = program.desc.block(idx)
        self.vars = collections.OrderedDict()  # var_name --> var
        self.ops = list()  # operator list
        self.program = program
        self.removed_vars = collections.OrderedDict()

    def __str__(self):
        return self._to_readable_code()

    def _to_readable_code(self, skip_op_callstack=True):
        """
        Get readable debug string of Block.

        .. note::
            If you want to get the debug string in protobuf format,
            please use :code:`to_string` method.

        Args:
            skip_op_callstack(bool): whether to skip parsing Operator's
                op_callstack attribute, default value is True.

        Returns:
            string: The formatted Block string.

        Examples:
            .. code-block:: python

                import paddle.fluid as fluid

                cur_program = fluid.Program()
                cur_block = cur_program.current_block()
                new_var = cur_block.create_var(name="X",
                                               shape=[-1, 23, 48],
                                               dtype='float32')
                new_op = cur_block.append_op(type="abs",
                                             inputs={"X": [new_var]},
                                             outputs={"Out": [new_var]})
                print(cur_block._to_readable_code())
        """
        assert isinstance(
            skip_op_callstack, bool
        ), "skip_op_callstack parameter's type is wrong, expected bool, received {}.".format(
            type(skip_op_callstack))
        block_str = "{ // block "
        block_str += "{}\n".format(self.idx)
        for var in list(self.vars.values()):
            block_str += "    {}\n".format(var._to_readable_code())
        block_str += "\n"
        for op in self.ops:
            block_str += "    {}\n".format(
                op._to_readable_code(skip_op_callstack))
        block_str += "}"
        return block_str

    def to_string(self, throw_on_error, with_details=False):
        """
        Get debug string.

        Args:
            throw_on_error(bool): raise an exception when throw_on_error is
                True and self is not initialized.
            with_details(bool): print more details about variables and
                parameters (e.g. trainable, optimize_attr, ...) when
                with_details is True. Default False.

        Returns:
            str: The debug string.
        """
        assert isinstance(throw_on_error, bool) and isinstance(with_details,
                                                               bool)
        if with_details:
            re_add_indent = re.compile(r"\n(.)")
            res_str = "blocks {\n  idx: %d\n  parent_idx: %d" % (
                self.idx, self.parent_idx)
            for var in list(self.vars.values()):
                res_str += "\n  vars {\n    %s  }" % re_add_indent.sub(
                    r"\n    \1", var.to_string(throw_on_error, with_details))
            for op in self.ops:
                res_str += "\n  ops {\n    %s  }" % re_add_indent.sub(
                    r"\n    \1", op.to_string(throw_on_error))
            res_str += "\n}"
        else:
            protostr = self.desc.serialize_to_string()
            proto = framework_pb2.BlockDesc.FromString(
                six.binary_type(protostr))
            res_str = _debug_string_(proto, throw_on_error)
        return res_str

    __repr__ = __str__

    @property
    def parent_idx(self):
        return self.desc.parent

    @property
    def forward_block_idx(self):
        return self.desc.get_forward_block_idx()

    def _set_forward_block_idx(self, idx):
        """
        Set the forward block index.

        Args:
            idx(int): the block index.

        Returns:
            None
        """
        self.desc._set_forward_block_idx(idx)

    @property
    def backward_block_idx(self):
        cur_block_idx = self.idx
        for block in self.program.blocks:
            if block.forward_block_idx == cur_block_idx:
                return block.idx
        return -1

    @property
    def idx(self):
        return self.desc.id

    def var(self, name):
        """
        Get a Variable by name from this block.

        Args:
            name(str): the Variable's name.

        Raises:
            TypeError: If the input's type is not str.
            ValueError: If this block doesn't have a Variable with the
                given name.

        Returns:
            Variable: the Variable with the given name.
        """
        if not isinstance(name, six.string_types):
            raise TypeError(
                "var requires a string as its parameter, but received %s instead."
                % (type(name)))
        v = self.vars.get(name, None)
        if v is None:
            raise ValueError("var %s is not in this block" % name)
        return v

    def _find_var_recursive(self, name):
        """
        Get a Variable by name from this block recursively.

        Args:
            name(str): the Variable's name.

        Returns:
            Variable: the Variable with the given name. 
Or None if not found.\n \"\"\"\n frontier = list()\n visited = set()\n\n frontier.append(self)\n\n prog = self.program\n\n while len(frontier) != 0: # BFS\n cur = frontier[0]\n frontier = frontier[1:]\n\n if id(cur) in visited:\n continue\n\n if cur.has_var(name):\n return cur.var(name)\n\n if cur.parent_idx != -1:\n frontier.append(prog.block(cur.parent_idx))\n\n if cur.forward_block_idx != -1:\n frontier.append(prog.block(cur.forward_block_idx))\n\n visited.add(id(cur))\n return None\n\n def _var_recursive(self, name):\n \"\"\"\n Get a Variable by name from this block recursively.\n\n Args:\n name(str): the Variable's name.\n\n Raises:\n ValueError: this block and this parent block doesn't\n have a Variable with the giving name.\n\n Returns:\n Variable: the Variable with the giving name.\n \"\"\"\n var = self._find_var_recursive(name)\n if var:\n return var\n else:\n raise ValueError(\"Var {0} is not found recursively\".format(name))\n\n def all_parameters(self):\n return list(self.iter_parameters())\n\n def iter_parameters(self):\n return (item[1] for item in six.iteritems(self.vars)\n if isinstance(item[1], Parameter))\n\n def create_var(self, *args, **kwargs):\n if in_dygraph_mode():\n var = _varbase_creator(*args, **kwargs)\n else:\n var = Variable(block=self, *args, **kwargs)\n if 'initializer' in kwargs:\n kwargs['initializer'](var, self)\n return var\n\n def has_var(self, name):\n return name in self.vars\n\n def _rename_var(self, name, new_name):\n \"\"\"\n Rename variable in vars and ops' inputs and outputs\n\n Args:\n name(str): the name that need to be renamed.\n new_name(str): the name that need to rename to.\n\n Raises:\n ValueError: If this block doesn't have this the giving name,\n or the type of the var with the giving name is not Parameter\n or Variable.\n\n Returns:\n Variable: the Variable with the giving name.\n \"\"\"\n name = cpt.to_text(name)\n new_name = cpt.to_text(new_name)\n\n if not self.has_var(name):\n raise ValueError(\"var %s is not in current block\" % name)\n v = self.var(name)\n if type(v) == Parameter:\n var_type = \"Parameter\"\n stop_gradient = v.stop_gradient\n trainable = v.trainable\n optimize_attr = v.optimize_attr\n regularizer = v.regularizer\n error_clip = v.error_clip\n elif type(v) == Variable:\n var_type = \"Variable\"\n error_clip = v.error_clip\n stop_gradient = v.stop_gradient\n else:\n raise ValueError(\"unsupported var type: %s\", type(v))\n orig_var_type = v.type\n self.desc._rename_var(cpt.to_bytes(name), cpt.to_bytes(new_name))\n # NOTE: v is destroyed by C++ after calling _rename_var.\n d = self.desc.find_var(cpt.to_bytes(new_name))\n if var_type == \"Parameter\":\n if in_dygraph_mode():\n var = ParamBase(\n d.shape(),\n d.dtype(),\n type=orig_var_type,\n name=new_name,\n stop_gradient=stop_gradient,\n trainable=trainable,\n optimize_attr=optimize_attr,\n regularizer=regularizer,\n error_clip=error_clip)\n else:\n var = Parameter(\n self,\n d.shape(),\n d.dtype(),\n type=orig_var_type,\n name=new_name,\n stop_gradient=stop_gradient,\n trainable=trainable,\n optimize_attr=optimize_attr,\n regularizer=regularizer,\n error_clip=error_clip)\n elif var_type == \"Variable\":\n var = Variable(\n self,\n type=orig_var_type,\n name=new_name,\n error_clip=error_clip,\n stop_gradient=stop_gradient)\n\n # rename the python side, _sync_with_cpp will only add\n # new vars/ops to python side.\n self.vars[new_name] = var\n del self.vars[name]\n self._sync_with_cpp()\n return var\n\n def _remove_var(self, name, sync=True):\n if sync == True:\n 
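            # Sync with the C++ desc first, so that vars/ops created on the
            # C++ side (e.g. during backward) are visible before removal.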
self._sync_with_cpp()\n self.desc._remove_var(cpt.to_bytes(name))\n del self.vars[name]\n\n def create_parameter(self, *args, **kwargs):\n global_block = self.program.global_block()\n param = None\n if in_dygraph_mode():\n param = ParamBase(*args, **kwargs)\n else:\n param = Parameter(global_block, *args, **kwargs)\n # NOTE: Why only set stop_gradient=False in static mode\n # Because in dygraph mode, the `stop_gradient` and `trainable`\n # are related, and `trainable` default vallue is `True` or\n # it is specified by users, there is no need to set\n # `stop_gradient` for ParamBase here.\n param.stop_gradient = False\n if 'initializer' in kwargs:\n\n def _is_inited_by(block, var):\n init_ops = []\n for op in block.ops:\n if var.name in op.output_arg_names:\n # In startup_program, \"c_broadcast\" and \"c_sync_comm_stream\"\n # are treated as initialization ops that cause error.\n # Think of \"c_broadcast\" and \"c_sync_comm_stream\" as a special case here.\n if op.type in [\"c_broadcast\", \"c_sync_comm_stream\"]:\n continue\n init_ops.append(op)\n return init_ops\n\n initializer = kwargs['initializer']\n init_ops = _is_inited_by(global_block, param)\n init_ops_len = len(init_ops)\n if init_ops_len > 1:\n raise RuntimeError(\"param \" + param.name +\n \" is inited by multiple init ops \" + str(\n init_ops))\n elif init_ops_len == 1:\n # TODO already inited, do nothing, should log a warning\n pass\n else:\n initializer(param, self)\n return param\n\n def append_op(self, *args, **kwargs):\n \"\"\"\n Appends a new Operator according to the giving arguments.\n\n Returns:\n Operator: the append Operator.\n \"\"\"\n if in_dygraph_mode():\n attrs = kwargs.get(\"attrs\", {})\n type = kwargs.get(\"type\", None)\n op = Operator(\n block=self,\n desc=None,\n type=type,\n inputs=None,\n outputs=None,\n attrs=attrs)\n\n # record ops in tracer rather than blocks\n #\n # TODO(minqiyang): add op stop_gradient support in static mode too.\n # currently, we only support stop_gradient in dygraph mode.\n\n _dygraph_tracer().trace_op(type,\n kwargs.get(\"inputs\", {}),\n kwargs.get(\"outputs\", {}), attrs\n if attrs else {},\n kwargs.get(\"stop_gradient\", False))\n else:\n op_desc = self.desc.append_op()\n op = Operator(\n block=self,\n desc=op_desc,\n type=kwargs.get(\"type\", None),\n inputs=kwargs.get(\"inputs\", None),\n outputs=kwargs.get(\"outputs\", None),\n attrs=kwargs.get(\"attrs\", None))\n\n self.ops.append(op)\n\n return op\n\n def _insert_op(self, index, *args, **kwargs):\n \"\"\"\n Insert a Operator according to the giving arguments.\n\n Args:\n index(int): the place that the operator to insert.\n\n Returns:\n Operator: the insert Operator.\n \"\"\"\n self._sync_with_cpp()\n op_desc = self.desc._insert_op(index)\n op = Operator(block=self, desc=op_desc, *args, **kwargs)\n self.ops.insert(index, op)\n return op\n\n def _insert_op_without_sync(self, index, *args, **kwargs):\n \"\"\"\n Insert an Operator according to the giving arguments, \n without sync_with_cpp to meke the compilation faster.\n\n Args:\n index(int): the place that the operator to insert.\n\n Returns:\n Operator: the insert Operator.\n \"\"\"\n op_desc = self.desc._insert_op(index)\n op = Operator(block=self, desc=op_desc, *args, **kwargs)\n self.ops.insert(index, op)\n return op\n\n def _remove_op(self, index, sync=True):\n \"\"\"\n Remove the specific position operator.\n\n Args:\n index(int): the position that the operator to insert.\n\n Returns:\n None\n \"\"\"\n if sync == True:\n self._sync_with_cpp()\n 
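        # [index, index + 1) is a half-open range on the C++ side; after the
        # desc removal, drop the Python-side wrapper to keep both in sync.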
self.desc._remove_op(index, index + 1)\n del self.ops[index]\n\n def _slice_ops(self, start, end):\n \"\"\"\n Return the Operator between start and end.\n\n Args:\n start(int): the start position.\n end(int): the end position.\n\n Returns:\n list: the Operators between start and end.\n \"\"\"\n return self.ops[start:end]\n\n def _prepend_op(self, *args, **kwargs):\n if in_dygraph_mode():\n type = kwargs.get(\"type\", None)\n attrs = kwargs.get(\"attrs\", {})\n op = Operator(\n self, None, type=type, inputs=None, outputs=None, attrs=attrs)\n\n _dygraph_tracer().trace_op(type,\n kwargs.get(\"inputs\", {}),\n kwargs.get(\"outputs\", {}), attrs\n if attrs else {},\n kwargs.get(\"stop_gradient\", False))\n else:\n op_desc = self.desc._prepend_op()\n op = Operator(\n self,\n op_desc,\n type=kwargs.get(\"type\", None),\n inputs=kwargs.get(\"inputs\", None),\n outputs=kwargs.get(\"outputs\", None),\n attrs=kwargs.get(\"attrs\", None))\n self.ops.insert(0, op)\n\n return op\n\n def _sync_with_cpp(self):\n \"\"\"\n Sync from the desc on the c++ end. This method is used to synchronize\n the c++ desc instance generated by backward.\n \"\"\"\n # sync variables from cpp\n for var in self.desc.all_vars():\n if not self.has_var(var.name()):\n self.create_var(name=var.name(), desc=var, type=var.type())\n\n # sync variables removed from c++ end\n for var in list(self.vars.keys()):\n if not self.desc.find_var(cpt.to_bytes(var)):\n self.vars.pop(var)\n\n # sync operators from cpp\n ops_in_cpp = []\n for op_idx in range(0, self.desc.op_size()):\n ops_in_cpp.append(self.desc.op(op_idx))\n\n if len(self.ops) != 0:\n first_op_in_python = self.ops[0].desc\n last_op_in_python = self.ops[len(self.ops) - 1].desc\n start_index = None\n end_index = None\n for index in range(len(ops_in_cpp)):\n if first_op_in_python == ops_in_cpp[index]:\n start_index = index\n if last_op_in_python == ops_in_cpp[index]:\n end_index = index\n assert start_index is not None\n assert end_index is not None\n assert start_index <= end_index\n else:\n start_index = 0\n end_index = -1\n\n # sync ops append to the head of cpp_ops\n for index in range((start_index - 1 - 1), -1, -1):\n op_desc = ops_in_cpp[index]\n op = Operator(self, op_desc)\n self.ops.insert(0, op)\n\n # sync ops append to the end of cpp_ops\n for index in range((end_index + 1), len(ops_in_cpp)):\n op_desc = ops_in_cpp[index]\n op = Operator(self, op_desc)\n self.ops.append(op)\n\n # sync ops removed from c++ end\n if end_index != -1 and end_index < len(self.ops):\n ops_in_cpp_index = 0\n ops_in_python_index = 0\n while ops_in_python_index < len(\n self.ops) and ops_in_cpp_index < len(ops_in_cpp):\n if self.ops[ops_in_python_index].desc != ops_in_cpp[\n ops_in_cpp_index]:\n del self.ops[ops_in_python_index]\n else:\n ops_in_cpp_index += 1\n ops_in_python_index += 1\n\n assert len(self.ops) == len(ops_in_cpp)\n for index in range(len(self.ops)):\n assert self.ops[index].desc == ops_in_cpp[index]\n\n def _copy_param_info_from(self, other):\n \"\"\"\n Copy the information of parameters from the other block.\n\n Args:\n other(Block): the other block.\n\n Raises:\n ValueError: If type of input is not Block, or the `other` and this\n block is not in the same topology.\n\n Returns:\n None\n \"\"\"\n if not isinstance(other, Block):\n raise TypeError(\n \"_copy_param_info_from should be invoked with Block\")\n for p in other.iter_parameters():\n assert isinstance(p, Parameter)\n v = self.vars.get(p.name, None)\n if v is None:\n # if the Parameter is pruned, v may be None\n continue\n 
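            # v is this block's variable with the same name as parameter p;
            # rebuild it below as a parameter that carries p's attributes
            # (trainable, regularizer, etc.).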
assert isinstance(v, Variable)\n new_p = None\n if in_dygraph_mode():\n new_p = ParamBase(\n shape=v.shape,\n dtype=v.dtype,\n type=v.type,\n lod_level=v.lod_level,\n stop_gradient=p.stop_gradient,\n trainable=p.trainable,\n optimize_attr=p.optimize_attr,\n regularizer=p.regularizer,\n error_clip=p.error_clip,\n name=v.name)\n else:\n new_p = Parameter(\n block=self,\n shape=v.shape,\n dtype=v.dtype,\n type=v.type,\n lod_level=v.lod_level\n if v.type == core.VarDesc.VarType.LOD_TENSOR else None,\n stop_gradient=p.stop_gradient,\n trainable=p.trainable,\n optimize_attr=p.optimize_attr,\n regularizer=p.regularizer,\n error_clip=p.error_clip,\n name=v.name)\n self.vars[new_p.name] = new_p\n\n def _clone_variable(self, var, force_persistable=True):\n \"\"\"\n Clone a variable into current block.\n\n Args:\n var: the variable to be cloned.\n force_persistable(bool): True means setting the result variable to being persistable.\n False means setting the persistable the same with that of input var.\n default: True.\n\n Returns:\n Variable: the new variable cloned from 'var' in current block.\n \"\"\"\n assert isinstance(var, Variable)\n ret_var = None\n # make STEP_SCOPES var can be safely cloned.\n if var.type == core.VarDesc.VarType.STEP_SCOPES:\n ret_var = self.create_var(\n name=var.name, persistable=var.persistable, type=var.type)\n elif var.type == core.VarDesc.VarType.RAW:\n ret_var = self.create_var(\n name=var.name, persistable=var.persistable, type=var.type)\n elif var.type == core.VarDesc.VarType.SELECTED_ROWS:\n ret_var = self.create_var(\n name=var.name,\n shape=var.shape,\n dtype=var.dtype,\n type=var.type,\n persistable=True if force_persistable else var.persistable,\n is_data=var.is_data,\n need_check_feed=var.desc.need_check_feed())\n else:\n ret_var = self.create_var(\n name=var.name,\n shape=var.shape,\n dtype=var.dtype,\n type=var.type,\n lod_level=var.lod_level,\n persistable=True if force_persistable else var.persistable,\n is_data=var.is_data,\n need_check_feed=var.desc.need_check_feed())\n return ret_var\n\n\nclass IrNode(object):\n \"\"\"\n Python IrNode. 
Beneath it is a core.Node, which is used for Ir Pass.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"\n Construct an IrNode using core.Node.\n\n Args:\n node(core.Node): C++ Node.\n \"\"\"\n assert isinstance(node,\n core.Node), 'node must be the instance of core.Node.'\n self.node = node\n\n def name(self):\n \"\"\"\n Return the node name.\n\n Returns:\n str: node name.\n \"\"\"\n return self.node.name()\n\n def node_type(self):\n \"\"\"\n Return the node type.\n\n Returns:\n core.Node.Type: node type(core.Node.Type.Operation or core.Node.Type.Variable).\n \"\"\"\n return self.node.node_type()\n\n def var(self):\n \"\"\"\n Return the node variable description.\n\n Returns:\n core.VarDesc: node variable description.\n \"\"\"\n return self.node.var()\n\n def op(self):\n \"\"\"\n Return the node operator description.\n\n Returns:\n core.OpDesc: node operator description.\n \"\"\"\n return self.node.op()\n\n def id(self):\n \"\"\"\n Return the node id.\n\n Returns:\n int: node id.\n \"\"\"\n return self.node.id()\n\n def is_op(self):\n \"\"\"\n If the node is an operator, then return true.\n\n Returns:\n bool: indicate whether the node is an operator.\n \"\"\"\n return self.node.is_op()\n\n def is_var(self):\n \"\"\"\n If the node is a variable, then return true.\n\n Returns:\n bool: indicate whether the node is a variable.\n \"\"\"\n return self.node.is_var()\n\n def is_ctrl_var(self):\n \"\"\"\n If the node is a control dependence variable, then return true.\n\n Returns:\n bool: indicate whether the node is a control dependence variable.\n \"\"\"\n return self.node.is_ctrl_var()\n\n def clear_inputs(self):\n \"\"\"\n Clear the node inputs. After executing the `clear_inputs` function,\n the node inputs will be empty.\n \"\"\"\n self.node.clear_inputs()\n\n def remove_input_by_id(self, node_id):\n \"\"\"\n Remove a node from inputs by the given node id.\n\n Args:\n node_id(int): the given node id.\n \"\"\"\n self.node.remove_input(node_id)\n\n def remove_input(self, node):\n \"\"\"\n Remove a node from inputs.\n\n Args:\n node(IrNode): the node being removed.\n \"\"\"\n self.node.remove_input(node.node)\n\n def append_input(self, node):\n \"\"\"\n Append a node in inputs.\n\n Args:\n node(IrNode): the node being appended.\n \"\"\"\n self.node.append_input(node.node)\n\n def clear_outputs(self):\n \"\"\"\n Clear the node outputs. After executing the `clear_outputs` function,\n the node outputs will be empty.\n \"\"\"\n self.node.clear_outputs()\n\n def remove_output_by_id(self, node_id):\n \"\"\"\n Remove a node from outputs by the given node id.\n\n Args:\n node_id(int): the given node id.\n \"\"\"\n self.node.remove_output(node_id)\n\n def remove_output(self, node):\n \"\"\"\n Remove a node from outputs.\n\n Args:\n node(IrNode): the node being removed.\n \"\"\"\n self.node.remove_output(node.node)\n\n def append_output(self, node):\n \"\"\"\n Append a node in outputs.\n\n Args:\n node(IrNode): the node being appended.\n \"\"\"\n self.node.append_output(node.node)\n\n @property\n def inputs(self):\n \"\"\"\n Return the node inputs.\n\n Returns:\n list(IrNode): node inputs wrapped by IrNode.\n \"\"\"\n return [IrNode(n) for n in self.node.inputs]\n\n @property\n def outputs(self):\n \"\"\"\n Return the node outputs.\n\n Returns:\n list(IrNode): node outputs wrapped by IrNode.\n \"\"\"\n return [IrNode(n) for n in self.node.outputs]\n\n\nclass IrVarNode(IrNode):\n \"\"\"\n Python IrVarNode. 
Beneath it is a core.Node, it inherits from IrNode.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"\n Construct an IrVarNode using core.Node.\n\n Args:\n node(core.Node): C++ Node.\n \"\"\"\n assert isinstance(node, core.Node) and node.is_var(), \\\n 'node must be the instance of core.Node and it must be a variable node.'\n super(IrVarNode, self).__init__(node)\n self.node = node\n\n def set_shape(self, shape):\n \"\"\"\n Set the node variable shape.\n\n Args:\n shape(list): shape to be set.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description can not be None.\"\n self.node.var().set_shape(shape)\n\n def persistable(self):\n \"\"\"\n If the variable node is a persistable variable, then return true.\n\n Returns:\n bool: indicate whether the variable is persistable.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description can not be None.\"\n return self.node.var().persistable()\n\n def type(self):\n \"\"\"\n Return the variable type.\n\n Returns:\n core.VarDesc.VarType: the variable type.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description can not be None.\"\n return self.node.var().type()\n\n def dtype(self):\n \"\"\"\n Return the variable data type.\n\n Returns:\n core.VarDesc.VarType: the variable data type.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description can not be None.\"\n return self.node.var().dtype()\n\n def shape(self):\n \"\"\"\n Return the variable shape.\n\n Returns:\n list: the variable shape.\n \"\"\"\n assert self.node.var() is not None, \\\n \"The node variable description can not be None.\"\n return self.node.var().shape()\n\n @property\n def inputs(self):\n \"\"\"\n Return the node inputs.\n\n Returns:\n list(IrOpNode): node inputs wrapped by IrOpNode.\n \"\"\"\n return [IrOpNode(n) for n in self.node.inputs]\n\n @property\n def outputs(self):\n \"\"\"\n Return the node outputs.\n\n Returns:\n list(IrOpNode): node outputs wrapped by IrOpNode.\n \"\"\"\n return [IrOpNode(n) for n in self.node.outputs]\n\n\nclass IrOpNode(IrNode):\n \"\"\"\n Python IrOpNode. 
Beneath it is a core.Node, it inherits from IrNode.\n \"\"\"\n\n def __init__(self, node):\n \"\"\"\n Construct an IrOpNode using core.Node.\n\n Args:\n node(core.Node): C++ Node.\n \"\"\"\n assert isinstance(node, core.Node) and node.is_op(), \\\n 'node must be the instance of core.Node and it must be a operator node.'\n super(IrOpNode, self).__init__(node)\n self.node = node\n\n def rename_input(self, old_input_name, new_input_name):\n \"\"\"\n Rename the input of this node.\n\n Args:\n old_input_name(str): the old input name.\n new_input_name(str): the new input name.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n self.node.op()._rename_input(old_input_name, new_input_name)\n\n def rename_output(self, old_output_name, new_output_name):\n \"\"\"\n Rename the output of this node.\n\n Args:\n old_output_name(str): the old output name.\n new_output_name(str): the new output name.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n self.node.op()._rename_output(old_output_name, new_output_name)\n\n def input(self, name):\n \"\"\"\n Get the argument name list by the parameter name for input.\n\n Args:\n name(str): the parameter name.\n\n Returns:\n list(str): the argument name list.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n return self.node.op().input(name)\n\n def output(self, name):\n \"\"\"\n Get the argument name list by the parameter name for output.\n\n Args:\n name(str): the parameter name.\n\n Returns:\n list(str): the argument name list.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n return self.node.op().output(name)\n\n def set_type(self, new_type):\n \"\"\"\n Change the operator type into new type.\n\n Args:\n new_type(str): new operator type to be set.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n return self.node.op().set_type(new_type)\n\n def set_attr(self, name, val):\n \"\"\"\n Set the value of attribute by attribute's name.\n\n Args:\n name(str): the attribute name.\n val(bool|int|str|float|list): the value of the attribute.\n \"\"\"\n self._update_desc_attr(name, val)\n\n def _update_desc_attr(self, name, val):\n \"\"\"\n Update the value of the op desc's attribute by attribute's name.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n desc = self.node.op()\n if isinstance(val, Block):\n desc.set_block_attr(name, val.desc)\n elif isinstance(val, list) and val and \\\n all(isinstance(v, Block) for v in val):\n desc.set_blocks_attr(name, [v.desc for v in val])\n elif isinstance(val, core.BlockDesc) or \\\n isinstance(val, core.ProgramDesc):\n desc.set_serialized_attr(name, val.serialize_to_string())\n else:\n desc._set_attr(name, val)\n\n def input_arg_names(self):\n \"\"\"\n Return input arguments' names of this op node.\n\n Returns:\n list(str): input arguments' names of this op node.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n return self.node.op().input_arg_names()\n\n def output_arg_names(self):\n \"\"\"\n Return output arguments' names of this op node.\n\n Returns:\n list(str): output arguments' names of this op node.\n \"\"\"\n assert self.node.op() is not None, \\\n \"The node operator description can not be None.\"\n return self.node.op().output_arg_names()\n\n @property\n 
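    # Note: an operator node's neighbors are variable nodes, so inputs and
    # outputs are wrapped as IrVarNode here (IrVarNode symmetrically wraps
    # its neighbors as IrOpNode).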
def inputs(self):\n \"\"\"\n Return the node inputs.\n\n Returns:\n list(IrVarNode): node inputs wrapped by IrVarNode.\n \"\"\"\n return [IrVarNode(n) for n in self.node.inputs]\n\n @property\n def outputs(self):\n \"\"\"\n Return the node outputs.\n\n Returns:\n list(IrVarNode): node outputs wrapped by IrVarNode.\n \"\"\"\n return [IrVarNode(n) for n in self.node.outputs]\n\n\nclass IrGraph(object):\n \"\"\"\n Python IrGraph. Beneath it is a core.Graph, which is used for\n creating a c++ Ir Pass Graph. An IrGraph is just a graph view of\n a Program. In an IrGraph, both Variables and Operators are graph\n nodes.\n \"\"\"\n\n def __init__(self, graph, for_test=False):\n \"\"\"\n Construct an IrGraph using core.Graph.\n\n Args:\n graph(core.Graph): C++ Graph.\n for_test(bool): True for the test graph and false for the train graph.\n \"\"\"\n assert isinstance(\n graph, core.Graph), 'graph must be the instance of core.Graph.'\n self.graph = graph\n self._for_test = for_test\n\n def clone(self):\n \"\"\"\n Create a new and duplicated IrGraph.\n\n Warns:\n The method only clones the graph structure, not its attributes.\n\n Returns:\n IrGraph: A new and duplicated graph.\n \"\"\"\n g = self.graph.clone()\n return IrGraph(g, self._for_test)\n\n def is_test(self):\n \"\"\"\n If the graph is used for testing, the function returns true. Otherwise, returns false.\n \"\"\"\n return self._for_test\n\n def all_nodes(self):\n \"\"\"\n Return all nodes included in the graph as a set.\n \"\"\"\n return {IrNode(node) for node in self.graph.nodes()}\n\n def all_var_nodes(self):\n \"\"\"\n Return all variable nodes included in the graph as a set.\n \"\"\"\n return {IrVarNode(node) for node in self.graph.nodes() if node.is_var()}\n\n def all_persistable_nodes(self):\n \"\"\"\n Return all persistable variable nodes included in the graph as a set.\n \"\"\"\n persistable_nodes = set()\n for node in self.graph.nodes():\n if node.is_var() and node.var() is not None and node.var(\n ).persistable():\n persistable_nodes.add(node)\n return {IrVarNode(p) for p in persistable_nodes}\n\n def all_op_nodes(self):\n \"\"\"\n Return all operator nodes included in the graph as a set.\n \"\"\"\n return {IrOpNode(node) for node in self.graph.nodes() if node.is_op()}\n\n def create_persistable_node(self, name, var_type, shape, var_dtype):\n \"\"\"\n Create a persistable variable node in the graph. In IrGraph,\n it can not distinguish between persistable variables and parameters.\n\n Args:\n name(str): the name of the persistable variable node.\n vart_type(core.VarDesc.VarType): the type of the persistable variable node.\n shape(list): the shape of the persistable variable node.\n var_dtype(core.VarDesc.VarType): the data type of the persistable variable node.\n\n Returns:\n IrVarNode: the created persistable variable node.\n \"\"\"\n var_desc = core.VarDesc(name)\n var_desc.set_type(var_type)\n var_desc.set_shape(shape)\n var_desc.set_dtype(var_dtype)\n var_desc.set_persistable(True)\n return IrVarNode(self.graph.create_var_node(var_desc))\n\n def create_var_node(self, name, var_type, shape, var_dtype):\n \"\"\"\n Create a variable node in the graph. 
The created variable node is\n not persistable.\n\n Args:\n name(str): the name of the variable node.\n vart_type(core.VarDesc.VarType): the type of the variable node.\n shape(list): the shape of the variable node.\n var_dtype(core.VarDesc.VarType): the data type of the variable node.\n\n Returns:\n IrVarNode: the created variable node.\n \"\"\"\n\n var_desc = core.VarDesc(name)\n var_desc.set_type(var_type)\n var_desc.set_shape(shape)\n var_desc.set_dtype(var_dtype)\n return IrVarNode(self.graph.create_var_node(var_desc))\n\n def create_control_dep_var(self):\n \"\"\"\n create a control var\n \"\"\"\n return IrVarNode(self.graph.create_control_dep_var())\n\n def create_var_node_from_desc(self, var_desc):\n \"\"\"\n Create a variable node by using an existing VarDesc in the graph.\n Depend on the giving VarDesc, the created variable node may be persistable.\n\n Args:\n var_desc(core.VarDesc): the giving variable description.\n\n Returns:\n IrVarNode: the created variable node.\n \"\"\"\n return IrVarNode(self.graph.create_var_node(var_desc))\n\n def create_op_node(self, op_type, attrs, inputs, outputs):\n \"\"\"\n Create a operator node in the graph.\n\n Args:\n op_type(str): the type of the operator node.\n attrs(dict): the attributes of the operator node.\n inputs(dict): the inputs of the operator node.\n outputs(dict): the outputs of the operator node.\n\n Returns:\n IrOpNode: the created operator node.\n \"\"\"\n op_desc = core.OpDesc()\n op_desc.set_type(op_type)\n for attr, value in six.iteritems(attrs):\n self._update_desc_attr(op_desc, attr, value)\n for input_name, var_nodes in six.iteritems(inputs):\n if not isinstance(var_nodes, list):\n var_nodes = [var_nodes]\n op_desc.set_input(input_name,\n [var_node.name() for var_node in var_nodes])\n for output_name, var_nodes in six.iteritems(outputs):\n if not isinstance(var_nodes, list):\n var_nodes = [var_nodes]\n op_desc.set_output(output_name,\n [var_node.name() for var_node in var_nodes])\n return IrOpNode(self.graph.create_op_node(op_desc))\n\n def create_op_node_from_desc(self, op_desc):\n \"\"\"\n Create a operator node by using an existing OpDesc in the graph.\n\n Args:\n op_desc(core.VarDesc): the giving operator description.\n\n Returns:\n IrOpNode: the created operator node.\n \"\"\"\n return IrOpNode(self.graph.create_op_node(op_desc))\n\n def update_input_link(self, old_input_node, new_input_node, op_node):\n \"\"\"\n Update the input's link of a operator node.\n\n Args:\n old_input_node(IrNode): the old input node of the giving op_node.\n new_input_node(IrNode): the new input node of the giving op_node.\n op_node(IrOpNode): the operator node that is needed to update input's link.\n \"\"\"\n assert old_input_node.node in self.graph.nodes() and new_input_node.node in \\\n self.graph.nodes() and op_node.node in self.graph.nodes(), \\\n 'The three arguments(old_input_node&new_input_node&op_node) must be in the graph nodes.'\n old_input_node.remove_output(op_node)\n op_node.remove_input(old_input_node)\n new_input_node.append_output(op_node)\n op_node.append_input(new_input_node)\n op_node.rename_input(old_input_node.name(), new_input_node.name())\n\n def update_output_link(self, old_output_node, new_output_node, op_node):\n \"\"\"\n Update the output's link of an operator node.\n\n Args:\n old_output_node(IrNode): the old output node of the giving op_node.\n new_output_node(IrNode): the new output node of the giving op_node.\n op_node(IrOpNode): the operator node that is needed to update input's link.\n \"\"\"\n assert 
old_output_node.node in self.graph.nodes() and new_output_node.node in \\\n self.graph.nodes() and op_node.node in self.graph.nodes(), \\\n 'The three arguments(old_output_node &new_output_node &op_node) must be in the graph nodes.'\n old_output_node.remove_input(op_node)\n op_node.remove_output(old_output_node)\n new_output_node.append_input(op_node)\n op_node.append_output(new_output_node)\n op_node.rename_output(old_output_node.name(), new_output_node.name())\n\n def link_to(self, node_in, node_out):\n \"\"\"\n Connect two nodes.\n\n Args:\n node_in(IrNode): the input node.\n node_out(IrNode): the output node.\n \"\"\"\n assert node_in.node in self.graph.nodes() and node_out.node in self.graph.nodes(), \\\n 'The two arguments(node_in&node_out) must be in the graph nodes.'\n node_in.append_output(node_out)\n node_out.append_input(node_in)\n\n def safe_remove_nodes(self, remove_nodes):\n \"\"\"\n Remove nodes safely since links connected to these removed nodes are\n also removed.\n\n Args:\n remove_nodes(set): the nodes prepared to be removed.\n \"\"\"\n if not isinstance(remove_nodes, set):\n if isinstance(remove_nodes, Iterable):\n remove_nodes = set(remove_nodes)\n else:\n remove_nodes = {remove_nodes}\n original_nodes = {n.node for n in remove_nodes}\n core.graph_safe_remove_nodes(self.graph, original_nodes)\n\n def resolve_hazard(self):\n ordered_nodes = core.topology_sort(self.graph)\n var_nodes = dict()\n for node in ordered_nodes:\n if node.is_op() and node.op() is not None:\n for each_var_name in node.op().input_arg_names():\n if each_var_name not in var_nodes:\n var_nodes[each_var_name] = [\n self._find_node_by_name(node.inputs, each_var_name)\n ]\n for each_var_name in node.op().output_arg_names():\n if each_var_name not in var_nodes:\n var_nodes[each_var_name] = [\n self._find_node_by_name(node.outputs, each_var_name)\n ]\n else:\n var_nodes[each_var_name].append(\n self._find_node_by_name(node.outputs,\n each_var_name))\n self.graph.resolve_hazard(var_nodes)\n\n def has_circle(self):\n \"\"\"\n Check if the graph has a circle.\n\n Returns:\n bool: True if the graph has a circle else False.\n \"\"\"\n return core.has_circle(self.graph)\n\n def graph_num(self):\n \"\"\"\n Count the number of unconnected graphs in this graph.\n\n Returns:\n int: the number of unconnected graphs.\n \"\"\"\n return core.graph_num(self.graph)\n\n def topology_sort(self):\n \"\"\"\n Perform the topology sort operation on the graph.\n\n Notes: the `graph` can not contain a circle.\n\n Returns:\n list(IrNode): nodes in topology order.\n \"\"\"\n ordered_nodes = core.topology_sort(self.graph)\n return [IrNode(n) for n in ordered_nodes]\n\n def build_adjacency_list(self):\n \"\"\"\n Build an adjacency list of operations for the `graph`.\n\n Returns:\n dict{IrNode: set(IrNode)}: the adjacency list.\n \"\"\"\n adj_list = core.build_adjacency_list(self.graph)\n wrapped_adj_list = dict()\n for k, v in six.iteritems(adj_list):\n wrapped_adj_list[IrNode(k)] = {IrNode(n) for n in v}\n return wrapped_adj_list\n\n def draw(self, save_path, name, marked_nodes=None, remove_ctr_var=True):\n \"\"\"\n Draw the graph. If `dot` command is installed, the drawn graph\n will be saved as pdf file type, otherwise dot file type is used.\n\n Args:\n save_path(str): the save path of drawn graph.\n name(str): the name of drawn graph.\n marked_nodes(set(IrNode)): nodes that are needed to be marked.\n Default value is None.\n remove_ctr_var(bool): If it is set True, all control variable nodes\n in the graph will be removed. 
Default value is True.\n \"\"\"\n\n def _convert_to_pdf(dot_file_path):\n pdf_save_path = os.path.splitext(dot_file_path)[0] + '.pdf'\n exited_code = subprocess.call(\n 'dot -Tpdf ' + dot_file_path + ' -o ' + pdf_save_path,\n shell=True)\n if exited_code != 0:\n print('The dot command is needed for creating pdf files.')\n print('The {} is saved as the dot filetype.'.format(\n dot_file_path))\n\n remove_ctr_vars = set()\n if remove_ctr_var:\n for node in self.all_var_nodes():\n if node.is_ctrl_var():\n remove_ctr_vars.add(node)\n self.safe_remove_nodes(remove_ctr_vars)\n print('Total ops num = {}.'.format(len(self.all_op_nodes())))\n\n if marked_nodes is not None:\n if not isinstance(marked_nodes, set):\n if isinstance(marked_nodes, Iterable):\n marked_nodes = set(marked_nodes)\n else:\n marked_nodes = {marked_nodes}\n marked_nodes = {n.node for n in marked_nodes}\n remove_ctr_vars = {n.node for n in remove_ctr_vars}\n marked_nodes = marked_nodes - remove_ctr_vars\n if self.graph.has('__graphviz__marked_node__'):\n self.graph.erase('__graphviz__marked_node__')\n self.graph.set('__graphviz__marked_node__', marked_nodes)\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n viz_dot_path = os.path.join(save_path, name) + '.dot'\n viz_pass = core.get_pass('graph_viz_pass')\n viz_pass.set('graph_viz_path', viz_dot_path)\n viz_pass.apply(self.graph)\n _convert_to_pdf(viz_dot_path)\n\n def to_program(self):\n \"\"\"\n Convert the graph into a Program.\n\n WARN: When the graph includes backward operator nodes, the\n conversion process may be failed. Usually, this function is\n only used to convert a test graph.\n\n Returns:\n Program: a program converted from the graph.\n \"\"\"\n convert_pass = core.get_pass('graph_to_program_pass')\n desc = core.ProgramDesc()\n convert_pass.set_not_owned('program', desc)\n convert_pass.apply(self.graph)\n program = Program._construct_from_desc(desc)\n return program\n\n def _find_node_by_name(self, nodes, node_name):\n \"\"\"\n Find a node in the giving nodes set by the name.\n \"\"\"\n target_node = None\n for n in nodes:\n if n.name() == node_name:\n target_node = n\n assert target_node is not None, \"Cannot find the target node in the giving set.\"\n return target_node\n\n def _update_desc_attr(self, desc, name, val):\n \"\"\"\n Update the value of desc's attribute by attribute's name.\n \"\"\"\n if isinstance(val, Block):\n desc.set_block_attr(name, val.desc)\n elif isinstance(val, list) and val and all(\n isinstance(v, Block) for v in val):\n desc.set_blocks_attr(name, [v.desc for v in val])\n elif isinstance(val, core.BlockDesc) or \\\n isinstance(val, core.ProgramDesc):\n desc.set_serialized_attr(name, val.serialize_to_string())\n else:\n desc._set_attr(name, val)\n\n\nclass Program(object):\n \"\"\"\n Create Python Program. It has at least one :ref:`api_guide_Block_en`, when the\n control flow op like conditional_block, while :ref:`api_paddle_fluid_layers_While` is included,\n it will contain nested block.\n\n Please reference the\n `framework.proto <https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto>`_\n for details.\n\n A set of Program usually contains startup program and main program.\n A startup program is set to contain some initial work, eg. 
initialize the ``Parameter``, and the main
    program contains the network structure and the variables used for training.

    A Program can be used for training or testing: a train program contains
    everything needed to build the training network, while a test program
    prunes content that is irrelevant to testing, e.g.
    backward ops and vars.

    **Notes**:
        **we have** :ref:`api_paddle_fluid_framework_default_startup_program` **and** :ref:`api_paddle_fluid_framework_default_main_program`
        **by default, a pair of them will share the parameters. The** :ref:`api_paddle_fluid_framework_default_startup_program` **runs only once to initialize parameters,**
        :ref:`api_paddle_fluid_framework_default_main_program` **runs every mini-batch and adjusts the weights.**

    Returns:
        Program: An empty Program.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.static as static

            paddle.enable_static()

            main_program = static.Program()
            startup_program = static.Program()
            with static.program_guard(main_program=main_program, startup_program=startup_program):
                x = static.data(name="x", shape=[-1, 784], dtype='float32')
                y = static.data(name="y", shape=[-1, 1], dtype='int32')
                z = static.nn.fc(name="fc", x=x, size=10, activation="relu")

            print("main program is: {}".format(main_program))
            print("start up program is: {}".format(startup_program))

    """

    def __init__(self):
        self.desc = core.ProgramDesc()
        self.blocks = [Block(self, 0)]
        self.current_block_idx = 0
        global global_prog_seed
        self._seed = global_prog_seed
        self._current_role = core.op_proto_and_checker_maker.OpRole.Forward
        self.__op_role_var = []

        # for distributed training
        # _is_distributed = True if under distributed training
        self._is_distributed = False
        # _is_chief = True if the trainer is the first one, usually No.0
        self._is_chief = False
        # _parameters_on_pservers records all the parameters distributed on parameter servers.
        self._parameters_on_pservers = None
        # _endpoints is a list of parameter server endpoints, such as ["ip:port", "ip:port"]
        self._endpoints = []
        # if the current role is parameter server, _ps_endpoint is its "ip:port"
        self._ps_endpoint = None
        # trainers_endpoints, it is used for distribution.
        self._trainers_endpoints = []
        # the distributed lookup table names
        self._distributed_lookup_table = None

        # whether to use deep gradient compression (DGC)
        self._enable_dgc = False
        self._use_lamb = False

        self._nccl_comm_num = 1
        self._use_hierarchical_allreduce = False
        self._hierarchical_allreduce_inter_nranks = 0

        # if this program has been optimized by a distributed optimizer,
        # fleet_opt will be given a value
        self._fleet_opt = None
        self._program_config = None

        # assigned if this program has been parsed by a pipeline optimizer
        self._pipeline_opt = None

        # number of times gradients have been appended
        self._appending_grad_times = 0

        # identifier for auto checkpoint
        self._auto_checkpoint_name = unique_name.generate(
            "__auto_checkpoint_program__")

        # compiled program, i.e. Graph
        self._graph = None

    def global_seed(self, seed=0):
        """
        Set the global seed for this Program.

        Returns:
            None.

        Examples:
            .. 
code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n prog = static.default_main_program()\n print(prog.random_seed)\n ## 0\n ## the default random seed is 0\n\n prog.global_seed(102)\n prog1 = static.default_main_program()\n print(prog1.random_seed)\n ## 102\n ## the random seed is 102\n \"\"\"\n global global_prog_seed\n global_prog_seed = seed\n self._seed = global_prog_seed\n\n @property\n def _op_role(self):\n \"\"\"\n The operator role. In a enum {Forward, Backward, Optimize}.\n\n Notes: this is a low level API. It is used only for ParallelExecutor to\n duplicate or schedule operator to devices.\n\n For example, the forward operator should be executed on every device.\n The backward operator should be executed on every device and the\n parameter gradient of backward (use :code:`_op_role_var` to get this\n variable) operator should be merged to one device. The optimization\n operators should be executed on only one device and broadcast the\n optimization result, i.e., the new parameter, to every other device.\n \"\"\"\n return self._current_role\n\n @_op_role.setter\n def _op_role(self, role):\n self._current_role = role\n\n @property\n def _op_role_var(self):\n \"\"\"\n The auxiliary variables for :code:`_op_role` property.\n\n See Also: :code:`Program._op_role`'s documentation for details.\n\n Notes: This is a very low-level API. Users should not use it directly.\n \"\"\"\n return self.__op_role_var\n\n @signature_safe_contextmanager\n def _backward_role_guard(self):\n tmp_role = self._current_role\n\n OpRole = core.op_proto_and_checker_maker.OpRole\n self._current_role = OpRole.Backward\n try:\n yield\n finally:\n self._current_role = tmp_role\n\n @signature_safe_contextmanager\n def _optimized_guard(self, param_and_grads):\n \"\"\"\n A with guard to set :code:`Optimization` :code:`OpRole` and\n :code:`OpRoleVar` automatically.\n\n Notes: This is a very low level API. Users should not use it directly.\n\n Args:\n param_and_grads(list): The variables (names) to be optimized.\n\n Examples:\n\n >>> import paddle.fluid as fluid\n >>> p, g = backward(...)\n >>> with program._optimized_guard([p,g]):\n >>> p = p - 0.001 * g\n \"\"\"\n tmp_role = self._current_role\n tmp_var = self.__op_role_var\n\n OpRole = core.op_proto_and_checker_maker.OpRole\n self._current_role = OpRole.Optimize\n self.__op_role_var = [\n var.name if isinstance(var, Variable) else var\n for var in param_and_grads\n ]\n try:\n yield\n finally:\n self.__op_role_var = tmp_var\n self._current_role = tmp_role\n\n @signature_safe_contextmanager\n def _lr_schedule_guard(self, is_with_opt=False):\n \"\"\"\n A with guard to set :code:`LRSched` :code:`OpRole` and\n :code:`OpRoleVar` automatically. The :code:`OpRoleVar` is\n set to the target learning rate.\n\n Notes: This is a very low level API. Users should not use it directly.\n\n Args:\n is_with_opt: Only set to true if these ops a in the middle\n of a bunch of optimize ops so that it can be treated\n correctly. 
For example, sgd->lr_op->sgd->lr_op->sgd.\n\n Examples:\n\n >>> import paddle.fluid as fluid\n >>> p, g = backward(...)\n >>> with program.lr_schedule_guard():\n >>> lr = lr * decay\n \"\"\"\n\n tmp_role = self._current_role\n tmp_var = self.__op_role_var\n\n OpRole = core.op_proto_and_checker_maker.OpRole\n self._current_role = OpRole.LRSched\n if is_with_opt:\n self._current_role = int(OpRole.LRSched) | int(OpRole.Optimize)\n # TODO(typhoonzero): how to set target learning rate var\n self.__op_role_var = []\n try:\n yield\n finally:\n self.__op_role_var = tmp_var\n self._current_role = tmp_role\n\n def __str__(self):\n \"\"\"\n Get the protobuf debug string of this Program.\n\n Returns:\n (str): The protobuf debug string.\n\n Raises:\n ValueError: If any of required fields is not set.\n \"\"\"\n return self._to_readable_code()\n\n def _to_readable_code(self, skip_op_callstack=True):\n \"\"\"\n Get readable debug string of Program.\n\n .. note::\n If you want to get the debug string in protobuf format,\n please use :code:`to_string` method.\n\n Args:\n skip_op_callstack(bool): whether to skip parsing Operator's attribute\n op_callstack, default value is True\n\n Returns:\n string: The formatted Program string.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n cur_program = static.Program()\n cur_block = cur_program.current_block()\n new_var = cur_block.create_var(name=\"X\",\n shape=[-1, 23, 48],\n dtype='float32')\n new_op = cur_block.append_op(type=\"abs\",\n inputs={\"X\": [new_var]},\n outputs={\"Out\": [new_var]})\n print(cur_program._to_readable_code())\n \"\"\"\n assert isinstance(\n skip_op_callstack, bool\n ), \"skip_op_callstack parameter's type is error, expect bool, received %s\".format(\n type(skip_op_callstack))\n program_str = \"\"\n for block in self.blocks:\n program_str += block._to_readable_code(skip_op_callstack)\n program_str += '\\n'\n return program_str\n\n def to_string(self, throw_on_error, with_details=False):\n \"\"\"\n To debug string.\n\n Args:\n\n throw_on_error (bool): raise Value error when any of required fields is not set.\n\n with_details (bool): True if more details about variables and parameters, e.g., :code:`trainable`, :code:`optimize_attr`, need to print.\n\n Returns:\n str: The debug string describe current Program.\n\n Raises:\n ValueError: If any of required fields is not set and throw_on_error is True.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n prog = static.default_main_program()\n x = static.data(name=\"X\", shape=[2,3], dtype=\"float32\")\n pred = static.nn.fc(x, size=3)\n prog_string = prog.to_string(throw_on_error=True, with_details=False)\n prog_string_with_details = prog.to_string(throw_on_error=False, with_details=True)\n print(\"program string without detail: {}\".format(prog_string))\n print(\"program string with detail: {}\".format(prog_string_with_details))\n \"\"\"\n assert isinstance(\n throw_on_error, bool\n ), \"The type of throw_on_error parameter is wrong, expected bool, but received {}.\".format(\n type(throw_on_error))\n assert isinstance(\n with_details, bool\n ), \"The type of with_details parameter is wrong, expected bool, but received {}.\".format(\n type(with_details))\n\n if with_details:\n res_str = \"\"\n for block in self.blocks:\n res_str += block.to_string(throw_on_error, with_details)\n else:\n protostr = self.desc.serialize_to_string()\n proto = framework_pb2.ProgramDesc.FromString(\n six.binary_type(protostr))\n res_str = _debug_string_(proto, throw_on_error)\n return res_str\n\n def _get_desc(self):\n \"\"\"\n Get the C++ side of `ProgramDesc` object pointer. The C++ object is\n exposed by :code:`pybind`.\n\n Notes: This is a very low level API. Users should not use this API\n directly.\n \"\"\"\n return self.desc\n\n def _version(self):\n return self.desc._version()\n\n def clone(self, for_test=False):\n \"\"\"\n .. note:::\n 1. :code:`Program.clone()` method DOES NOT clone :ref:`api_paddle_io_DataLoader` . \n 2. Recommend you to use :code:`clone` before using :code:`Opimizer.minimize` . \n 3. This API has no effect in Dygraph Mode.\n\n Create a new Program with forward content of original one when ``for_test=True``.\n Create a new Program as same as the original one when ``for_test=False``.\n\n Some operators, e.g., :ref:`api_paddle_fluid_layers_batch_norm` , behave differently between\n training and testing. They have an attribute, :code:`is_test`, to\n control this behaviour. This method will change the :code:`is_test`\n attribute of them to :code:`True` when :code:`for_test=True`.\n\n * Set for_test to False when you want to clone the program for training.\n * Set for_test to True when you want to clone the program for testing.\n We will prune the backward and optimize part of the program when you\n use :code:`clone` after :code:`Opimizer.minimize`, but we still\n recommend you to use :code:`clone` before using :code:`Opimizer.minimize`.\n\n For Example:\n ::\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n img = static.data(name='image', shape=[None, 784])\n pred = static.nn.fc(x=img, size=10, actvation='relu')\n loss = paddle.mean(pred)\n # Here we use clone before Momentum\n test_program = static.default_main_program().clone(for_test=True)\n optimizer = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9)\n optimizer.minimize(loss)\n\n Args:\n\n for_test (bool): True if change the :code:`is_test` attribute of operators to :code:`True`\n and prune the backward and optimize part of the program. The default value is :code:`False` .\n\n Returns:\n Program: A new Program with forward content of original one when ``for_test=True``. A new Program as same as the original one when ``for_test=False``\n\n\n Examples:\n\n .. note::\n The Program's order maybe different after :code:`clone` and\n this will not affect your training or testing progress. 
In the following\n example we give you an simple method :code:`print_prog(program)` to\n print Program Descs inorder to make sure you have same print result\n after :code:`clone`:\n\n .. code-block:: python\n\n import six\n\n def print_prog(prog):\n for name, value in sorted(six.iteritems(prog.block(0).vars)):\n print(value)\n for op in prog.block(0).ops:\n print(\"op type is {}\".format(op.type))\n print(\"op inputs are {}\".format(op.input_arg_names))\n print(\"op outputs are {}\".format(op.output_arg_names))\n for key, value in sorted(six.iteritems(op.all_attrs())):\n if key not in ['op_callstack', 'op_role_var']:\n print(\" [ attrs: {}: {} ]\".format(key, value))\n\n\n 1. To clone a test program, the sample code is:\n .. code-block:: python\n\n import six\n import paddle\n import paddle.static as static\n import paddle.utils as utils\n import paddle.nn.functional as F\n\n paddle.enable_static()\n\n def print_prog(prog):\n for name, value in sorted(six.iteritems(prog.block(0).vars)):\n print(value)\n for op in prog.block(0).ops:\n print(\"op type is {}\".format(op.type))\n print(\"op inputs are {}\".format(op.input_arg_names))\n print(\"op outputs are {}\".format(op.output_arg_names))\n for key, value in sorted(six.iteritems(op.all_attrs())):\n if key not in ['op_callstack', 'op_role_var']:\n print(\" [ attrs: {}: {} ]\".format(key, value))\n\n train_program = static.Program()\n startup_program = static.Program()\n\n # startup_program is used to do some parameter init work,\n # and main program is used to hold the network\n with static.program_guard(train_program, startup_program):\n with utils.unique_name.guard():\n img = static.data(name='image', shape=[None, 784])\n hidden = static.nn.fc(x=img, size=200, activation='relu')\n hidden = F.dropout(hidden, p=0.5)\n loss = F.cross_entropy(\n input=static.nn.fc(x=hidden, size=10, activation='softmax'),\n label=static.data(name='label', shape=[1], dtype='int64'))\n avg_loss = paddle.mean(loss)\n test_program = train_program.clone(for_test=True)\n print_prog(test_program)\n\n # Due to parameter sharing usage for train and test, so we need to use startup program of train\n # instead of using test startup program, while nothing is in test's startup program\n\n # In Paddle we will share weights by using the same Tensor name. In train and test program\n # all parameters will have the same name and this can make train and test program sharing parameters,\n # that's why we need to use startup program of train. And for startup program of test, it has nothing,\n # since it is a new program.\n\n with static.program_guard(train_program, startup_program):\n with utils.unique_name.guard():\n sgd = paddle.optimizer.SGD(learning_rate=1e-3)\n sgd.minimize(avg_loss)\n\n\n 2. The clone method can be avoid if you create program for training and program for testing individually.\n .. 
code-block:: python\n\n import six\n import paddle\n import paddle.static as static\n import paddle.utils as utils\n import paddle.nn.functional as F\n\n paddle.enable_static()\n\n def print_prog(prog):\n for name, value in sorted(six.iteritems(prog.block(0).vars)):\n print(value)\n for op in prog.block(0).ops:\n print(\"op type is {}\".format(op.type))\n print(\"op inputs are {}\".format(op.input_arg_names))\n print(\"op outputs are {}\".format(op.output_arg_names))\n for key, value in sorted(six.iteritems(op.all_attrs())):\n if key not in ['op_callstack', 'op_role_var']:\n print(\" [ attrs: {}: {} ]\".format(key, value))\n\n def network():\n img = static.data(name='image', shape=[None, 784])\n hidden = static.nn.fc(x=img, size=200, activation='relu')\n hidden = F.dropout(hidden, p=0.5)\n loss = F.cross_entropy(\n input=static.nn.fc(x=hidden, size=10, activation='softmax'),\n label=static.data(name='label', shape=[1], dtype='int64'))\n avg_loss = paddle.mean(loss)\n return avg_loss\n\n train_program_2 = static.Program()\n startup_program_2 = static.Program()\n test_program_2 = static.Program()\n with static.program_guard(train_program_2, startup_program_2):\n with utils.unique_name.guard():\n avg_loss = network()\n sgd = paddle.optimizer.SGD(learning_rate=1e-3)\n sgd.minimize(avg_loss)\n # the test startup program is not used.\n with static.program_guard(test_program_2, startup_program_2):\n with utils.unique_name.guard():\n avg_loss = network()\n print_prog(test_program_2)\n\n The two code snippets above will generate and print same programs.\n \"\"\"\n\n # NOTE(zhiqiu): we sync the original program first, since its program may diff with\n # its desc due to modifying desc in c++ space. E.g. save op will add kLookupTablePath in desc.\n self._sync_with_cpp()\n\n pruned_origin_block_id_map = None\n if for_test:\n forward_prog = Program()\n forward_prog.desc, pruned_origin_block_id_map = core.prune_backward(\n self.desc)\n forward_prog.blocks = [\n Block(forward_prog, i)\n for i in six.moves.range(forward_prog.desc.num_blocks())\n ]\n forward_prog._sync_with_cpp()\n p = forward_prog._inference_optimize(prune_read_op=False)\n else:\n p = Program()\n p.current_block_idx = self.current_block_idx\n p._seed = self._seed\n p.desc = core.ProgramDesc(self.desc)\n p.blocks = [\n Block(p, i) for i in six.moves.range(self.desc.num_blocks())\n ]\n\n p._current_role = self._current_role\n p.__op_role_var = self.__op_role_var\n p._appending_grad_times = self._appending_grad_times\n if hasattr(self, 'lr_sheduler'):\n p.lr_sheduler = self.lr_sheduler\n\n # NOTE(zhiqiu): we sync the cloned program, to update its program by\n # its desc.\n p._sync_with_cpp()\n\n p._copy_param_info_from(self)\n p._copy_data_info_from(self, pruned_origin_block_id_map)\n p._copy_dist_param_info_from(self)\n return p\n\n def _prune(self, targets):\n \"\"\"\n Prune operators and variables which are not needed to generate\n :code:`targets`.\n\n Notes: This is a very low level API. Users should not use this API\n directly. This API is in flux and not stable.\n\n Args:\n targets(list|Variable|Operator): A list of variables, operators, or variable names\n need to be pruned\n\n Returns:\n Program: A new, pruned program.\n \"\"\"\n return self._prune_with_input([], targets)\n\n def _prune_with_input(self, feeded_var_names, targets):\n \"\"\"\n Prune operators and variables which are not needed to generate\n :code:`targets`. Prune operators and variables which are needed \n to generate feeded_var \n\n Notes: This is a very low level API. 
Users should not use this API\n        directly. This API is in flux and not stable.\n\n        Args:\n            feeded_var_names(list|str): A list of variable names from where\n                pruning starts. If it is set as [], this API works just like _prune().\n            targets(list|Variable|Operator): A list of variables, operators, or variable names\n                that need to be pruned\n\n        Returns:\n            Program: A new, pruned program.\n        \"\"\"\n\n        # NOTE(zhiqiu): we sync the original program first, since its program may diff with\n        # its desc due to modifying desc in c++ space. E.g. save op will add kLookupTablePath in desc.\n        self._sync_with_cpp()\n\n        if not isinstance(feeded_var_names, list):\n            feeded_var_names = [feeded_var_names]\n        if not isinstance(targets, list):\n            targets = [targets]\n\n        for var in feeded_var_names:\n            if not isinstance(var, six.string_types):\n                raise ValueError(\n                    \"All feeded_var_names of Program._prune_with_input() can only be \"\n                    \"str, but received %s.\" % type(var))\n\n        targets_idx = []\n        for t in targets:\n            if not isinstance(t, Operator):\n                if isinstance(t, Variable):\n                    name = t.name\n                elif isinstance(t, six.string_types):\n                    name = str(t)\n                else:\n                    raise ValueError(\n                        \"All targets of Program._prune_with_input() can only be \"\n                        \"Variable or Operator, but received %s.\" % type(t))\n\n                # NOTE(zhiqiu): For a variable to be fed in fetch_list, there are two cases:\n                # (1) the variable is a leaf; it has no op that generates it;\n                # (2) the variable is not a leaf, and we need to prune the op that generates it.\n                # In both cases, we can simply skip finding the target_op for it.\n                if name in feeded_var_names:\n                    continue\n\n                # After transpiler processing, the op that outputs this\n                # variable may have been changed, so t.op is not reliable\n                # and we need to find the current op that generates this\n                # variable here.\n                target_op = None\n                global_block = self.global_block()\n                for idx, op in enumerate(global_block.ops):\n                    if name in op.output_arg_names:\n                        # NOTE(zhiqiu): Find the op that generates the target name.\n                        # Skip optimize ops except for optimize ops in targets,\n                        # since optimize ops generate parameters.\n                        if op._is_optimize_op() and op not in targets:\n                            continue\n                        else:\n                            target_op = op\n                            break\n                if target_op is None:\n                    raise ValueError(\n                        \"The target variable used for pruning should have an \"\n                        \"associated operator that generates it.\")\n                else:\n                    targets_idx.append([target_op.block.idx, target_op.idx])\n            else:\n                targets_idx.append([t.block.idx, t.idx])\n\n        res = Program()\n        res.desc, pruned_origin_block_id_map = core.prune(self.desc,\n                                                          set(feeded_var_names),\n                                                          targets_idx)\n        res.blocks = [\n            Block(res, i) for i in six.moves.range(res.desc.num_blocks())\n        ]\n        res._sync_with_cpp()\n\n        res._copy_param_info_from(self)\n        res._copy_data_info_from(self, pruned_origin_block_id_map)\n        res._copy_dist_param_info_from(self)\n\n        return res\n\n    def _inference_optimize(self, prune_read_op=True):\n        \"\"\"\n        This method will create a new program and do the following adjustments on it:\n        1. Remove all reader variables and their creator ops if they exist.\n\n        2. Remove the :code:`read_op` if it exists.\n\n        3. Change the :code:`is_test`\n        attribute of operators to :code:`True`. All the :code:`Parameter`\n        information will be lost.\n\n        Args:\n            prune_read_op(bool): remove the read ops that are added by py_reader\n                for the cpp inference library\n\n        Notes: This API is a very low level API.
Use\n        :code:`Program.clone(for_test=True)` instead.\n\n        Returns:\n            Program: The new program.\n        \"\"\"\n        res = Program()\n        res.desc = core.ProgramDesc(self.desc)\n\n        # remove all readers and the read_op if they exist\n        read_op_idx = 0\n        root_block = res.desc.block(0)\n        if prune_read_op:\n            while True:\n                if read_op_idx >= root_block.op_size() or root_block.op(\n                        read_op_idx).type() == 'read':\n                    break\n                read_op_idx += 1\n            if read_op_idx < root_block.op_size():\n                root_block._remove_op(0, read_op_idx + 1)\n            for var in root_block.all_vars():\n                if var.type() == core.VarDesc.VarType.READER:\n                    root_block._remove_var(cpt.to_bytes(var.name()))\n\n        # change all `is_test` attributes to True\n        for i in six.moves.range(res.desc.num_blocks()):\n            block = res.desc.block(i)\n            for j in six.moves.range(block.op_size()):\n                op = block.op(j)\n                if op.has_attr('is_test'):\n                    op._set_attr('is_test', True)\n        res.blocks = [\n            Block(res, i) for i in six.moves.range(res.desc.num_blocks())\n        ]\n        res._sync_with_cpp()\n        return res\n\n    @staticmethod\n    def parse_from_string(binary_str):\n        \"\"\"\n        .. note::\n            1. All information about parameters will be lost after serialization;\n            2. This API has no effect in Dygraph mode.\n\n        Deserialize a Program from a `protobuf <https://en.wikipedia.org/wiki/Protocol_Buffers>`_ binary string.\n        This method is commonly used to save and load models.\n\n        Args:\n\n            binary_str (str): the binary protobuf string.\n\n        Returns:\n            Program: A deserialized Program.\n\n        Examples:\n            .. code-block:: python\n\n                import paddle\n                import paddle.static as static\n\n                paddle.enable_static()\n\n                startup_prog = static.Program()\n                main_prog = static.Program()\n                with static.program_guard(main_prog, startup_prog):\n                    x = static.data(name='X', shape=[1000, 784], dtype='float32')\n\n                    y = static.data(name='Y', shape=[784, 100], dtype='float32')\n\n                    z = paddle.matmul(x=x, y=y)\n\n                    binary_str = static.default_main_program().desc.serialize_to_string()\n                    prog_restored = static.default_main_program().parse_from_string(binary_str)\n\n                    print(static.default_main_program())\n                    print(prog_restored)\n        \"\"\"\n        p = Program()\n        p.desc = core.ProgramDesc(binary_str)\n        p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]\n        p._sync_with_cpp()\n        return p\n\n    @staticmethod\n    def _construct_from_desc(desc):\n        \"\"\"\n        Construct a program from program desc.\n\n        Args:\n            desc(core.ProgramDesc): The program desc for constructing.\n\n        Returns:\n            Program: A program.\n        \"\"\"\n        p = Program()\n        p.desc = desc\n        p.blocks = [Block(p, i) for i in six.moves.range(p.desc.num_blocks())]\n        p._sync_with_cpp()\n        return p\n\n    @property\n    def random_seed(self):\n        \"\"\"\n        The default random seed for random operators in Program. ``0`` means getting\n        the random seed from a random device.\n\n        .. note::\n            It must be set before the operators have been added.\n\n        Returns:\n            int64: Random seed in the current Program\n\n\n        Examples:\n            ..
code-block:: python\n\n import paddle\n import paddle.static as static\n import paddle.nn.functional as F\n\n paddle.enable_static()\n\n prog = static.default_main_program()\n random_seed = prog.random_seed\n x_var = static.data(name=\"X\", shape=[3,3], dtype=\"float32\")\n print(random_seed)\n ## 0\n ## the default random seed is 0\n\n # Here we need to set random seed before we use paddle.nn.functional.dropout\n prog.random_seed = 1\n z_var = F.dropout(x_var, 0.7)\n\n print(prog.random_seed)\n ## 1\n ## the random seed is change to 1\n \"\"\"\n return self._seed\n\n @property\n def num_blocks(self):\n \"\"\"\n The number of :ref:`api_guide_Block_en` in this Program.\n\n .. note:: \n This API has no effect in Dygraph mode.\n\n Returns:\n int(Platform-dependent size): num of :ref:`api_guide_Block_en` in current Program\n\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n prog = static.default_main_program()\n num_blocks = prog.num_blocks\n print(num_blocks)\n\n # print result:\n # 1\n \"\"\"\n return self.desc.num_blocks()\n\n @random_seed.setter\n def random_seed(self, seed):\n if not isinstance(seed, int):\n raise ValueError(\n \"Program.random_seed's input seed must be an integer, but received %s.\"\n % type(seed))\n self._seed = seed\n\n def __repr__(self):\n return self.__str__()\n\n def global_block(self):\n \"\"\"\n .. note::\n This API has no effect in Dygraph mode.\n\n Get the first :ref:`api_guide_Block_en` of this Program.\n\n Returns:\n :ref:`api_guide_Block_en`: The first :ref:`api_guide_Block_en` of this Program.\n\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n prog = static.default_main_program()\n gb_block = prog.global_block()\n print(gb_block)\n\n \"\"\"\n return self.blocks[0]\n\n def block(self, index):\n \"\"\"\n .. note::\n This API has no effect in Dygraph mode.\n\n Get the :code:`index` :ref:`api_guide_Block_en` of this Program\n\n Args:\n index (int) - The index of :ref:`api_guide_Block_en` to get\n\n Returns:\n :ref:`api_guide_Block_en`: The :code:`index` block\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n prog = static.default_main_program()\n block_0 = prog.block(0)\n print(block_0)\n \"\"\"\n return self.blocks[index]\n\n def current_block(self):\n \"\"\"\n .. note::\n This API has no effect in Dygraph mode.\n\n Get the current :ref:`api_guide_Block_en` . The :code:`current` :ref:`api_guide_Block_en`\n is the :ref:`api_guide_Block_en` to append operators.\n\n Returns:\n :ref:`api_guide_Block_en`: The :code:`index` :ref:`api_guide_Block_en`\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.static as static\n\n paddle.enable_static()\n\n prog = static.default_main_program()\n current_blk = prog.current_block()\n print(current_blk)\n \"\"\"\n return self.blocks[self.current_block_idx]\n\n def _create_block(self, parent_idx=None):\n \"\"\"\n Create a new block with the :code:`parent_idx` and change the current block\n to new block.\n\n Args:\n\n parent_idx(int): The parent block index.\n\n Returns:\n Block: The new block.\n \"\"\"\n new_block_idx = len(self.blocks)\n parent = self.current_block() if parent_idx is None else self.block(\n parent_idx)\n self.desc.append_block(parent.desc)\n self.current_block_idx = new_block_idx\n self.blocks.append(Block(self, self.current_block_idx))\n return self.current_block()\n\n def _rollback(self):\n \"\"\"\n Exit a code block, i.e., roll back to the parent block.\n Returns:\n None\n \"\"\"\n self.current_block_idx = self.current_block().parent_idx\n\n def _sync_with_cpp(self):\n \"\"\"\n Synchronize Python instance to its binding C++ object instance.\n If the program is modified in C++ space, this method should be invoked.\n\n Notes: This is a very low level API. Users should not invoke it\n directly.\n\n Returns:\n None\n \"\"\"\n for block_idx in range(len(self.blocks), self.desc.num_blocks()):\n self.blocks.append(Block(self, block_idx))\n for block in self.blocks:\n block._sync_with_cpp()\n\n def _copy_param_info_from(self, other):\n \"\"\"\n Copy the information of parameters from other program.\n\n Notes: This is a very low level API. Users should not invoke it\n directly.\n\n Args:\n other(Program): Other program\n\n Returns:\n None\n \"\"\"\n if not isinstance(other, Program):\n raise TypeError(\n \"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s\"\n % type(other))\n\n self.global_block()._copy_param_info_from(other.global_block())\n\n def _copy_dist_param_info_from(self, other):\n \"\"\"\n Copy the information of distributed information from other program.\n\n Args:\n other(Program): Other program\n\n Returns:\n None\n \"\"\"\n if not isinstance(other, Program):\n raise TypeError(\n \"Function Program._copy_param_info_from() needs to pass in a source Program, but received %s\"\n % type(other))\n self._is_distributed = other._is_distributed\n self._is_chief = other._is_chief\n self._parameters_on_pservers = other._parameters_on_pservers\n self._endpoints = other._endpoints\n self._ps_endpoint = other._ps_endpoint\n self._distributed_lookup_table = other._distributed_lookup_table\n\n def _copy_data_info_from(self, other, pruned_origin_block_id_map=None):\n \"\"\"\n Copy the information of data variables from other program.\n\n Notes: This is a very low level API. Users should not invoke it\n directly.\n\n Args:\n other(Program): Other program\n pruned_origin_block_id_map(dict{int:int}): A dict which maps the block id in program\n self to the block id in program other. For example, {0:0, 1:1, 2:3} means block 0 in self is \n cloned from block 0 in other, etc. 
Default is None, which means the default mapping,\n            {0:0, 1:1, ..., n:n}.\n\n        Returns:\n            None\n        \"\"\"\n        if not isinstance(other, Program):\n            raise TypeError(\n                \"Function Program._copy_data_info_from() needs to pass in a source Program, but received %s\"\n                % type(other))\n\n        if not pruned_origin_block_id_map:\n            pruned_origin_block_id_map = {\n                i: i\n                for i in six.moves.range(self.desc.num_blocks())\n            }\n\n        # NOTE(zhiqiu): All vars in the cloned program exist in the original program.\n        # The reverse is not true, due to backward pruning.\n        for i, block in enumerate(self.blocks):\n            other_block = other.blocks[pruned_origin_block_id_map[i]]\n            for var in list(block.vars.values()):\n                other_var = other_block.var(var.name)\n                if other_var.is_data:\n                    var.is_data = True\n                if other_var.desc.need_check_feed():\n                    var.desc.set_need_check_feed(True)\n                if other_var.stop_gradient:\n                    var.stop_gradient = True\n\n    def list_vars(self):\n        \"\"\"\n        Get all Tensors from this Program. An iterable object is returned.\n\n        Returns:\n            iterable Tensors: The generator will yield every Tensor in this program.\n\n        Examples:\n            .. code-block:: python\n\n                import paddle\n                import paddle.static as static\n\n                paddle.enable_static()\n\n                prog = static.default_main_program()\n                img = static.data(name='img', shape=[None, 1,28,28], dtype='float32')\n                label = static.data(name='label', shape=[None,1], dtype='int64')\n                for var in prog.list_vars():\n                    print(var)\n\n                # var img : paddle.VarType.LOD_TENSOR.shape(-1, 1, 28, 28).astype(VarType.FP32)\n                # var label : paddle.VarType.LOD_TENSOR.shape(-1, 1).astype(VarType.INT64)\n        \"\"\"\n        for each_block in self.blocks:\n            for each_var in list(each_block.vars.values()):\n                yield each_var\n\n    def all_parameters(self):\n        \"\"\"\n        Get all :ref:`api_guide_parameter_en` from this Program. A list object is returned.\n\n        Returns:\n            list[ :ref:`api_guide_parameter_en` ]: The list contains all parameters in this program.\n\n        Examples:\n            .. code-block:: python\n\n                import paddle\n                import paddle.static as static\n\n                paddle.enable_static()\n\n                program = static.default_main_program()\n                data = static.data(name='x', shape=[None, 13], dtype='float32')\n                hidden = static.nn.fc(x=data, size=10)\n                loss = paddle.mean(hidden)\n                paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)\n\n                for param in program.all_parameters():\n                    print(param)\n\n                # This will print all parameters in the current program; in this example,\n                # the result is like:\n                #\n                # persist trainable param fc_0.w_0 : paddle.VarType.LOD_TENSOR.shape(13, 10).astype(VarType.FP32)\n                # persist trainable param fc_0.b_0 : paddle.VarType.LOD_TENSOR.shape(10,).astype(VarType.FP32)\n                #\n                # Here print(param) will print out all the properties of a parameter,\n                # including name, type and persistable. You can access a specific\n                # property of a parameter, such as param.name or param.type\n        \"\"\"\n        parameters = []\n        for each_block in self.blocks:\n            parameters.extend(each_block.all_parameters())\n        return parameters\n\n\n@six.add_metaclass(ParameterMetaClass)\nclass Parameter(Variable):\n    \"\"\"\n    Parameter is derived from Variable.
A parameter is a persistable\n    Variable, and will be updated by optimizers after each iteration.\n    The training of a neural network is essentially the updating of\n    its parameters.\n\n    Relative to a general Variable, a Parameter has several member\n    variables of its own:\n\n    Args:\n        trainable(bool): True if the parameter needs to be updated after\n            iterations.\n        optimize_attr(map): Parameter attributes related to optimizing.\n            Currently, it only contains 'learning_rate'.\n            Default: {'learning_rate': 1.0}\n        regularizer(WeightDecayRegularizer): The Regularizer which will\n            be applied on the parameter. Default: None\n        do_model_average(bool): True if the model average strategy will\n            be applied on this parameter.\n        need_clip (bool): Whether the parameter gradient needs to be clipped\n            by the optimizer. Default is True.\n    \"\"\"\n\n    def __init__(self,\n                 block,\n                 shape,\n                 dtype,\n                 type=core.VarDesc.VarType.LOD_TENSOR,\n                 **kwargs):\n        if shape is None:\n            raise ValueError(\"The shape of Parameter should not be None\")\n        if dtype is None:\n            raise ValueError(\"The dtype of Parameter should not be None\")\n\n        if len(shape) == 0:\n            raise ValueError(\n                \"The dimensions of shape for Parameter must be greater than 0\")\n\n        for each in shape:\n            if each < 0:\n                raise ValueError(\n                    \"Each dimension of shape for Parameter must be greater than 0, but received %s\"\n                    % list(shape))\n\n        Variable.__init__(\n            self,\n            block,\n            persistable=True,\n            shape=shape,\n            dtype=dtype,\n            type=type,\n            **kwargs)\n        self.trainable = kwargs.get('trainable', True)\n\n        self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})\n\n        self.regularizer = kwargs.get('regularizer', None)\n\n        self.do_model_average = kwargs.get('do_model_average', None)\n\n        self.need_clip = kwargs.get('need_clip', True)\n\n        self.is_distributed = False\n\n    def __str__(self):\n        return self._to_readable_code()\n\n    def to_string(self, throw_on_error, with_details=False):\n        \"\"\"\n        To debug string.\n\n        Args:\n            throw_on_error(bool): raise an exception when self is not initialized\n                and throw_on_error is True\n            with_details(bool): more details about variables and parameters\n                (e.g. trainable, optimize_attr, ...) will be printed when with_details is True\n\n        Returns(str): The debug string.\n\n        Examples:\n            .. code-block:: python\n\n                import paddle.fluid as fluid\n\n                prog = fluid.default_main_program()\n                rlt = fluid.layers.data(\"fake_data\", shape=[1,1], dtype='float32')\n                debug_str = prog.to_string(throw_on_error=True, with_details=False)\n                print(debug_str)\n        \"\"\"\n        assert isinstance(throw_on_error, bool) and isinstance(with_details,\n                                                               bool)\n        if with_details:\n            res_str = Variable.to_string(self, throw_on_error, True)\n            additional_attr = (\"trainable\", \"optimize_attr\", \"regularizer\",\n                               \"do_model_average\", \"need_clip\")\n            for attr_name in additional_attr:\n                res_str += \"%s: %s\\n\" % (attr_name,\n                                         cpt.to_text(getattr(self, attr_name)))\n        else:\n            res_str = Variable.to_string(self, throw_on_error, False)\n        return res_str\n\n    __repr__ = __str__\n\n\nclass ParamBase(core.VarBase):\n    \"\"\"\n    ParamBase is derived from Tensor (which is the concept in Dygraph Mode).
\n    A ParamBase is a persistable Tensor, and will be updated by optimizers\n    after each iteration.\n    The training of a neural network is essentially the updating of\n    its ParamBases.\n\n    Relative to a general Tensor, a ParamBase has several member\n    variables of its own:\n\n    Args:\n        trainable(bool): True if the ParamBase needs to be updated after\n            iterations.\n        optimize_attr(map): ParamBase attributes related to optimizing.\n            Currently, it only contains 'learning_rate'.\n            Default: {'learning_rate': 1.0}\n        regularizer(WeightDecayRegularizer): The Regularizer which will\n            be applied on the ParamBase. Default: None\n        do_model_average(bool): True if the model average strategy will\n            be applied on this ParamBase.\n        need_clip (bool): Whether the parameter gradient needs to be clipped\n            by the optimizer. Default is True.\n    \"\"\"\n\n    @dygraph_only\n    def __init__(self, shape, dtype, **kwargs):\n        if shape is None:\n            raise ValueError(\"The shape of Parameter should not be None\")\n        if dtype is None:\n            raise ValueError(\"The dtype of Parameter should not be None\")\n\n        if len(shape) == 0:\n            raise ValueError(\n                \"The dimensions of shape for Parameter must be greater than 0\")\n\n        for each in shape:\n            if each < 0:\n                raise ValueError(\n                    \"Each dimension of shape for Parameter must be greater than 0, but received %s\"\n                    % list(shape))\n\n        if dtype is not None:\n            if not isinstance(dtype, core.VarDesc.VarType):\n                dtype = convert_np_dtype_to_dtype_(dtype)\n\n        name = kwargs.get('name', unique_name.generate('_param_base'))\n\n        super(ParamBase, self).__init__(dtype\n                                        if dtype else core.VarDesc.VarType.FP32,\n                                        list(shape) if shape else [], name,\n                                        core.VarDesc.VarType.LOD_TENSOR, True)\n\n        trainable = kwargs.get('trainable', True)\n        self.stop_gradient = not trainable\n\n        self.optimize_attr = kwargs.get('optimize_attr', {'learning_rate': 1.0})\n\n        self.regularizer = kwargs.get('regularizer', None)\n\n        self.do_model_average = kwargs.get('do_model_average', None)\n\n        self.need_clip = kwargs.get('need_clip', True)\n\n        self.is_distributed = False\n        # self.block = default_main_program().global_block()\n\n    @property\n    def trainable(self):\n        return not self.stop_gradient\n\n    @trainable.setter\n    def trainable(self, trainable):\n        if isinstance(trainable, bool):\n            self.stop_gradient = not trainable\n        else:\n            raise ValueError(\n                \"The type of trainable MUST be bool, but the type is \",\n                type(trainable))\n\n    def __str__(self):\n        \"\"\"\n        Convert a ParamBase object to a readable string.\n\n        Returns(str): A readable string.\n\n        Examples:\n            .. code-block:: python\n\n                import paddle\n                linear = paddle.nn.Linear(3, 3)\n                print(linear.weight)\n                # Parameter containing:\n                # Tensor(shape=[3, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=False,\n                #        [[ 0.48948765,  0.05829060, -0.25524026],\n                #         [-0.70368278,  0.52986908, -0.68742192],\n                #         [-0.54217887,  0.48439729,  0.34082305]])\n        \"\"\"\n        return \"Parameter containing:\\n{tensor}\".format(\n            tensor=super(ParamBase, self).__str__())\n\n    def __deepcopy__(self, memo):\n        \"\"\"\n        Deep copy the parameter; it always performs a Tensor copy.\n\n        Examples:\n            ..
code-block:: python\n\n                import paddle\n                import copy\n                linear = paddle.nn.Linear(1, 3)\n                linear_copy = copy.deepcopy(linear)\n\n                print(linear.weight)\n                # Parameter containing:\n                # Tensor(shape=[1, 3], dtype=float32, place=CPUPlace, stop_gradient=False,\n                #        [[-0.30929261, -0.90929240, -1.07851017]])\n\n                print(linear_copy.weight)\n                # Parameter containing:\n                # Tensor(shape=[1, 3], dtype=float32, place=CPUPlace, stop_gradient=False,\n                #        [[-0.30929261, -0.90929240, -1.07851017]])\n\n        \"\"\"\n        state = copy.deepcopy(self.__dict__, memo)\n        state[\"name\"] = self.name + unique_name.generate(\"_deepcopy\")\n        new_param = ParamBase(self.shape, self.dtype, **state)\n        memo[id(self)] = new_param\n        new_param.copy_(self, True)\n        return new_param\n\n    __repr__ = __str__\n\n\n# program is a global instance.\n_main_program_ = Program()\n_startup_program_ = Program()\n\n\ndef default_startup_program():\n    \"\"\"\n    Get the default/global startup program.\n\n    The :code:`paddle.nn` function will append the initialization operators into the startup program.\n    The :code:`startup_program` will initialize the parameters with these OPs.\n\n    This method will return the default or the current startup program. Users can use\n    :ref:`api_paddle_fluid_framework_program_guard` to switch :ref:`api_paddle_fluid_framework_Program` .\n\n    Returns:\n        Program: current default startup program.\n\n    Returns type: Program\n\n    Examples:\n        .. code-block:: python\n\n            import paddle\n\n            paddle.enable_static()\n            x = paddle.static.data(name=\"x\", shape=[-1, 784], dtype='float32')\n            out = paddle.static.nn.fc(name=\"fc\", x=x, size=10, activation=\"relu\")\n            print(\"main program is: {}\".format(paddle.static.default_main_program()))\n            print(\"start up program is: {}\".format(paddle.static.default_startup_program()))\n    \"\"\"\n    return _startup_program_\n\n\ndef default_main_program():\n    \"\"\"\n    This API can be used to get the ``default main program`` which stores the\n    descriptions of Ops and tensors.\n\n    For example, ``z = paddle.add(x, y)`` will create a new ``add``\n    Op and a new ``z`` tensor, and they will be recorded in the ``default main program`` .\n\n    The ``default main program`` is the default value for the ``Program`` parameter in\n    many APIs. For example, :code:`Executor.run()` will execute the\n    :code:`default_main_program` when the program is not specified.\n\n    If you want to switch the ``default main program``, you can use :ref:`api_paddle_fluid_framework_program_guard` .\n\n    Returns:\n        Program: A ``Program`` which holds the descriptions of OPs and tensors in the network.\n\n    Examples:\n        ..
code-block:: python\n\n            import paddle\n\n            paddle.enable_static()\n            # Sample Network:\n            x = paddle.static.data(name='x', shape=[100, 100], dtype='float32')\n            y = paddle.static.data(name='y', shape=[100, 100], dtype='float32')\n            out = paddle.add(x, y)\n\n            # print the number of blocks in the program, 1 in this case\n            print(paddle.static.default_main_program().num_blocks) # 1\n            # print the default_main_program\n            print(paddle.static.default_main_program())\n    \"\"\"\n    return _main_program_\n\n\ndef switch_main_program(program):\n    \"\"\"\n    Switch the main program to a new program.\n\n    Args:\n        program(Program): The new main program\n\n    Returns:\n        Program: The previous main program\n    \"\"\"\n    global _main_program_\n    prev_program = _main_program_\n    _main_program_ = program\n    return prev_program\n\n\ndef switch_startup_program(program):\n    \"\"\"\n    Switch the startup program to a new program.\n\n    Args:\n        program(Program): The new startup program\n\n    Returns:\n        Program: The previous startup program\n    \"\"\"\n    global _startup_program_\n    prev_program = _startup_program_\n    _startup_program_ = program\n    return prev_program\n\n\n@signature_safe_contextmanager\ndef program_guard(main_program, startup_program=None):\n    \"\"\"\n    :api_attr: Static Graph\n\n    Change the global main program and startup program with the ``with`` statement.\n    Layer functions in the Python ``with`` block will append operators and\n    Tensors to the new main program.\n\n    Args:\n        main_program(Program): New main program inside ``with`` statement.\n        startup_program(Program, optional): New startup program inside ``with``\n            statement. :code:`None` means not changing the startup program,\n            default_startup_program is still used.\n            Default: None.\n\n    Examples:\n        .. code-block:: python\n\n            import paddle\n\n            paddle.enable_static()\n            main_program = paddle.static.Program()\n            startup_program = paddle.static.Program()\n            with paddle.static.program_guard(main_program, startup_program):\n                data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')\n                hidden = paddle.static.nn.fc(x=data, size=10, activation='relu')\n\n    Notes: A temporary :code:`Program` can be used if the user does not need\n    to construct either the startup program or the main program.\n\n    Examples:\n        .. code-block:: python\n\n            import paddle\n\n            paddle.enable_static()\n            main_program = paddle.static.Program()\n            # does not care about the startup program.
Just pass a temporary value.\n with paddle.static.program_guard(main_program, paddle.static.Program()):\n data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')\n\n \"\"\"\n from .data_feeder import check_type\n check_type(main_program, 'main_program', Program,\n 'paddle.static.program_guard')\n main_program = switch_main_program(main_program)\n if startup_program is not None:\n check_type(startup_program, 'startup_program', Program,\n 'paddle.static.program_guard')\n startup_program = switch_startup_program(startup_program)\n try:\n yield\n finally:\n switch_main_program(main_program)\n if startup_program is not None:\n switch_startup_program(startup_program)\n\n\ndef _get_var(name, program=None):\n \"\"\"\n Get a variable by name from the global block of a program.\n\n Args:\n name(str): name of the variable\n program(Program|None): program object.\n If None, default_global_program() will be used.\n\n Returns:\n Variable\n \"\"\"\n if program is None:\n program = default_main_program()\n assert isinstance(name, str)\n assert isinstance(program, Program)\n\n return program.global_block().var(name)\n\n\n@signature_safe_contextmanager\ndef _dygraph_guard(tracer):\n global _dygraph_tracer_\n tmp_tracer = _dygraph_tracer_\n _dygraph_tracer_ = tracer\n core._switch_tracer(tracer)\n\n try:\n yield\n finally:\n core._switch_tracer(tmp_tracer)\n _dygraph_tracer_ = tmp_tracer\n\n\n@signature_safe_contextmanager\ndef _dygraph_place_guard(place):\n global _global_expected_place_\n tmp_place = _global_expected_place_\n _global_expected_place_ = place\n\n _set_dygraph_tracer_expected_place(place)\n\n try:\n yield\n finally:\n _global_expected_place_ = tmp_place\n _set_dygraph_tracer_expected_place(tmp_place)\n\n\ndef load_op_library(lib_filename):\n \"\"\"\n :api_attr: Static Graph\n\n Load a dynamic library, including custom operators and kernels.\n When library is loaded, ops and kernels registered in the library\n will be available in PaddlePaddle main process.\n Please note, the type of custom operators can't have the same type\n with the existing operators in the framework.\n\n Args:\n lib_filename (str): name of dynamic library.\n \n Returns:\n list[str]: new registered custom op names.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n #fluid.load_op_library('custom_op.so')\n\n \"\"\"\n core.load_op_library(lib_filename)\n return OpProtoHolder.instance().update_op_proto()\n\n\ndef switch_device(device):\n global _current_device\n pre_device = _current_device\n _current_device = device\n return pre_device\n\n\n@signature_safe_contextmanager\ndef device_guard(device=None):\n \"\"\"\n **Notes**:\n **The API only supports static mode.**\n\n A context manager that specifies the device on which the OP will be placed.\n\n Args:\n device(str|None): Specify the device to use in the context. It should be 'cpu' or 'gpu',\n When it is set to 'cpu' or 'gpu', all OPs created in the context will be\n placed on CPUPlace or CUDAPlace. When 'gpu' is set and the program runs on\n single-card, the device index will be the same as the device on which the\n executor runs. Default: None, OPs in this context will be automatically\n assigned devices.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n\n paddle.enable_static()\n support_gpu = paddle.is_compiled_with_cuda()\n place = paddle.CPUPlace()\n if support_gpu:\n place = paddle.CUDAPlace(0)\n\n # if GPU is supported, the three OPs below will be automatically assigned to CUDAPlace(0)\n data1 = paddle.full(shape=[1, 3, 8, 8], fill_value=0.5, dtype='float32')\n data2 = paddle.full(shape=[1, 3, 64], fill_value=0.5, dtype='float32')\n shape = paddle.shape(data2)\n\n with paddle.static.device_guard(\"cpu\"):\n # Ops created here will be placed on CPUPlace\n shape = paddle.slice(shape, axes=[0], starts=[0], ends=[4])\n with paddle.static.device_guard('gpu'):\n # if GPU is supported, OPs created here will be placed on CUDAPlace(0), otherwise on CPUPlace\n out = paddle.reshape(data1, shape=shape)\n\n exe = paddle.static.Executor(place)\n exe.run(paddle.static.default_startup_program())\n result = exe.run(fetch_list=[out])\n \"\"\"\n\n index = None\n if device and ':' in device:\n device, index = device.split(':')\n if device == 'cpu':\n raise ValueError(\"Should not set device id for cpu.\")\n if device not in ['cpu', 'gpu', '', None]:\n raise ValueError(\n \"The Attr(device) should be 'cpu' or 'gpu', and it can also be empty string or None \"\n \"when there is no need to specify device. But received %s\" % device)\n if index:\n device = \":\".join([device, index])\n pre_device = switch_device(device)\n try:\n yield\n finally:\n switch_device(pre_device)\n\n\ndef set_flags(flags):\n \"\"\"\n This function sets the GFlags value in Paddle.\n\n Args:\n flags (dict): A dict contains flags and its value.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n fluid.set_flags({'FLAGS_eager_delete_tensor_gb': 1.0})\n \"\"\"\n if not isinstance(flags, dict):\n raise TypeError('flags in set_flags should be a dict')\n for key, value in flags.items():\n if core.globals().is_public(key):\n core.globals()[key] = value\n else:\n raise ValueError(\n \"Flag %s cannot set its value through this function.\" % (key))\n\n\ndef get_flags(flags):\n \"\"\"\n This function gets the GFlags value in Paddle.\n\n Args:\n flags(list|tuple|str): A list/tuple of string or a string which is the flag's name.\n\n Returns:\n flag's value in Paddle.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n\n flags = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf']\n res = fluid.get_flags(flags)\n print(res)\n # {'FLAGS_eager_delete_tensor_gb': 0.0, 'FLAGS_check_nan_inf': False}\n \"\"\"\n flags_value = {}\n if isinstance(flags, (list, tuple)):\n for key in flags:\n if (core.globals().is_public(key)):\n value = core.globals()[key]\n temp = {key: value}\n flags_value.update(temp)\n else:\n raise ValueError(\n 'Flag %s cannot get its value through this function.' %\n (key))\n elif isinstance(flags, str):\n if (core.globals().is_public(flags)):\n value = core.globals()[flags]\n temp = {flags: value}\n flags_value.update(temp)\n else:\n raise ValueError(\n 'Flag %s cannot get its value through this function.' 
% (flags))\n    else:\n        raise TypeError('Flags in get_flags should be a list, tuple or string.')\n    return flags_value\n\n\ndef _get_paddle_place(place):\n    \"Convert a string to a Paddle Place.\"\n    if place is None:\n        return place\n    if isinstance(place, (core.Place, core.XPUPlace, core.CPUPlace,\n                          core.CUDAPinnedPlace, core.CUDAPlace)):\n        return place\n\n    if not isinstance(place, str):\n        raise ValueError(\n            \"place only supports a string such as 'cpu', 'gpu', 'gpu:x', 'xpu:x' or 'gpu_pinned'.\")\n\n    place = place.lower()\n    if (place == \"cpu\"):\n        return core.CPUPlace()\n    if (place == \"device\"):\n        return core.Place()\n\n    available_gpu_place = re.match(r'gpu:\\d+', place)\n    if place == \"gpu_pinned\" or place == \"gpu\" or available_gpu_place:\n        if not core.is_compiled_with_cuda():\n            raise ValueError(\n                \"The device should not be {}, since PaddlePaddle is \" \\\n                \"not compiled with CUDA\".format(available_gpu_place))\n        if place == \"gpu_pinned\":\n            return core.CUDAPinnedPlace()\n        elif place == \"gpu\":\n            return core.CUDAPlace(0)\n        else:\n            place_info_list = place.split(':', 1)\n            device_id = place_info_list[1]\n            device_id = int(device_id)\n            return core.CUDAPlace(device_id)\n    available_xpu_place = re.match(r'xpu:\\d+', place)\n    if available_xpu_place:\n        if not core.is_compiled_with_xpu():\n            raise ValueError(\n                \"The device should not be {}, since PaddlePaddle is \" \\\n                \"not compiled with XPU\".format(available_xpu_place))\n        place_info_list = place.split(':', 1)\n        device_id = place_info_list[1]\n        device_id = int(device_id)\n        return core.XPUPlace(device_id)\n    raise ValueError(\n        \"Paddle supports CPUPlace, CUDAPlace, CUDAPinnedPlace and XPUPlace; please check your place input\"\n    )\n\n\ndef _get_paddle_place_list(places):\n\n    if not isinstance(places, (list, tuple)):\n        raise TypeError(\"places must be a list or a tuple\")\n\n    ret = []\n    for p in places:\n        p = _get_paddle_place(p)\n        ret.append(p)\n\n    return ret\n" ]
[ [ "numpy.array", "numpy.dtype" ] ]
tliu68/maggot_connectome
[ "ef4bbd2011fa9e03da187fcca8c8c1ca79209a36" ]
[ "pkg/pkg/graph/graph.py" ]
[ "from copy import deepcopy\n\nimport networkx as nx\nimport numpy as np\nimport pandas as pd\n\nfrom ..utils import get_paired_inds, to_pandas_edgelist\n\n\nclass MaggotGraph:\n def __init__(self, g, nodes=None, edges=None):\n self.g = g\n # TODO add checks for when nodes/edges are passed, do they actually match the\n # graph?\n if nodes is None:\n # TODO\n raise NotImplementedError()\n self.nodes = nodes\n if edges is None:\n edges = to_pandas_edgelist(g)\n self.edges = edges\n self._node_columns = nodes.columns\n self._single_type = False\n if edges[\"edge_type\"].nunique() == 1:\n self._single_type = True\n\n def to_edge_type_graph(self, edge_type):\n type_edges = self.edges[self.edges[\"edge_type\"] == edge_type]\n view = nx.edge_subgraph(self.g, type_edges.index)\n return MaggotGraph(view, self.nodes, type_edges)\n\n @property\n def edge_types(self):\n return sorted(self.edges[\"edge_type\"].unique())\n\n @property\n def aa(self):\n return self.to_edge_type_graph(\"aa\")\n\n @property\n def ad(self):\n return self.to_edge_type_graph(\"ad\")\n\n @property\n def da(self):\n return self.to_edge_type_graph(\"da\")\n\n @property\n def dd(self):\n return self.to_edge_type_graph(\"dd\")\n\n @property\n def sum(self):\n return self.to_edge_type_graph(\"sum\")\n\n @property\n def adj(self, edge_type=None):\n if self._single_type:\n adj = nx.to_numpy_array(self.g, nodelist=self.nodes.index)\n return adj\n elif edge_type is not None:\n etg = self.to_edge_type_graph(edge_type)\n return etg.adj()\n else:\n msg = \"Current MaggotGraph has more than one edge type. \"\n msg += \"Use .adjs() method instead to specify multple edge types.\"\n raise ValueError(msg)\n\n def node_subgraph(self, source_node_ids, target_node_ids=None):\n # if target_node_ids is None: # induced subgraph on source nodes\n # # TODO don't really need two cases here\n # sub_g = self.g.subgraph(source_node_ids)\n # sub_nodes = self.nodes.reindex(source_node_ids)\n # sub_edges = to_pandas_edgelist(sub_g)\n # return MaggotGraph(sub_g, sub_nodes, sub_edges)\n # else: # subgraph defined on a set of nodes, but not necessarily induced\n induced = False\n if target_node_ids is None:\n target_node_ids = source_node_ids\n induced = True\n edges = self.edges\n nodes = self.nodes\n source_edges = edges[edges.source.isin(source_node_ids)]\n source_target_edges = source_edges[source_edges.target.isin(target_node_ids)]\n sub_g = self.g.edge_subgraph(source_target_edges.index)\n sub_nodes = nodes[\n nodes.index.isin(source_node_ids) | nodes.index.isin(target_node_ids)\n ]\n if induced:\n sub_nodes = sub_nodes.reindex(source_node_ids)\n # TODO what ordering makes sense when the subgraph is not induced\n return MaggotGraph(sub_g, sub_nodes, source_target_edges)\n\n def copy(self):\n return deepcopy(self)\n\n def __len__(self):\n return len(self.g)\n\n def __repr__(self):\n return self.summary_statistics.__repr__()\n\n def _repr_html_(self):\n return self.summary_statistics._repr_html_()\n\n @property\n def summary_statistics(self):\n edge_types = self.edge_types\n edges = self.edges\n cols = []\n for edge_type in edge_types:\n type_edges = edges[edges[\"edge_type\"] == edge_type]\n # number of actual nodes being used (ignoring edgeless ones)\n n_nodes = len(np.unique(type_edges[[\"source\", \"target\"]].values.ravel()))\n n_edges = len(type_edges)\n edgesum = type_edges[\"weight\"].sum()\n data = [n_nodes, n_edges, edgesum]\n index = [\"n_nodes\", \"n_edges\", \"sum_edge_weights\"]\n cols.append(pd.Series(index=index, data=data, 
name=edge_type))\n results = pd.DataFrame(cols)\n results.index.name = \"edge_type\"\n return results\n\n def __getitem__(self, key):\n if isinstance(key, pd.Series) and key.dtype == bool:\n return self.node_subgraph(key[key].index)\n\n def __setitem__(self, key, val):\n self.nodes[key] = val\n\n def bisect(\n self,\n paired=False,\n lcc=False,\n check_in=True,\n pair_key=\"pair\",\n pair_id_key=\"pair_id\",\n ):\n \"\"\"[summary]\n\n Parameters\n ----------\n paired : bool, optional\n If ``paired``, return subgraphs only for paired neurons and indexed the same\n for left and right. Otherwise, return subgraphs in any order, and for all\n left/right neurons.\n\n Raises\n ------\n NotImplementedError\n [description]\n \"\"\"\n nodes = self.nodes\n if paired:\n lp_inds, rp_inds = get_paired_inds(\n nodes,\n check_in=check_in,\n pair_key=pair_key,\n pair_id_key=pair_id_key,\n )\n left_ids = nodes.iloc[lp_inds].index\n right_ids = nodes.iloc[rp_inds].index\n else:\n left_ids = nodes[nodes[\"hemisphere\"] == \"L\"].index\n right_ids = nodes[nodes[\"hemisphere\"] == \"R\"].index\n left_left_mg = self.node_subgraph(left_ids)\n right_right_mg = self.node_subgraph(right_ids)\n left_right_mg = self.node_subgraph(left_ids, right_ids)\n left_left_mg = self.node_subgraph(left_ids)\n right_left_mg = self.node_subgraph(right_ids, left_ids)\n\n if lcc:\n raise NotImplementedError()\n # TODO add something about checking for largest connected components here as\n # an option\n\n return left_left_mg, right_right_mg, left_right_mg, right_left_mg\n\n def fix_pairs(self, pair_key=\"pair\", pair_id_key=\"pair_id\"):\n nodes = self.nodes\n for node_id, row in nodes.iterrows():\n pair = row[pair_key]\n if pair != -1:\n if pair not in nodes.index:\n row[pair_key] = -1\n row[pair_id_key] = -1\n print(f\"Removing invalid pair: {node_id} to {pair}\")\n\n def to_largest_connected_component(self):\n raise NotImplementedError()\n" ]
[ [ "pandas.DataFrame", "pandas.Series" ] ]
kilianovski/bootcamp
[ "8f3a753592ecb931815fde068f6377485e3fbe79" ]
[ "solutions/video_similarity_search/object_detection/server/src/encode_resnet50.py" ]
[ "import numpy as np\nfrom tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.applications.resnet50 import preprocess_input as preprocess_input_resnet50\nfrom tensorflow.keras.preprocessing import image\nfrom numpy import linalg as LA\n\nclass CustomOperator:\n \"\"\"\n Say something about the ExampleCalass...\n\n Args:\n args_0 (`type`):\n ...\n \"\"\"\n def __init__(self):\n self.input_shape = (224, 224, 3)\n self.weight = 'imagenet'\n self.pooling = 'max'\n self.model_resnet50 = ResNet50(weights='imagenet',\n input_shape=(self.input_shape[0], self.input_shape[1], self.input_shape[2]),\n pooling=self.pooling, include_top=False)\n self.model_resnet50.predict(np.zeros((1, 224, 224, 3)))\n\n def execute(self, img_path):\n # Return the embedding([[list]]) of the images\n img = image.load_img(img_path, target_size=(self.input_shape[0], self.input_shape[1]))\n img = image.img_to_array(img)\n img = np.expand_dims(img, axis=0)\n img = preprocess_input_resnet50(img)\n feat = self.model_resnet50.predict(img)\n norm_feat = feat[0] / LA.norm(feat[0])\n return norm_feat.tolist()\n" ]
[ [ "numpy.linalg.norm", "tensorflow.keras.preprocessing.image.load_img", "numpy.zeros", "tensorflow.keras.applications.resnet50.preprocess_input", "tensorflow.keras.applications.resnet50.ResNet50", "tensorflow.keras.preprocessing.image.img_to_array", "numpy.expand_dims" ] ]
nishant34/RotSolver
[ "d50def173eed2ebc782d51942303ce5d91031f42" ]
[ "rotation_dataloader.py" ]
[ "import numpy as np\r\nimport os\r\n\r\n\r\nclass relative_camera_poses_data:\r\n \r\n \"\"\"\r\n Class to load relative rotations data for differentiable rotation averaging.\r\n THe data format should be as follows-->\r\n Root_dir\r\n | -rotations.npy --> relative rotations pairwise.\r\n | -translations.npy --> relative translations pair wise.\r\n \"\"\"\r\n\r\n def __init__(self, data_dir):\r\n self.data_dir = data_dir\r\n self.rot_file_path = os.path.join(data_dir, \"rotations.npy\")\r\n self.trans_file_path = os.path.join(data_dir, \"translations.npy\")\r\n \r\n #extracting data\r\n self.relative_rotations = np.load(self.rot_file_path)\r\n self.relative_translations = np.load(self.trans_file_path)\r\n #reshaping from (num_images,3,3) to (num_images,9)\r\n self.relative_rotations = np.reshape(self.relative_rotations, (-1,9))\r\n \r\n assert self.relative_rotations.shape[0] == self.relative_translations.shape[0], \"Inconsistent data\"\r\n\r\n \r\n def _get_rot_(self, index):\r\n return self.relative_rotations[index]\r\n\r\n \r\n def _get_trans_(self, index):\r\n return self.relative_translations[index]\r\n \r\n\r\n def _get_all_rots_(self):\r\n return self.relative_rotations\r\n \r\n def _get_all_trans_(self):\r\n return self.relative_translations\r\n \r\n def _get_num_features_(self):\r\n return self.relative_rotations.shape[-1]\r\n \r\n def _len_(self):\r\n return self.relative_rotations.shape[0]\r\n \r\n\r\n\r\n \r\n" ]
[ [ "numpy.load", "numpy.reshape" ] ]
Engineero/tensorflow
[ "402d28705e426fea7aad6bbbe405a11daa6b6cd5" ]
[ "tensorflow/lite/python/util.py" ]
[ "# Lint as: python2, python3\n# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions used by multiple converter files.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport copy\nimport datetime\nimport sys\n\nfrom absl import logging\nimport six\nfrom six.moves import range\n\nimport flatbuffers\nfrom tensorflow.core.protobuf import config_pb2 as _config_pb2\nfrom tensorflow.core.protobuf import graph_debug_info_pb2\nfrom tensorflow.core.protobuf import meta_graph_pb2 as _meta_graph_pb2\nfrom tensorflow.lite.python import schema_py_generated as schema_fb\nfrom tensorflow.lite.python import schema_util\nfrom tensorflow.lite.python import tflite_keras_util as _tflite_keras_util\nfrom tensorflow.lite.python.op_hint import convert_op_hints_to_stubs\nfrom tensorflow.lite.python.op_hint import find_all_hinted_output_nodes\nfrom tensorflow.lite.toco import types_pb2 as _types_pb2\nfrom tensorflow.python.eager import function\nfrom tensorflow.python.framework import convert_to_constants as _convert_to_constants\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import error_interpolation as _error_interpolation\nfrom tensorflow.python.framework import graph_util as tf_graph_util\nfrom tensorflow.python.grappler import tf_optimizer\nfrom tensorflow.python.training.saver import export_meta_graph as _export_meta_graph\n\n# Keras functions used by TFLite\nmodel_input_signature = _tflite_keras_util.model_input_signature\ntrace_model_call = _tflite_keras_util.trace_model_call\n\n# Map of tf.dtypes to TFLite types_flag_pb2.\n_MAP_TF_TO_TFLITE_TYPES = {\n dtypes.float32: _types_pb2.FLOAT,\n dtypes.float16: _types_pb2.FLOAT16,\n dtypes.int32: _types_pb2.INT32,\n dtypes.uint8: _types_pb2.QUANTIZED_UINT8,\n dtypes.int64: _types_pb2.INT64,\n dtypes.uint64: _types_pb2.UINT64,\n dtypes.string: _types_pb2.STRING,\n dtypes.bool: _types_pb2.BOOL,\n dtypes.int16: _types_pb2.QUANTIZED_INT16,\n dtypes.complex64: _types_pb2.COMPLEX64,\n dtypes.int8: _types_pb2.INT8,\n dtypes.float64: _types_pb2.FLOAT64,\n dtypes.complex128: _types_pb2.COMPLEX128,\n}\n\n_MAP_TFLITE_ENUM_TO_TF_TYPES = {\n 0: dtypes.float32,\n 1: dtypes.float16,\n 2: dtypes.int32,\n 3: dtypes.uint8,\n 4: dtypes.int64,\n 5: dtypes.string,\n 6: dtypes.bool,\n 7: dtypes.int16,\n 8: dtypes.complex64,\n 9: dtypes.int8,\n 10: dtypes.float64,\n 11: dtypes.complex128,\n}\n\n_TFLITE_FILE_IDENTIFIER = b\"TFL3\"\n\n_MAP_QUANT_TO_IO_TYPES = {\n dtypes.int8: {dtypes.int8, dtypes.uint8},\n dtypes.int16: {dtypes.int16},\n}\n\n\ndef convert_dtype_to_tflite_type(tf_dtype):\n \"\"\"Converts tf.dtype to TFLite proto type.\n\n Args:\n tf_dtype: tf.dtype\n\n Raises:\n ValueError: Unsupported tf.dtype.\n\n Returns:\n types_flag_pb2.\n \"\"\"\n result = _MAP_TF_TO_TFLITE_TYPES.get(tf_dtype)\n if result is None:\n 
raise ValueError(\"Unsupported tf.dtype {0}\".format(tf_dtype))\n return result\n\n\ndef _convert_tflite_enum_type_to_tf_type(tflite_enum_type):\n \"\"\"Converts tflite enum type (eg: 0) to tf type (eg: tf.float32).\n\n Args:\n tflite_enum_type: tflite enum type (eg: 0, that corresponds to float32)\n\n Raises:\n ValueError: If an invalid tflite enum type is provided.\n\n Returns:\n tf type (eg: tf.float32)\n \"\"\"\n tf_type = _MAP_TFLITE_ENUM_TO_TF_TYPES.get(tflite_enum_type)\n if tf_type is None:\n raise ValueError(\n \"Unsupported enum {}. The valid map of enum to tf types is : {}\"\n .format(tflite_enum_type, _MAP_TFLITE_ENUM_TO_TF_TYPES))\n return tf_type\n\n\ndef _get_tf_type_name(tf_type):\n \"\"\"Converts tf.dtype (eg: tf.float32) to str (eg: \"tf.float32\").\"\"\"\n return \"tf.\" + tf_type.name\n\n\ndef get_tensor_name(tensor):\n \"\"\"Returns name of the input tensor.\n\n Args:\n tensor: tf.Tensor\n\n Returns:\n str\n \"\"\"\n parts = six.ensure_str(tensor.name).split(\":\")\n if len(parts) > 2:\n raise ValueError(\"Tensor name invalid. Expect 0 or 1 colon, got {0}\".format(\n len(parts) - 1))\n\n # To be consistent with the tensor naming scheme in tensorflow, we need\n # drop the ':0' suffix for the first tensor.\n if len(parts) > 1 and parts[1] != \"0\":\n return tensor.name\n return parts[0]\n\n\ndef get_tensors_from_tensor_names(graph, tensor_names):\n \"\"\"Gets the Tensors associated with the `tensor_names` in the provided graph.\n\n Args:\n graph: TensorFlow Graph.\n tensor_names: List of strings that represent names of tensors in the graph.\n\n Returns:\n A list of Tensor objects in the same order the names are provided.\n\n Raises:\n ValueError:\n tensor_names contains an invalid tensor name.\n \"\"\"\n # Get the list of all of the tensors.\n tensor_name_to_tensor = {}\n for op in graph.get_operations():\n for tensor in op.values():\n tensor_name_to_tensor[get_tensor_name(tensor)] = tensor\n\n # Get the tensors associated with tensor_names.\n tensors = []\n invalid_tensors = []\n for name in tensor_names:\n if not isinstance(name, six.string_types):\n raise ValueError(\"Invalid type for a tensor name in the provided graph. 
\"\n \"Expected type for a tensor name is 'str', instead got \"\n \"type '{}' for tensor name '{}'\".format(\n type(name), name))\n\n tensor = tensor_name_to_tensor.get(name)\n if tensor is None:\n invalid_tensors.append(name)\n else:\n tensors.append(tensor)\n\n # Throw ValueError if any user input names are not valid tensors.\n if invalid_tensors:\n raise ValueError(\"Invalid tensors '{}' were found.\".format(\n \",\".join(invalid_tensors)))\n return tensors\n\n\ndef set_tensor_shapes(tensors, shapes):\n \"\"\"Sets Tensor shape for each tensor if the shape is defined.\n\n Args:\n tensors: TensorFlow ops.Tensor.\n shapes: Dict of strings representing input tensor names to list of\n integers representing input shapes (e.g., {\"foo\": : [1, 16, 16, 3]}).\n\n Raises:\n ValueError:\n `shapes` contains an invalid tensor.\n `shapes` contains an invalid shape for a valid tensor.\n \"\"\"\n if shapes:\n tensor_names_to_tensor = {\n get_tensor_name(tensor): tensor for tensor in tensors\n }\n for name, shape in shapes.items():\n if name not in tensor_names_to_tensor:\n raise ValueError(\"Invalid tensor \\'{}\\' found in tensor shapes \"\n \"map.\".format(name))\n if shape is not None:\n tensor = tensor_names_to_tensor[name]\n try:\n tensor.set_shape(shape)\n except ValueError as error:\n message = (\"The shape of tensor '{0}' cannot be changed from {1} to \"\n \"{2}. {3}\".format(name, tensor.shape, shape, str(error)))\n raise ValueError(message)\n\n\ndef get_grappler_config(optimizers_list):\n \"\"\"Creates a tf.compat.v1.ConfigProto for configuring Grappler.\n\n Args:\n optimizers_list: List of strings that represents the list of optimizers.\n\n Returns:\n tf.ConfigProto.\n \"\"\"\n config = _config_pb2.ConfigProto()\n rewrite_options = config.graph_options.rewrite_options\n for optimizer in optimizers_list:\n rewrite_options.optimizers.append(optimizer)\n return config\n\n\ndef run_graph_optimizations(graph_def,\n input_arrays,\n output_arrays,\n config,\n graph=None):\n \"\"\"Apply standard TensorFlow optimizations to the graph_def.\n\n Args:\n graph_def: Frozen GraphDef to be optimized.\n input_arrays: List of arrays that are considered inputs of the graph.\n output_arrays: List of arrays that are considered outputs of the graph.\n config: tf.ConfigProto.\n graph: TensorFlow Graph. Required when Eager mode is enabled. 
(default None)\n\n Returns:\n A new, optimized GraphDef.\n \"\"\"\n meta_graph = _export_meta_graph(graph_def=graph_def, graph=graph)\n\n signature = _meta_graph_pb2.SignatureDef()\n for array in input_arrays:\n signature.inputs[array.name].name = array.name\n signature.inputs[array.name].dtype = array.dtype.as_datatype_enum\n signature.inputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())\n\n for array in output_arrays:\n signature.outputs[array.name].name = array.name\n signature.outputs[array.name].dtype = array.dtype.as_datatype_enum\n signature.outputs[array.name].tensor_shape.CopyFrom(array.shape.as_proto())\n\n meta_graph.signature_def[\"not_used_key\"].CopyFrom(signature)\n\n # We need to add a collection called 'train_op' so that grappler\n # knows what the outputs are.\n fetch_collection = _meta_graph_pb2.CollectionDef()\n for array in input_arrays + output_arrays:\n fetch_collection.node_list.value.append(array.name)\n meta_graph.collection_def[\"train_op\"].CopyFrom(fetch_collection)\n\n return tf_optimizer.OptimizeGraph(config, meta_graph)\n\n\ndef _convert_op_hints_if_present(sess, graph_def, output_tensors,\n hinted_outputs_nodes):\n if is_frozen_graph(sess):\n raise ValueError(\"Try to convert op hints, needs unfrozen graph.\")\n output_arrays = [get_tensor_name(tensor) for tensor in output_tensors]\n graph_def = tf_graph_util.convert_variables_to_constants(\n sess, graph_def, output_arrays + hinted_outputs_nodes)\n graph_def = convert_op_hints_to_stubs(graph_def=graph_def)\n return graph_def\n\n\ndef freeze_graph(sess, input_tensors, output_tensors):\n \"\"\"Returns a frozen GraphDef.\n\n Runs a Grappler pass and freezes a graph with Variables in it. Otherwise the\n existing GraphDef is returned. The Grappler pass is only run on models that\n are frozen in order to inline the functions in the graph.\n If OpHints is present, it will try to convert the OpHint graph.\n\n Args:\n sess: TensorFlow Session.\n input_tensors: List of input tensors.\n output_tensors: List of output tensors (only .name is used from this).\n\n Returns:\n Frozen GraphDef.\n \"\"\"\n # Runs a Grappler pass in order to inline any functions in the graph.\n # Asides from inlining any simple function, Grappler will also try to lower\n # while loop into switch merge representation which is undesired for Ophints,\n # so we simply remove those attributes to prevent Grappler from doing so.\n graph_def = _convert_to_constants.disable_lower_using_switch_merge(\n sess.graph_def)\n config = get_grappler_config([\"function\"])\n graph_def = run_graph_optimizations(\n graph_def, input_tensors, output_tensors, config, graph=sess.graph)\n\n # If ophints are present, just convert them.\n hinted_outputs_nodes = find_all_hinted_output_nodes(sess)\n if hinted_outputs_nodes:\n return _convert_op_hints_if_present(sess, graph_def, output_tensors,\n hinted_outputs_nodes)\n\n if not is_frozen_graph(sess):\n output_node_names = [tensor.name.split(\":\")[0] for tensor in output_tensors]\n return tf_graph_util.convert_variables_to_constants(sess, graph_def,\n output_node_names)\n else:\n return sess.graph_def\n\n\ndef is_frozen_graph(sess):\n \"\"\"Determines if the graph is frozen.\n\n Determines if a graph has previously been frozen by checking for any\n operations of type Variable*. 
If variables are found, the graph is not frozen.\n\n Args:\n sess: TensorFlow Session.\n\n Returns:\n Bool.\n \"\"\"\n for op in sess.graph.get_operations():\n if six.ensure_str(op.type).startswith(\"Variable\") or six.ensure_str(\n op.type).endswith(\"VariableOp\"):\n return False\n return True\n\n\ndef build_debug_info_func(original_graph):\n \"\"\"Returns a method to retrieve the `GraphDebugInfo` from the original graph.\n\n Args:\n original_graph: The original `Graph` containing all the op stack traces.\n\n Returns:\n A function which retrieves the stack traces from the original graph and\n converts them to a `GraphDebugInfo` for a given set of nodes.\n \"\"\"\n\n def f(original_nodes):\n \"\"\"Function to create `GraphDebugInfo` for the given `original_nodes`.\"\"\"\n if not original_graph:\n return None\n # For the given nodes, gets all the op definitions in the original graph.\n useful_ops = []\n for func, name in original_nodes:\n try:\n if not func:\n useful_ops.append((func, original_graph.get_operation_by_name(name)))\n else:\n sub_func = original_graph._get_function(func) # pylint: disable=protected-access\n if isinstance(sub_func, function._EagerDefinedFunction): # pylint: disable=protected-access\n useful_ops.append(\n (func, sub_func.graph.get_operation_by_name(name)))\n else:\n sys.stderr.write(\n \"Use '@tf.function' or '@defun' to decorate the function.\\n\")\n continue\n except KeyError:\n # New node created by graph optimizer. No stack trace from source code.\n continue\n # Convert all the op definitions to stack traces in terms of GraphDebugInfo.\n return _error_interpolation.create_graph_debug_info_def(useful_ops)\n\n return f\n\n\ndef convert_debug_info_func(saved_debug_info):\n \"\"\"Returns a method to retrieve the `GraphDebugInfo` from the original graph.\n\n Args:\n saved_debug_info: The `GraphDebugInfo` containing all the debug info.\n\n Returns:\n A function which retrieves the stack traces from the original graph and\n converts them to a `GraphDebugInfo` for a given set of nodes.\n \"\"\"\n\n def f(original_nodes):\n \"\"\"Function to create `GraphDebugInfo` for the given `original_nodes`.\"\"\"\n if not saved_debug_info:\n return None\n\n output_debug_info = graph_debug_info_pb2.GraphDebugInfo()\n # All the files are copied over, so the index wouldn't be changed.\n output_debug_info.files[:] = saved_debug_info.files\n # We only copy over the debug info for the input nodes\n for func, node in original_nodes:\n debug_key = node + \"@\" + func\n output_debug_info.traces[debug_key].CopyFrom(\n saved_debug_info.traces[debug_key])\n return output_debug_info\n\n return f\n\n\ndef get_debug_info(nodes_to_debug_info_func, converted_graph):\n \"\"\"Returns the debug info for the original nodes in the `converted_graph`.\n\n Args:\n nodes_to_debug_info_func: The method to collect the op debug info for the\n nodes.\n converted_graph: A `GraphDef` after optimization and transformation.\n\n Returns:\n `GraphDebugInfo` for all the original nodes in `converted_graph`.\n \"\"\"\n if not nodes_to_debug_info_func:\n return None\n\n # Collect all the debug info nodes from the converted_graph\n original_nodes = set()\n for node in converted_graph.node:\n debug_nodes = node.experimental_debug_info.original_node_names\n debug_funcs = node.experimental_debug_info.original_func_names\n # If the `original_node_names` are empty, uses the node name directly.\n if not debug_nodes:\n original_nodes.add((\"\", node.name))\n else:\n for i in range(len(debug_nodes)):\n debug_func = \"\" if i 
>= len(debug_funcs) else debug_funcs[i]\n original_nodes.add((debug_func, debug_nodes[i]))\n\n # Convert the nodes to the debug info proto object.\n return nodes_to_debug_info_func(original_nodes)\n\n\ndef convert_bytes_to_c_source(data,\n array_name,\n max_line_width=80,\n include_guard=None,\n include_path=None,\n use_tensorflow_license=False):\n \"\"\"Returns strings representing a C constant array containing `data`.\n\n Args:\n data: Byte array that will be converted into a C constant.\n array_name: String to use as the variable name for the constant array.\n max_line_width: The longest line length, for formatting purposes.\n include_guard: Name to use for the include guard macro definition.\n include_path: Optional path to include in the source file.\n use_tensorflow_license: Whether to include the standard TensorFlow Apache2\n license in the generated files.\n\n Returns:\n Text that can be compiled as a C source file to link in the data as a\n literal array of values.\n Text that can be used as a C header file to reference the literal array.\n \"\"\"\n\n starting_pad = \" \"\n array_lines = []\n array_line = starting_pad\n for value in bytearray(data):\n if (len(array_line) + 4) > max_line_width:\n array_lines.append(array_line + \"\\n\")\n array_line = starting_pad\n array_line += \" 0x%02x,\" % (value)\n if len(array_line) > len(starting_pad):\n array_lines.append(array_line + \"\\n\")\n array_values = \"\".join(array_lines)\n\n if include_guard is None:\n include_guard = \"TENSORFLOW_LITE_UTIL_\" + array_name.upper() + \"_DATA_H_\"\n\n if include_path is not None:\n include_line = \"#include \\\"{include_path}\\\"\\n\".format(\n include_path=include_path)\n else:\n include_line = \"\"\n\n if use_tensorflow_license:\n license_text = \"\"\"\n/* Copyright {year} The TensorFlow Authors. 
All Rights Reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n==============================================================================*/\n\"\"\".format(year=datetime.date.today().year)\n else:\n license_text = \"\"\n\n source_template = \"\"\"{license_text}\n// This is a TensorFlow Lite model file that has been converted into a C data\n// array using the tensorflow.lite.util.convert_bytes_to_c_source() function.\n// This form is useful for compiling into a binary for devices that don't have a\n// file system.\n\n{include_line}\n// We need to keep the data array aligned on some architectures.\n#ifdef __has_attribute\n#define HAVE_ATTRIBUTE(x) __has_attribute(x)\n#else\n#define HAVE_ATTRIBUTE(x) 0\n#endif\n#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))\n#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(4)))\n#else\n#define DATA_ALIGN_ATTRIBUTE\n#endif\n\nconst unsigned char {array_name}[] DATA_ALIGN_ATTRIBUTE = {{\n{array_values}}};\nconst int {array_name}_len = {array_length};\n\"\"\"\n\n source_text = source_template.format(\n array_name=array_name,\n array_length=len(data),\n array_values=array_values,\n license_text=license_text,\n include_line=include_line)\n\n header_template = \"\"\"\n{license_text}\n\n// This is a TensorFlow Lite model file that has been converted into a C data\n// array using the tensorflow.lite.util.convert_bytes_to_c_source() function.\n// This form is useful for compiling into a binary for devices that don't have a\n// file system.\n\n#ifndef {include_guard}\n#define {include_guard}\n\nextern const unsigned char {array_name}[];\nextern const int {array_name}_len;\n\n#endif // {include_guard}\n\"\"\"\n\n header_text = header_template.format(\n array_name=array_name,\n include_guard=include_guard,\n license_text=license_text)\n\n return source_text, header_text\n\n\ndef _convert_model_from_bytearray_to_object(model_bytearray):\n \"\"\"Converts a tflite model from a bytearray into a parsable object.\"\"\"\n model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0)\n model_object = schema_fb.ModelT.InitFromObj(model_object)\n model_object = copy.deepcopy(model_object)\n model_object.subgraphs[0].inputs[0] = model_object.subgraphs[0].inputs[0]\n return model_object\n\n\ndef _convert_model_from_object_to_bytearray(model_object):\n \"\"\"Converts a tflite model from a parsable object into a bytearray.\"\"\"\n # Initial size of the buffer, which will grow automatically if needed\n builder = flatbuffers.Builder(1024)\n model_offset = model_object.Pack(builder)\n builder.Finish(model_offset, file_identifier=_TFLITE_FILE_IDENTIFIER)\n return bytes(builder.Output())\n\n\ndef _remove_tensors_from_model(model, remove_tensors_idxs):\n \"\"\"Remove tensors from model.\"\"\"\n if not remove_tensors_idxs:\n return\n if len(model.subgraphs) > 1:\n raise ValueError(\"Model must only have one subgraph. 
Instead, it has \"\n \"{} subgraphs.\".format(len(model.subgraphs)))\n subgraph = model.subgraphs[0]\n tensors = subgraph.tensors\n operators = subgraph.operators\n\n logging.debug(\"Removing tensors at indices : %s\", remove_tensors_idxs)\n # An optimized check to validate if \"remove_tensors_idxs\" (eg: [4,5,6]) is an\n # exact subset, with ordering, of \"tensors\" indices (eg: [0,1,2,3,4,5,6]).\n if min(remove_tensors_idxs) == len(tensors) - len(remove_tensors_idxs):\n logging.debug(\"Removing tensors only at the end of the tensor list\")\n del tensors[min(remove_tensors_idxs):]\n else:\n logging.debug(\"Removing tensors requires updating the model\")\n # Map the old tensor indices to new tensor indices\n d_old_to_new_tensors = {}\n left_shift_by = 0\n for idx in range(len(tensors)):\n if idx in remove_tensors_idxs:\n left_shift_by += 1\n else:\n d_old_to_new_tensors[idx] = idx - left_shift_by\n logging.debug(\"Old to new tensors map: %s\", d_old_to_new_tensors.__str__())\n # Update tensor indices referenced throughout the model\n def update_tensors(tensor_idxs):\n for i, ti in enumerate(tensor_idxs):\n tensor_idxs[i] = d_old_to_new_tensors.get(ti, -1)\n update_tensors(subgraph.inputs)\n update_tensors(subgraph.outputs)\n for op in operators:\n update_tensors(op.inputs)\n update_tensors(op.outputs)\n # Delete the tensors\n for idx in sorted(remove_tensors_idxs, reverse=True):\n tensors.pop(idx)\n logging.debug(\"Removed tensors marked for deletion\")\n\n\ndef _modify_model_input_type(model, inference_input_type=dtypes.float32):\n \"\"\"Modify model input type.\"\"\"\n\n if inference_input_type == dtypes.float32:\n return\n\n subgraph = model.subgraphs[0]\n tensors = subgraph.tensors\n operators = subgraph.operators\n\n # Find all quantize operators\n quant_opcode_idxs = []\n for idx, opcode in enumerate(model.operatorCodes):\n builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)\n if builtin_code == schema_fb.BuiltinOperator.QUANTIZE:\n quant_opcode_idxs.append(idx)\n if not quant_opcode_idxs:\n raise ValueError(\"Model input is not quantized.\")\n\n # Validate that the model input is quantized\n input_quant_ops = []\n for op in operators:\n # Find operators that quantize model input\n if op.opcodeIndex in quant_opcode_idxs and op.inputs[0] in subgraph.inputs:\n float_tensor, quant_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]\n # If found, validate that the operator's input type is float\n float_type = _convert_tflite_enum_type_to_tf_type(float_tensor.type)\n if float_type != dtypes.float32:\n raise ValueError(\n \"Initial model input type must be tf.float32. Expected type for \"\n \"tensor with name '{}' is tf.float32, instead type is {}\".format(\n float_tensor.name, _get_tf_type_name(float_type)))\n # If found, validate that the operator output is quantized and compatible\n # with the final model input type\n quant_type = _convert_tflite_enum_type_to_tf_type(quant_tensor.type)\n if quant_type not in _MAP_QUANT_TO_IO_TYPES:\n raise ValueError(\n \"Initial model input is not quantized. Expected type for \"\n \"tensor with name '{}' should be in {}, instead type is {}\".format(\n quant_tensor.name,\n tuple(_get_tf_type_name(t) for t in\n _MAP_QUANT_TO_IO_TYPES.keys()),\n _get_tf_type_name(quant_type)))\n else:\n inference_io_types = _MAP_QUANT_TO_IO_TYPES[quant_type]\n if inference_input_type not in inference_io_types:\n raise ValueError(\n \"Unsupported `inference_input_type` value. 
Expected to be in \"\n \"{}, instead got {}.\".format(\n tuple(_get_tf_type_name(t) for t in inference_io_types),\n _get_tf_type_name(inference_input_type)))\n input_quant_ops.append(op)\n\n if len(subgraph.inputs) != len(input_quant_ops):\n logging.warning(\n \"For model inputs containing unsupported operations which cannot be \"\n \"quantized, the `inference_input_type` attribute will default to the \"\n \"original type.\"\n )\n\n # Modify model input type\n if inference_input_type == dtypes.uint8:\n # Change quant op (float to int8) to quant op (uint8 to int8)\n for op in input_quant_ops:\n int8_quantization = tensors[op.outputs[0]].quantization\n uint8_quantization = schema_fb.QuantizationParametersT()\n uint8_quantization.scale = [int8_quantization.scale[0]]\n uint8_quantization.zeroPoint = [int8_quantization.zeroPoint[0] + 128]\n tensors[op.inputs[0]].quantization = uint8_quantization\n tensors[op.inputs[0]].type = schema_fb.TensorType.UINT8\n elif inference_input_type in _MAP_QUANT_TO_IO_TYPES:\n # Remove the inputs and the quant operator\n remove_tensors_idxs = set()\n for op in input_quant_ops:\n subgraph.inputs[subgraph.inputs == op.inputs[0]] = op.outputs[0]\n remove_tensors_idxs.add(op.inputs[0])\n operators.remove(op)\n # Remove tensors marked for deletion.\n _remove_tensors_from_model(model, remove_tensors_idxs)\n else:\n raise ValueError(\n \"Unsupported `inference_input_type` value {}.\".format(\n _get_tf_type_name(inference_input_type)))\n\n\ndef _modify_model_output_type(model, inference_output_type=dtypes.float32):\n \"\"\"Modify model output type.\"\"\"\n\n if inference_output_type == dtypes.float32:\n return\n\n subgraph = model.subgraphs[0]\n tensors = subgraph.tensors\n operators = subgraph.operators\n\n # Find all dequantize operators\n dequant_opcode_idxs = []\n for idx, opcode in enumerate(model.operatorCodes):\n builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)\n if builtin_code == schema_fb.BuiltinOperator.DEQUANTIZE:\n dequant_opcode_idxs.append(idx)\n if not dequant_opcode_idxs:\n raise ValueError(\"Model output is not dequantized.\")\n\n # Validate that the model output is dequantized\n output_dequant_ops = []\n for op in operators:\n # Find operators that dequantize model output\n if op.opcodeIndex in dequant_opcode_idxs and \\\n op.outputs[0] in subgraph.outputs:\n # If found, validate that the operator's output type is float\n quant_tensor, float_tensor = tensors[op.inputs[0]], tensors[op.outputs[0]]\n float_type = _convert_tflite_enum_type_to_tf_type(float_tensor.type)\n if float_type != dtypes.float32:\n raise ValueError(\n \"Initial model output type must be tf.float32. Expected type for \"\n \"tensor with name '{}' is tf.float32, instead type is {}\".format(\n float_tensor.name, _get_tf_type_name(float_type)))\n # If found, validate that the operator input is quantized and compatible\n # with the final model output type\n quant_type = _convert_tflite_enum_type_to_tf_type(quant_tensor.type)\n if quant_type not in _MAP_QUANT_TO_IO_TYPES:\n raise ValueError(\n \"Initial model output is not dequantized. Expected type for \"\n \"tensor with name '{}' should be in {}, instead type is {}\".format(\n quant_tensor.name,\n tuple(_get_tf_type_name(t) for t in\n _MAP_QUANT_TO_IO_TYPES.keys()),\n _get_tf_type_name(quant_type)))\n else:\n inference_io_types = _MAP_QUANT_TO_IO_TYPES[quant_type]\n if inference_output_type not in inference_io_types:\n raise ValueError(\n \"Unsupported `inference_output_type` value. 
Expected to be in \"\n \"{}, instead got {}.\".format(\n tuple(_get_tf_type_name(t) for t in inference_io_types),\n _get_tf_type_name(inference_output_type)))\n output_dequant_ops.append(op)\n\n if len(subgraph.outputs) != len(output_dequant_ops):\n logging.warning(\n \"For model outputs containing unsupported operations which cannot be \"\n \"quantized, the `inference_output_type` attribute will default to the \"\n \"original type.\"\n )\n\n # Modify model output type\n if inference_output_type == dtypes.uint8:\n # Find a quantize operator\n quant_opcode_idx = -1\n for idx, opcode in enumerate(model.operatorCodes):\n builtin_code = schema_util.get_builtin_code_from_operator_code(opcode)\n if builtin_code == schema_fb.BuiltinOperator.QUANTIZE:\n quant_opcode_idx = idx\n break\n # Create a quantize operator, if none exist\n if quant_opcode_idx == -1:\n quant_op = schema_fb.OperatorCodeT()\n quant_op.builtinCode = schema_fb.BuiltinOperator.QUANTIZE\n quant_op.deprecatedBuiltinCode = schema_fb.BuiltinOperator.QUANTIZE\n model.operatorCodes.append(quant_op)\n quant_opcode_idx = len(model.operatorCodes) - 1\n # Change dequant op (int8 to float) to quant op (int8 to uint8)\n for op in output_dequant_ops:\n op.opcodeIndex = quant_opcode_idx\n int8_quantization = tensors[op.inputs[0]].quantization\n uint8_quantization = schema_fb.QuantizationParametersT()\n uint8_quantization.scale = [int8_quantization.scale[0]]\n uint8_quantization.zeroPoint = [int8_quantization.zeroPoint[0] + 128]\n tensors[op.outputs[0]].quantization = uint8_quantization\n tensors[op.outputs[0]].type = schema_fb.TensorType.UINT8\n elif inference_output_type in _MAP_QUANT_TO_IO_TYPES:\n # Remove the outputs and the dequant operator\n remove_tensors_idxs = set()\n for op in output_dequant_ops:\n subgraph.outputs[subgraph.outputs == op.outputs[0]] = op.inputs[0]\n remove_tensors_idxs.add(op.outputs[0])\n operators.remove(op)\n # Remove tensors marked for deletion.\n _remove_tensors_from_model(model, remove_tensors_idxs)\n else:\n raise ValueError(\n \"Unsupported `inference_output_type` value {}.\".format(\n _get_tf_type_name(inference_output_type)))\n\n\ndef modify_model_io_type(\n model, inference_input_type=dtypes.float32,\n inference_output_type=dtypes.float32):\n \"\"\"Modify the input/output type of a tflite model.\n\n Args:\n model: A tflite model.\n inference_input_type: tf.DType representing modified input type.\n (default tf.float32. If model input is int8 quantized, it must be in\n {tf.float32, tf.int8,tf.uint8}, else if model input is int16 quantized,\n it must be in {tf.float32, tf.int16}, else it must be tf.float32)\n inference_output_type: tf.DType representing modified output type.\n (default tf.float32. If model output is int8 dequantized, it must be in\n {tf.float32, tf.int8,tf.uint8}, else if model output is int16 dequantized,\n it must be in {tf.float32, tf.int16}, else it must be tf.float32)\n Returns:\n A tflite model with modified input/output type.\n\n Raises:\n ValueError: If `inference_input_type`/`inference_output_type` is unsupported\n or a supported integer type is specified for a model whose input/output is\n not quantized/dequantized.\n RuntimeError: If the modification was unsuccessful.\n\n \"\"\"\n if inference_input_type == dtypes.float32 and \\\n inference_output_type == dtypes.float32:\n return model\n\n model_object = _convert_model_from_bytearray_to_object(model)\n\n if len(model_object.subgraphs) > 1:\n raise ValueError(\"Model must only have one subgraph. 
Instead, it has \"\n \"{} subgraphs.\".format(len(model_object.subgraphs)))\n\n _modify_model_input_type(model_object, inference_input_type)\n\n _modify_model_output_type(model_object, inference_output_type)\n\n return _convert_model_from_object_to_bytearray(model_object)\n" ]
[ [ "tensorflow.core.protobuf.config_pb2.ConfigProto", "tensorflow.python.training.saver.export_meta_graph", "tensorflow.lite.python.schema_util.get_builtin_code_from_operator_code", "tensorflow.lite.python.schema_py_generated.QuantizationParametersT", "tensorflow.lite.python.op_hint.convert_op_hints_to_stubs", "tensorflow.python.framework.error_interpolation.create_graph_debug_info_def", "tensorflow.core.protobuf.meta_graph_pb2.SignatureDef", "tensorflow.core.protobuf.graph_debug_info_pb2.GraphDebugInfo", "tensorflow.python.framework.convert_to_constants.disable_lower_using_switch_merge", "tensorflow.lite.python.op_hint.find_all_hinted_output_nodes", "tensorflow.lite.python.schema_py_generated.ModelT.InitFromObj", "tensorflow.python.grappler.tf_optimizer.OptimizeGraph", "tensorflow.python.framework.graph_util.convert_variables_to_constants", "tensorflow.lite.python.schema_py_generated.Model.GetRootAsModel", "tensorflow.core.protobuf.meta_graph_pb2.CollectionDef", "tensorflow.lite.python.schema_py_generated.OperatorCodeT" ] ]
xrick/Lcj-DSP-in-Python
[ "f27ee7036dc0df41b96e0b06ed13bb8fd874a714" ]
[ "dsp_python_imp/Ch04/sinusoid_wave.py" ]
[ "import numpy as np\nimport wave\nimport struct\n\nfile = \"sinusoid.wav\"\t\t# 檔案名稱\n\namplitude = 30000 # 振幅\nfrequency = 100\t\t\t\t# 頻率(Hz)\nduration = 3\t\t\t\t# 時間長度(秒)\nfs = 44100\t\t\t\t \t# 取樣頻率(Hz)\nnum_samples = duration * fs\t# 樣本數\n \nnum_channels = 1\t\t\t# 通道數\nsampwidth = 2\t\t\t\t# 樣本寬度\nnum_frames = num_samples\t# 音框數 = 樣本數\ncomptype = \"NONE\"\t\t \t# 壓縮型態\ncompname = \"not compressed\" # 無壓縮\n\nt = np.linspace( 0, duration, num_samples, endpoint = False )\nx = amplitude * np.cos( 2 * np.pi * frequency * t )\n\nwav_file = wave.open( file, 'w' )\nwav_file.setparams(( num_channels, sampwidth, fs, num_frames, comptype, compname )) \n\nfor s in x :\n wav_file.writeframes( struct.pack( 'h', int ( s ) ) )\n\nwav_file.close( ) " ]
[ [ "numpy.linspace", "numpy.cos" ] ]
ufz/ogs
[ "97d0249e0c578c3055730f4e9d994b9970885098" ]
[ "Tests/Data/Parabolic/T/3D_3BHEs_array_SimX/pre/3bhes.py" ]
[ "###\n# Copyright (c) 2012-2021, OpenGeoSys Community (http://www.opengeosys.org)\n# Distributed under a Modified BSD License.\n# See accompanying file LICENSE.txt or\n# http://www.opengeosys.org/project/license\n###\n\n# Execute this file to generate TESPy network csv files\nfrom tespy.networks import network\nfrom tespy.components import sink, source, splitter, merge, pump, heat_exchanger_simple\nfrom tespy.connections import connection, ref, bus\nfrom tespy.tools.characteristics import char_line\nfrom tespy.tools.data_containers import dc_cc\n\nimport numpy as np\n\n# %% network\nbtes = network(\n fluids=[\"water\"],\n T_unit=\"K\",\n p_unit=\"bar\",\n h_unit=\"kJ / kg\",\n T_range=[273.25, 373.15],\n p_range=[1, 20],\n h_range=[1, 1000],\n)\n\n# %% components\nfc_in = source(\"from consumer inflow\")\nfc_out = sink(\"from consumer outflow\")\n\npu = pump(\"pump\")\n\nsp = splitter(\"splitter\", num_out=3)\n\n# bhe:\nbhe_name = \"BHE1\"\nassert \"BHE1\" in bhe_name, \"BHE should be named with 'BHE1'\"\nbhe1 = heat_exchanger_simple(bhe_name)\nbhe_name = \"BHE2\"\nassert \"BHE2\" in bhe_name, \"BHE should be named with 'BHE2'\"\nbhe2 = heat_exchanger_simple(bhe_name)\nbhe_name = \"BHE3\"\nassert \"BHE3\" in bhe_name, \"BHE should be named with 'BHE3'\"\nbhe3 = heat_exchanger_simple(bhe_name)\n\nmg = merge(\"merge\", num_in=3)\n\ncons = heat_exchanger_simple(\"consumer\")\n\n# %% connections\nfc_pu = connection(fc_in, \"out1\", pu, \"in1\")\n\npu_sp = connection(pu, \"out1\", sp, \"in1\")\n\nsp_bhe1 = connection(sp, \"out1\", bhe1, \"in1\")\nsp_bhe2 = connection(sp, \"out2\", bhe2, \"in1\")\nsp_bhe3 = connection(sp, \"out3\", bhe3, \"in1\")\n\nbhe1_mg = connection(bhe1, \"out1\", mg, \"in1\")\nbhe2_mg = connection(bhe2, \"out1\", mg, \"in2\")\nbhe3_mg = connection(bhe3, \"out1\", mg, \"in3\")\n\nmg_cons = connection(mg, \"out1\", cons, \"in1\")\n\ncons_fc = connection(cons, \"out1\", fc_out, \"in1\")\n\nbtes.add_conns(\n fc_pu, pu_sp, sp_bhe1, sp_bhe2, sp_bhe3, bhe1_mg, bhe2_mg, bhe3_mg, mg_cons, cons_fc\n)\n\n\n# %% paramerization\n## components paramerization\n# pump\n# flow_char\n# provide volumetric flow in m^3 / s\nx = np.array(\n [\n 0.00,\n 0.00001952885971862,\n 0.00390577194372,\n 0.005858657915586,\n 0.007811543887448,\n 0.00976442985931,\n 0.011717315831173,\n 0.013670201803035,\n 0.015623087774897,\n 0.017575973746759,\n 0.019528859718621,\n 0.021481745690483,\n 0.023434631662345,\n 0.025387517634207,\n 0.027340403606069,\n 0.029293289577931,\n 0.031246175549793,\n 0.033199061521655,\n 0.035151947493517,\n 0.037104833465379,\n 0.039057719437241,\n 0.041010605409104,\n 0.042963491380966,\n 0.044916377352828,\n 0.04686926332469,\n 0.048822149296552,\n 0.050775035268414,\n 0.052727921240276,\n 0.054680807212138,\n 0.056633693184,\n ]\n)\n\n# provide head in Pa\ny = (\n np.array(\n [\n 0.47782539,\n 0.47725723,\n 0.47555274,\n 0.47271192,\n 0.46873478,\n 0.46362130,\n 0.45737151,\n 0.44998538,\n 0.44146293,\n 0.43180416,\n 0.4220905,\n 0.40907762,\n 0.39600986,\n 0.38180578,\n 0.36646537,\n 0.34998863,\n 0.33237557,\n 0.31362618,\n 0.29374046,\n 0.27271841,\n 0.25056004,\n 0.22726535,\n 0.20283432,\n 0.17726697,\n 0.15056329,\n 0.12272329,\n 0.09374696,\n 0.06363430,\n 0.03238531,\n 0.00000000,\n ]\n )\n * 1e5\n)\n\nchar = char_line(x=x, y=y)\npu.set_attr(flow_char=dc_cc(func=char, is_set=True))\npu.set_attr(eta_s=0.90)\n\n# bhes\nbhe1.set_attr(D=0.013665, L=100, ks=0.00001)\nbhe2.set_attr(D=0.013665, L=100, ks=0.00001)\nbhe3.set_attr(D=0.013665, L=100, ks=0.00001)\n\n# 
consumer\ncons.set_attr(D=0.2, L=20, ks=0.00001)\n# busses\nheat = bus(\"consumer heat demand\")\nheat.add_comps({\"c\": cons, \"p\": \"P\"})\nbtes.add_busses(heat)\n# consumer heat demand\nheat.set_attr(P=-3000) # W\n\n\n## connection parametrization\n# system inlet\ninflow_head = 2 # bar\n\nfc_pu.set_attr(p=inflow_head, m=0.6, fluid={\"water\": 1})\n\n# for BHEs:\n# Tout:\nbhe1_mg.set_attr(T=303.15)\nbhe2_mg.set_attr(T=303.15)\nbhe3_mg.set_attr(T=303.15)\n\n# imposed boundary condition: ensure all heat from BHEs are consumed on 'consumer'\npu_sp.set_attr(h=ref(cons_fc, 1, 0))\n\n# %% solve\nbtes.solve(\"design\")\n# btes.print_results()\n\n# %% save to csv:\nbtes.save(\"tespy_nw\", structure=True)\n" ]
[ [ "numpy.array" ] ]
IzzatHalabi/newpix_prototype
[ "5d617ef20df59af57c26ca0f7fc8521afd4203f7" ]
[ "env/Lib/site-packages/mcdm/tests/test_load.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) 2020 Dimitrios-Georgios Akestoridis\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport mcdm\nimport numpy as np\nimport os\nimport unittest\n\n\nDIR_PATH = os.path.dirname(os.path.abspath(__file__))\n\n\nclass TestLoad(unittest.TestCase):\n def test_loading_example01(self):\n \"\"\"Test loading a matrix with the default parameter values.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"example01.csv\")\n obtained_matrix, obtained_row_labels = mcdm.load(filepath)\n expected_matrix = np.array(\n [[0.0, 0.0, 1.0],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n expected_row_labels = None\n np.testing.assert_allclose(obtained_matrix, expected_matrix)\n self.assertEqual(obtained_matrix.dtype, expected_matrix.dtype)\n self.assertEqual(obtained_row_labels, expected_row_labels)\n\n def test_loading_example02(self):\n \"\"\"Test loading a matrix with column and row labels.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"example02.csv\")\n obtained_matrix, obtained_row_labels = mcdm.load(\n filepath, skiprows=1, labeled_rows=True)\n expected_matrix = np.array(\n [[0.0, 0.0, 0.0],\n [0.0, 0.0, 1.0],\n [0.2, 0.5, 0.0],\n [0.2, 0.5, 1.0],\n [0.4, 1.0, 0.0],\n [0.4, 1.0, 1.0],\n [0.6, 1.0, 0.0],\n [0.6, 1.0, 1.0],\n [0.8, 0.5, 0.0],\n [0.8, 0.5, 1.0],\n [1.0, 0.0, 0.0],\n [1.0, 0.0, 1.0]],\n dtype=np.float64)\n expected_row_labels = [\n \"a1\",\n \"a2\",\n \"a3\",\n \"a4\",\n \"a5\",\n \"a6\",\n \"a7\",\n \"a8\",\n \"a9\",\n \"a10\",\n \"a11\",\n \"a12\",\n ]\n np.testing.assert_allclose(obtained_matrix, expected_matrix)\n self.assertEqual(obtained_matrix.dtype, expected_matrix.dtype)\n self.assertEqual(obtained_row_labels, expected_row_labels)\n\n def test_loading_example03(self):\n \"\"\"Test loading a matrix without any labels from a TSV file.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"example03.tsv\")\n obtained_matrix, obtained_row_labels = mcdm.load(\n filepath, delimiter=\"\\t\")\n expected_matrix = np.array(\n [[0.00, 1.00],\n [0.25, 0.75],\n [0.50, 0.50],\n [0.75, 0.25],\n [1.00, 0.00]],\n dtype=np.float64)\n expected_row_labels = None\n np.testing.assert_allclose(obtained_matrix, expected_matrix)\n self.assertEqual(obtained_matrix.dtype, expected_matrix.dtype)\n self.assertEqual(obtained_row_labels, expected_row_labels)\n\n def test_loading_example04(self):\n \"\"\"Test loading a matrix with a comment line from a TSV 
file.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"example04.tsv\")\n obtained_matrix, obtained_row_labels = mcdm.load(\n filepath, delimiter=\"\\t\", skiprows=1)\n expected_matrix = np.array(\n [[ 2.0, 12.0, 7.0, 7.0], # noqa: E201\n [ 4.0, 100.0, 7.0, 7.0], # noqa: E201\n [10.0, 200.0, 7.0, 7.0], # noqa: E201\n [ 0.0, 300.0, 7.0, 7.0], # noqa: E201\n [ 6.0, 400.0, 7.0, 7.0], # noqa: E201\n [ 1.0, 600.0, 7.0, 7.0]], # noqa: E201\n dtype=np.float64)\n expected_row_labels = None\n np.testing.assert_allclose(obtained_matrix, expected_matrix)\n self.assertEqual(obtained_matrix.dtype, expected_matrix.dtype)\n self.assertEqual(obtained_row_labels, expected_row_labels)\n\n def test_loading_example05(self):\n \"\"\"Test loading a matrix with row labels from a CSV file.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"example05.csv\")\n obtained_matrix, obtained_row_labels = mcdm.load(\n filepath, labeled_rows=True)\n expected_matrix = np.array(\n [[ 8.0, 8.0, -1.0, -1.0, 5.0, 5.0], # noqa: E201\n [24.0, 24.0, -11.0, -11.0, 0.0, 0.0], # noqa: E201\n [ 4.0, 4.0, -10.0, -10.0, 40.0, 40.0], # noqa: E201\n [14.0, 14.0, -9.0, -9.0, 15.0, 15.0], # noqa: E201\n [ 6.0, 6.0, -7.0, -7.0, -5.0, -5.0], # noqa: E201\n [18.0, 18.0, -5.0, -5.0, -10.0, -10.0]], # noqa: E201\n dtype=np.float64)\n expected_row_labels = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n np.testing.assert_allclose(obtained_matrix, expected_matrix)\n self.assertEqual(obtained_matrix.dtype, expected_matrix.dtype)\n self.assertEqual(obtained_row_labels, expected_row_labels)\n\n def test_loading_example06(self):\n \"\"\"Test loading a matrix with a comment line from a CSV file.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"example06.csv\")\n obtained_matrix, obtained_row_labels = mcdm.load(\n filepath, skiprows=1)\n expected_matrix = np.array(\n [[0.5, 0.6, 0.3, 0.2, 0.9],\n [0.5, 0.5, 0.5, 0.5, 0.5],\n [0.5, 0.4, 0.7, 0.8, 0.1]],\n dtype=np.float64)\n expected_row_labels = None\n np.testing.assert_allclose(obtained_matrix, expected_matrix)\n self.assertEqual(obtained_matrix.dtype, expected_matrix.dtype)\n self.assertEqual(obtained_row_labels, expected_row_labels)\n\n def test_loading_example07(self):\n \"\"\"Test loading a matrix with a multi-line comment from a CSV file.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"example07.csv\")\n obtained_matrix, obtained_row_labels = mcdm.load(\n filepath, skiprows=3)\n expected_matrix = np.array(\n [[0.9, 30.0, 500.0, 4.0],\n [0.1, 50.0, 5.0, 6.0],\n [0.5, 80.0, 8.0, 6.0],\n [0.8, 40.0, 100.0, 4.0],\n [0.7, 60.0, 20.0, 5.0],\n [0.6, 60.0, 10.0, 5.0]],\n dtype=np.float64)\n expected_row_labels = None\n np.testing.assert_allclose(obtained_matrix, expected_matrix)\n self.assertEqual(obtained_matrix.dtype, expected_matrix.dtype)\n self.assertEqual(obtained_row_labels, expected_row_labels)\n\n def test_loading_example08(self):\n \"\"\"Test loading a matrix with row labels from a TSV file.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"example08.tsv\")\n obtained_matrix, obtained_row_labels = mcdm.load(\n filepath, delimiter=\"\\t\", labeled_rows=True)\n expected_matrix = np.array(\n [[4.0, 5.0, 10.0],\n [3.0, 10.0, 6.0],\n [3.0, 20.0, 2.0],\n [2.0, 15.0, 5.0]],\n dtype=np.float64)\n expected_row_labels = [\"A\", \"B\", \"C\", \"D\"]\n np.testing.assert_allclose(obtained_matrix, expected_matrix)\n self.assertEqual(obtained_matrix.dtype, expected_matrix.dtype)\n self.assertEqual(obtained_row_labels, expected_row_labels)\n\n def test_loading_example09(self):\n \"\"\"Test 
loading a large matrix from a TSV file.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"example09.tsv\")\n obtained_matrix, obtained_row_labels = mcdm.load(\n filepath, delimiter=\"\\t\", skiprows=1, labeled_rows=True)\n expected_matrix = np.array(\n [[1.000000, 1.000000, 0.017276],\n [0.046296, 0.022222, 1.000000],\n [0.259295, 0.106985, 0.783554],\n [0.260509, 0.107106, 0.801962],\n [0.090419, 0.044763, 0.245226],\n [0.563999, 0.239328, 0.288358],\n [0.320434, 0.147798, 0.738850],\n [0.314969, 0.144773, 0.751384],\n [0.714533, 0.364252, 0.092688],\n [0.972336, 0.706954, 0.091856],\n [0.283518, 0.127236, 0.805858],\n [0.296781, 0.132676, 0.797796],\n [0.265469, 0.122640, 0.202089],\n [0.839930, 0.461981, 0.304980],\n [0.282103, 0.126395, 0.808264],\n [0.296100, 0.132096, 0.799922],\n [0.212761, 0.104337, 0.229227],\n [0.798002, 0.429797, 0.335956],\n [0.068258, 0.035742, 0.519465],\n [0.102412, 0.055489, 0.281905],\n [0.155229, 0.085050, 0.163012],\n [0.238498, 0.128995, 0.103688],\n [0.177178, 0.075565, 0.854643],\n [0.257650, 0.112055, 0.811516],\n [0.294934, 0.131563, 0.781283],\n [0.310552, 0.140593, 0.762520],\n [0.368115, 0.159646, 0.449073],\n [0.498578, 0.228317, 0.296180],\n [0.635688, 0.310778, 0.210340],\n [0.759518, 0.402583, 0.149893],\n [0.499916, 0.188975, 0.302964],\n [0.717516, 0.306092, 0.249340],\n [0.790702, 0.359737, 0.221402],\n [0.848093, 0.415040, 0.193533],\n [0.068414, 0.035866, 0.519542],\n [0.102469, 0.055554, 0.282188],\n [0.155261, 0.085064, 0.162956],\n [0.238748, 0.129114, 0.103684]],\n dtype=np.float64)\n expected_row_labels = [\n \"Epidemic\",\n \"Direct\",\n \"CnF.LTS\",\n \"CnF.DestEnc\",\n \"CnF.Enc\",\n \"CnF.PRoPHET\",\n \"CnR.LTS\",\n \"CnR.DestEnc\",\n \"CnR.Enc\",\n \"CnR.PRoPHET\",\n \"DF.LTS\",\n \"DF.DestEnc\",\n \"DF.Enc\",\n \"DF.PRoPHET\",\n \"COORD.LTS\",\n \"COORD.DestEnc\",\n \"COORD.Enc\",\n \"COORD.PRoPHET\",\n \"SnW.L2\",\n \"SnW.L4\",\n \"SnW.L8\",\n \"SnW.L16\",\n \"LSF-SnW.L2\",\n \"LSF-SnW.L4\",\n \"LSF-SnW.L8\",\n \"LSF-SnW.L16\",\n \"SnF.L2\",\n \"SnF.L4\",\n \"SnF.L8\",\n \"SnF.L16\",\n \"SimBetTS.L2\",\n \"SimBetTS.L4\",\n \"SimBetTS.L8\",\n \"SimBetTS.L16\",\n \"EBR.L2\",\n \"EBR.L4\",\n \"EBR.L8\",\n \"EBR.L16\",\n ]\n np.testing.assert_allclose(obtained_matrix, expected_matrix)\n self.assertEqual(obtained_matrix.dtype, expected_matrix.dtype)\n self.assertEqual(obtained_row_labels, expected_row_labels)\n\n def test_num_columns_exception(self):\n \"\"\"Test loading a matrix with a wrong number of columns.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"failure01.tsv\")\n self.assertRaises(ValueError, mcdm.load, filepath,\n delimiter=\"\\t\", labeled_rows=True)\n\n def test_no_columns_exception(self):\n \"\"\"Test loading a matrix without any columns.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"failure02.tsv\")\n self.assertRaises(ValueError, mcdm.load, filepath,\n delimiter=\"\\t\", labeled_rows=True)\n\n def test_wrong_type_exception(self):\n \"\"\"Test loading a matrix with a wrong value type.\"\"\"\n filepath = os.path.join(DIR_PATH, \"data\", \"failure03.csv\")\n self.assertRaises(ValueError, mcdm.load, filepath,\n skiprows=1, labeled_rows=True)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.testing.assert_allclose", "numpy.array" ] ]
ml-in-programming/ml-on-source-code-models
[ "28f206afcda761320550cefdd53a3f89d206f82f", "28f206afcda761320550cefdd53a3f89d206f82f" ]
[ "psob_authorship/reproduce_results/psobp_reproducibility/proof_of_work/iris_dataset_pytorch.py", "psob_authorship/reproduce_results/psobp_reproducibility/proof_of_work/ackley.py" ]
[ "\"\"\"\nExample is taken from https://pyswarms.readthedocs.io/en/latest/examples/custom_objective_function.html\nComparison between my implementation of PSO and pyswarms is made on iris dataset.\nOptimizing PyTorch model.\nAssert is taken on absolute difference in final accuracy with 0.015 threshold.\nAlso for train loss threshold is 0.03.\nThresholds and results are different because instead of softmax logsoftmax is used.\n\"\"\"\n# Import modules\n# Import PySwarms\nimport torch\nfrom sklearn.datasets import load_iris\nfrom torch import nn\n\nfrom psob_authorship.experiment.utils import make_experiment_reproducible\nfrom psob_authorship.pso.PSO import PSO\nfrom psob_authorship.train.utils import get_model_accuracy_and_loss\n\nCONFIG = {\n 'random_state': 4562,\n 'criterion': nn.CrossEntropyLoss(),\n 'pso_options': {'c1': 0.5, 'c2': 0.3, 'w': 0.9,\n 'particle_clamp': (0, 1), 'use_particle_clamp_each_iteration': False,\n 'unchanged_iterations_stop': 20000, 'use_only_early_stopping': False\n # 20k not to use early stopping, so exactly 1000 iterations will be performed\n },\n 'n_particles': 100,\n 'pso_iters': 1000,\n 'pso_optimizer': PSO,\n 'train_loss_threshold': 0.03,\n 'accuracy_threshold': 0.015\n}\nmake_experiment_reproducible(CONFIG['random_state'])\n\n\ndef print_info(string):\n print(string)\n\n\nCONFIG['pso_options']['print_info'] = print_info\n\n\nclass Model(nn.Module):\n # Neural network architecture\n n_inputs = 4\n n_hidden = 20\n n_classes = 3\n dimensions = (n_inputs * n_hidden) + (n_hidden * n_classes) + n_hidden + n_classes\n\n def __init__(self):\n super().__init__()\n self.fc1 = nn.Linear(Model.n_inputs, Model.n_hidden, bias=True)\n self.nonlin1 = nn.Tanh()\n self.fc2 = nn.Linear(Model.n_hidden, Model.n_classes, bias=True)\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.nonlin1(x)\n x = self.fc2(x)\n return x\n\n\ndef conduct_iris_dataset_pytorch_comparison_experiment():\n # Load the iris dataset\n data = load_iris()\n\n # Store the features as X and the labels as y\n X = torch.FloatTensor(data.data)\n y = torch.LongTensor(data.target)\n\n model = Model()\n criterion = CONFIG['criterion']\n\n def pso_optimize(use_pyswarms):\n CONFIG['pso_options']['use_pyswarms'] = use_pyswarms\n optimizer = CONFIG['pso_optimizer'](model, criterion, CONFIG['pso_options'], CONFIG['n_particles'])\n optimizer.optimize(X, y,\n X, y,\n CONFIG['pso_iters'], None)\n\n # pyswarms\n pso_optimize(use_pyswarms=True)\n pyswarms_cost, pyswarms_accuracy = get_model_accuracy_and_loss(model, criterion, X, y)\n\n # my implementation\n CONFIG['pso_options']['w'] = (CONFIG['pso_options']['w'], CONFIG['pso_options']['w'])\n pso_optimize(use_pyswarms=False)\n my_cost, my_accuracy = get_model_accuracy_and_loss(model, criterion, X, y)\n\n accuracy_diff = abs(pyswarms_accuracy - my_accuracy)\n cost_diff = abs(pyswarms_cost - my_cost)\n assert (accuracy_diff <= CONFIG['accuracy_threshold'])\n assert (cost_diff <= CONFIG['train_loss_threshold'])\n print(\"ASSERTIONS PASSED\")\n print(\"Thresholds for accuracy \" +\n str(CONFIG['accuracy_threshold']) + \", for train loss \" +\n str(CONFIG['train_loss_threshold']))\n print(\"My :\" + str(my_accuracy) + \" \" + str(my_cost))\n print(\"Pyswarms:\" + str(pyswarms_accuracy) + \" \" + str(pyswarms_cost))\n print(\"Accuracy difference: \" + str(accuracy_diff))\n print(\"Loss difference: \" + str(cost_diff))\n\n\nif __name__ == '__main__':\n conduct_iris_dataset_pytorch_comparison_experiment()\n", "\"\"\"\nAckley function for 
test:\nhttps://pyswarms.readthedocs.io/en/latest/api/pyswarms.utils.functions.html#module-pyswarms.utils.functions.single_obj\nIt is just simple function: R^n -> R\nThresholds are 1e-14.\n\"\"\"\nimport numpy as np\nimport pyswarms as ps\nfrom pyswarms.utils.functions.single_obj import ackley\n\nfrom psob_authorship.experiment.utils import make_experiment_reproducible\nfrom psob_authorship.pso.DecreasingWeightPsoOptimizer import DecreasingWeightPsoOptimizer\n\nCONFIG = {\n 'random_state': 4562,\n 'pso_options': {'c1': 0.5, 'c2': 0.3, 'w': 0.9,\n 'particle_clamp': (0, 1), 'use_particle_clamp_each_iteration': False,\n 'unchanged_iterations_stop': 20000, 'use_only_early_stopping': False\n # 20k not to use early stopping, so exactly 1000 iterations will be performed\n },\n 'n_particles': 100,\n 'velocity_clamp': (-1, 1),\n 'pso_iters': 1000,\n 'function_value_threshold': 1e-14,\n 'point_threshold': 1e-14\n}\nmake_experiment_reproducible(CONFIG['random_state'])\n\n\ndef conduct_ackley_comparison_experiment():\n dimensions = 10\n correct_function_value, correct_point = 0, np.zeros(dimensions)\n\n # pyswarms implementation\n pyswarms_optimizer = ps.single.GlobalBestPSO(n_particles=CONFIG['n_particles'], dimensions=dimensions,\n options=CONFIG['pso_options'], velocity_clamp=CONFIG['velocity_clamp'])\n pyswarms_function_value, pyswarms_point = pyswarms_optimizer.optimize(ackley, iters=CONFIG['pso_iters'])\n\n # my implementation\n CONFIG['pso_options']['w'] = (CONFIG['pso_options']['w'], CONFIG['pso_options']['w'])\n my_optimizer = DecreasingWeightPsoOptimizer(CONFIG['n_particles'], dimensions,\n CONFIG['pso_options'], CONFIG['velocity_clamp'])\n my_function_value, my_point = my_optimizer.optimize(ackley, CONFIG['pso_iters'], None)\n\n point_diff = np.linalg.norm(correct_point - my_point)\n function_value_diff = abs(correct_function_value - my_function_value)\n assert (point_diff <= CONFIG['point_threshold'])\n assert (function_value_diff <= CONFIG['function_value_threshold'])\n print(\"ASSERTIONS PASSED\")\n print(\"Thresholds for point \" +\n str(CONFIG['point_threshold']) + \", for function value \" +\n str(CONFIG['function_value_threshold']))\n print(\"Correct :\" + str(correct_function_value) + \" \" + str(correct_point))\n print(\"My :\" + str(my_function_value) + \" \" + str(my_point))\n print(\"Pyswarms:\" + str(pyswarms_function_value) + \" \" + str(pyswarms_point))\n print(\"Point difference: \" + str(point_diff))\n print(\"Function value difference: \" + str(function_value_diff))\n\n\nif __name__ == '__main__':\n conduct_ackley_comparison_experiment()\n" ]
[ [ "torch.nn.Linear", "torch.nn.Tanh", "torch.FloatTensor", "torch.LongTensor", "torch.nn.CrossEntropyLoss", "sklearn.datasets.load_iris" ], [ "numpy.linalg.norm", "numpy.zeros" ] ]
pidan1231239/pytorch-template
[ "c68ae0019514e1ab59853ce552c8ec8603554d52" ]
[ "test.py" ]
[ "import argparse\nimport torch\nfrom tqdm import tqdm\nimport data_loader.data_loaders as module_data\nimport model.loss as module_loss\nimport model.metric as module_metric\nimport model.model as module_arch\nfrom parse_config import ConfigParser\n\n\ndef main(config):\n logger = config.get_logger('test')\n\n # setup data_loader instances\n data_loader = getattr(module_data, config['data_loader']['type'])(\n config['data_loader']['args']['data_dir'],\n batch_size=512,\n shuffle=False,\n validation_split=0.0,\n training=False,\n num_workers=2\n )\n\n # build model architecture\n model = config.init_obj('arch', module_arch)\n logger.info(model)\n\n # get function handles of loss and metrics\n loss_fn = getattr(module_loss, config['loss'])\n metric_fns = [getattr(module_metric, met) for met in config['metrics']]\n\n logger.info('Loading checkpoint: {} ...'.format(config.resume))\n checkpoint = torch.load(config.resume)\n state_dict = checkpoint['state_dict']\n model = torch.nn.DataParallel(model)\n model.load_state_dict(state_dict)\n\n # prepare model for testing\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = model.to(device)\n model.eval()\n\n total_loss = 0.0\n total_metrics = torch.zeros(len(metric_fns))\n\n with torch.no_grad():\n for i, (data, target) in enumerate(tqdm(data_loader)):\n data, target = data.to(device), target.to(device)\n output = model(data)\n\n #\n # save sample images, or do something with output here\n #\n\n # computing loss, metrics on test set\n loss = loss_fn(output, target)\n batch_size = data.shape[0]\n total_loss += loss.item() * batch_size\n for i, metric in enumerate(metric_fns):\n total_metrics[i] += metric(output, target) * batch_size\n\n n_samples = len(data_loader.sampler)\n log = {'loss': total_loss / n_samples}\n log.update({\n met.__name__: total_metrics[i].item() / n_samples for i, met in enumerate(metric_fns)\n })\n logger.info(log)\n\n\nif __name__ == '__main__':\n args = argparse.ArgumentParser(description='PyTorch Template')\n args.add_argument('-c', '--config', default=None, type=str,\n help='config file path (default: None)')\n args.add_argument('-r', '--resume', default=None, type=str,\n help='path to latest checkpoint (default: None)')\n args.add_argument('-d', '--device', default=None, type=str,\n help='indices of GPUs to enable (default: all)')\n\n config = ConfigParser.from_args(args)\n main(config)\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available", "torch.load", "torch.nn.DataParallel" ] ]
jabbar536/django_ML_model
[ "a85cfd68f906c799aa9085e740fed22063d37e2e" ]
[ "backend/server/apps/endpoints/views.py" ]
[ "# backend/server/apps/endpoints/views.py file\n\n# please add imports\nimport json\nfrom numpy.random import rand\nfrom rest_framework import views, status\nfrom rest_framework.response import Response\nfrom apps.ml.registry import MLRegistry\nfrom server.wsgi import registry\n\nfrom rest_framework import viewsets\nfrom rest_framework import mixins\n\nfrom apps.endpoints.models import Endpoint\nfrom apps.endpoints.serializers import EndpointSerializer\n\nfrom apps.endpoints.models import MLAlgorithm\nfrom apps.endpoints.serializers import MLAlgorithmSerializer\n\nfrom apps.endpoints.models import MLAlgorithmStatus\nfrom apps.endpoints.serializers import MLAlgorithmStatusSerializer\n\nfrom apps.endpoints.models import MLRequest\nfrom apps.endpoints.serializers import MLRequestSerializer\n\n\nclass EndpointViewSet(\n mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet\n):\n serializer_class = EndpointSerializer\n queryset = Endpoint.objects.all()\n\n\nclass MLAlgorithmViewSet(\n mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet\n):\n serializer_class = MLAlgorithmSerializer\n queryset = MLAlgorithm.objects.all()\n\n\ndef deactivate_other_statuses(instance):\n old_statuses = MLAlgorithmStatus.objects.filter(parent_mlalgorithm=instance.parent_mlalgorithm,\n created_at__lt=instance.created_at,\n active=True)\n for i in range(len(old_statuses)):\n old_statuses[i].active = False\n MLAlgorithmStatus.objects.bulk_update(old_statuses, [\"active\"])\n\n\nclass MLAlgorithmStatusViewSet(\n mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet,\n mixins.CreateModelMixin\n):\n serializer_class = MLAlgorithmStatusSerializer\n queryset = MLAlgorithmStatus.objects.all()\n\n def perform_create(self, serializer):\n try:\n with transaction.atomic():\n instance = serializer.save(active=True)\n # set active=False for other statuses\n deactivate_other_statuses(instance)\n\n except Exception as e:\n raise APIException(str(e))\n\n\nclass MLRequestViewSet(\n mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet,\n mixins.UpdateModelMixin\n):\n serializer_class = MLRequestSerializer\n queryset = MLRequest.objects.all()\n\n\nclass PredictView(views.APIView):\n def post(self, request, endpoint_name, format=None):\n\n algorithm_status = self.request.query_params.get(\"status\", \"production\")\n algorithm_version = self.request.query_params.get(\"version\")\n\n algs = MLAlgorithm.objects.filter(parent_endpoint__name = endpoint_name, status__status = algorithm_status, status__active=True)\n\n if algorithm_version is not None:\n algs = algs.filter(version = algorithm_version)\n\n if len(algs) == 0:\n return Response(\n {\"status\": \"Error\", \"message\": \"ML algorithm is not available\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n if len(algs) != 1 and algorithm_status != \"ab_testing\":\n return Response(\n {\"status\": \"Error\", \"message\": \"ML algorithm selection is ambiguous. 
Please specify algorithm version.\"},\n status=status.HTTP_400_BAD_REQUEST,\n )\n alg_index = 0\n if algorithm_status == \"ab_testing\":\n alg_index = 0 if rand() < 0.5 else 1\n\n algorithm_object = registry.endpoints[algs[alg_index].id]\n prediction = algorithm_object.compute_prediction(request.data)\n\n label = prediction[\"label\"] if \"label\" in prediction else \"error\"\n ml_request = MLRequest(\n input_data=json.dumps(request.data),\n full_response=prediction,\n response=label,\n feedback=\"\",\n parent_mlalgorithm=algs[alg_index],\n )\n ml_request.save()\n\n prediction[\"request_id\"] = ml_request.id\n\n return Response(prediction)" ]
[ [ "numpy.random.rand" ] ]
chelokot/ProjectedGAN-pytorch
[ "9b56e471d7abeaf9523655a31c77f7473b3830b0" ]
[ "dataset.py" ]
[ "import torch\r\nfrom torchvision import transforms, datasets\r\n\r\n\r\ndef load_data(data_path, batch_size):\r\n train_transforms = transforms.Compose([transforms.RandomRotation(30),\r\n transforms.Resize((256, 256)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406],\r\n [0.229, 0.224, 0.225])])\r\n train_data = datasets.ImageFolder(data_path, transform=train_transforms)\r\n trainloader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)\r\n return trainloader\r\n\r\n\r\nif __name__ == '__main__':\r\n loader, data = load_data(\"data\")\r\n\r\n" ]
[ [ "torch.utils.data.DataLoader" ] ]
KailinTong/Algorithms-Design-and-Analysis
[ "786def6bdab0025ee037c0c5e16f0873e23c9134" ]
[ "Part_2/Homework_3/knapsack.py" ]
[ "import numpy as np\nimport sys\nsys.setrecursionlimit(10000)\n\nclass Knapsack:\n def __init__(self, txt_name):\n self.size = 0\n self.num_items = 0\n self.items = [(0, 0)] # (value, weight)\n self.array = np.array([])\n self.read_txt(txt_name)\n self.cache = {}\n self.value = self.compute_value(self.size, self.num_items)\n\n def read_txt(self, txt_name):\n with open(txt_name) as f:\n first_line = f.readline().rstrip(\"\\n\").split(\" \")\n print(\"Knapsack size: \" + first_line[0])\n self.size = int(first_line[0])\n print(\"Number of items: \" + first_line[1])\n self.num_items = int(first_line[1])\n self.array = np.zeros(shape=(self.num_items+0, self.size), dtype=int) # add one row of 0\n\n for line in f:\n str_list = line.rstrip(\"\\n\").split(' ')\n item = tuple(map(int, str_list))\n self.items.append(item)\n\n def compute_value(self, weight, index):\n if index == 0 or weight == 0:\n return 0\n (this_value, this_weight) = self.items[index]\n # thie item weight is bigger than the weight size, no solution, decrease the index\n if this_weight > weight:\n if (weight, index - 1) not in self.cache:\n self.cache[(weight, index - 1)] = self.compute_value(weight, index - 1)\n return self.cache[(weight, index - 1)]\n else:\n # solution including this item\n if (weight - this_weight, index - 1) not in self.cache:\n self.cache[(weight - this_weight, index - 1)] = self.compute_value(weight - this_weight, index - 1)\n solution_including_this_item = this_value + self.cache[(weight - this_weight, index - 1)]\n if (weight, index - 1) not in self.cache:\n self.cache[(weight, index - 1)] = self.compute_value(weight, index - 1)\n solution_without_this_item = self.cache[(weight, index - 1)]\n return max(solution_including_this_item, solution_without_this_item)\n\n\nif __name__ == \"__main__\":\n # k = Knapsack(\"knapsack1.txt\")\n k = Knapsack(\"knapsack_big.txt\")\n print(k.value)" ]
[ [ "numpy.array", "numpy.zeros" ] ]
brandongk/segmenter
[ "dbc042d31dc74f1abdc87ae10a6be78ba38ddb91" ]
[ "segmenter/evaluators/PredictEvaluator.py" ]
[ "from tensorflow.keras import backend as K\nfrom segmenter.evaluators.ThresholdAwareEvaluator import ThresholdAwareEvaluator\nimport numpy as np\nimport os\n\n\nclass PredictEvaluator(ThresholdAwareEvaluator):\n def evaluate_threshold(self, model, threshold, outdir):\n for batch, (images, masks) in enumerate(self.dataset):\n name = os.path.basename(self.generator.image_files[batch])\n outfile = os.path.join(outdir, name)\n # if os.path.exists(\"{}.npz \".format(outfile)):\n # continue\n print(\"{} ({}/{})\".format(name, batch, self.num_images))\n predictions = model.predict_on_batch(images).numpy()\n for i in range(predictions.shape[0]):\n prediction = predictions[i]\n mask = masks[i].numpy()\n image = images[i].numpy()\n prediction = prediction[:, :, 0]\n mask = mask[:, :, 0]\n\n thresholded_prediction = np.where(prediction > threshold, 1,\n 0).astype(prediction.dtype)\n\n if image.shape[2] == 1:\n image = image[:, :, 0]\n\n np.savez_compressed(outfile,\n image=image,\n raw_prediction=prediction,\n prediction=thresholded_prediction,\n mask=mask)\n" ]
[ [ "numpy.where", "numpy.savez_compressed" ] ]
matiaslindgren/ghht
[ "1e310a3573730dd546551fa3003e2403f6fd71ef" ]
[ "ghht/__main__.py" ]
[ "from argparse import ArgumentParser\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom tempfile import mkstemp\nimport os.path\n\nimport fontTools.ttx\nimport numpy as np\n\nimport ghht\n\n\ndef parse_date(d):\n return datetime.datetime.strptime(d, \"%Y-%m-%d\")\n\n\ndef _main(sink, text, start_year, font_file, background, intensity, skip_list, debug):\n assert sink or debug, \"sink or debug must be defined\"\n ttx_file = mkstemp()[1] + \".ttx\"\n print(\"converting font file '{:s}' to '{:s}'\".format(font_file, ttx_file))\n fontTools.ttx.main([\"-o\", ttx_file, font_file])\n\n skip_dates = set()\n if skip_list:\n with open(skip_list) as f:\n skip_dates = set(parse_date(l.strip()) for l in f)\n print()\n print(\"skiplist contained {} dates to skip\".format(len(skip_dates)))\n\n print()\n print(\"checking font has all chars in text '{}'\".format(text))\n font = ghht.TTF(ttx_file)\n for ch in text:\n font.assert_has(ch)\n print(\"'{}' ok\".format(ch))\n\n padding = ghht.Padding(top=1, right=1, left=1)\n\n def xy_dates():\n for (x, y), td in ghht.squares2commitdates(start_year, font.text2squares(text), padding):\n if td.date() in skip_dates:\n continue\n yield (x, y), td\n\n if debug:\n print(\"debug mode, will not generate commits\")\n print(\"x y date\")\n years = defaultdict(lambda: np.zeros(ghht.HeatMapCanvas.shape))\n for (x, y), td in xy_dates():\n print(x, y, td.date())\n years[td.date().year][y][x] += 1\n ghht.plot_debug_heatmap(sorted(years.items(), reverse=True))\n return\n\n if not os.path.isdir(os.path.join(sink, \".git\")):\n print(\"'{}' does not have a .git directory, initializing repo\".format(sink))\n ghht.run(\"git init\", sink)\n\n print()\n print(\"generating commits\")\n for (x, y), t in xy_dates():\n for _ in range(intensity):\n ghht.commit(t, sink, \"({},{})\".format(x, y))\n\n if background:\n print()\n print(\"generating commits for background\")\n ghht.commit_year(start_year, sink)\n\n\ndef main():\n parser = ArgumentParser()\n parser.add_argument(\"text\",\n type=str,\n help=\"ASCII text to render on commit heatmap.\")\n parser.add_argument(\"start_year\",\n type=int,\n help=\"Year for first commit.\")\n parser.add_argument(\"--sink\",\n type=str,\n help=\"Path to a git repository to be used for generating commits.\")\n parser.add_argument(\"--font-file\",\n type=str,\n default=ghht.DEFAULT_FONT,\n help=\"TTX-convertible font file.\")\n parser.add_argument(\"--background\",\n action=\"store_true\",\n default=False,\n help=\"Generate a single commit on every day to paint a background.\")\n parser.add_argument(\"--debug\",\n action=\"store_true\",\n default=False,\n help=\"Plot characters with matplotlib instead of generating commits.\")\n parser.add_argument(\"--intensity\",\n type=int,\n default=1,\n help=\"How many commits to generate for every text square.\")\n parser.add_argument(\"--skip-list\",\n type=str,\n help=\"Path to a file containing lines of yyyy-mm-dd for dates that should not have a commit.\")\n\n _main(**vars(parser.parse_args()))\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.zeros" ] ]
joordamn/CellESignal
[ "ecf487d07d35f134d4537d7c99c7fa0582221e68" ]
[ "bin/matlab_label_convert.py" ]
[ "# -*- encoding: utf-8 -*-\n'''\n-------------------------\n@File : matlab_label_convert.py\n@Time : 2022/01/24 15:36:12\n@Author : Zhongning Jiang \n@Contact : [email protected]\n@Desc : 此脚本用于转换matlab导出的标签txt\n-------------------------\n'''\n\nimport os, sys, shutil\n# sys.path.append(\"..\")\n# sys.path.append(os.path.dirname(os.path.realpath(__file__)))\n# os.chdir(sys.path[-1])\n\nfrom tqdm import tqdm\nimport json\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom utils.utils import read_from_txt\n\n\ndef parse(\n root_folder, \n signal_save_path, \n noise_save_path, \n noise_num_ratio=2, \n split_len=200, \n plot=True, \n gen_signal=True, \n gen_noise=True,\n ):\n \"\"\"在txt文件的根目录下读取txt文件,并生成对应的json文件和图\n \"\"\"\n\n label = {\n \"code\": \"\",\n \"label\": 1,\n \"number of peaks\": 1,\n \"peaks' labels\": [],\n \"borders\": [],\n \"description\": \"\",\n \"rt\":[],\n \"scan\": [],\n \"intensity\": [],\n \"mz\": [],\n }\n\n for item in os.listdir(root_folder):\n if item.endswith(\"txt\"):\n if item.startswith(\"0\"):\n # 读取信号数据\n signal_data, _ = read_from_txt(os.path.join(root_folder, item))\n elif item.startswith(\"r\"):\n # 读取border数据\n border_data, _ = read_from_txt(os.path.join(root_folder, item))\n # 提取信号逐段分割\n counter = 0\n if plot:\n figure = plt.figure()\n\n rt_total = []\n for i, border in tqdm(enumerate(border_data)):\n label[\"peaks' labels\"] = []\n label[\"borders\"] = []\n \n begin, end = border\n border_len = end - begin\n\n if border_len >= int(split_len * 0.9) or border_len == 0:\n continue\n \n # 截取数据\n pad_len = random.randint(2, int(split_len - border_len))\n rt_start = begin - pad_len\n rt_end = rt_start + split_len\n data_slice = signal_data[rt_start:rt_end]\n rt_total.append([rt_start, rt_end])\n\n # 判断前两个后两个border是否包含\n border_contains = []\n if i >= 2 and i <= len(border_data) - 3:\n borders = border_data[i-2], border_data[i-1], border, border_data[i+2], border_data[i+1]\n for b in borders:\n if rt_start <= b[0] <= rt_end or rt_start <= b[1] <= rt_end:\n _begin = max(0, b[0] - rt_start)\n _end = min(split_len - 1, b[1] - rt_start)\n border_contains.append([int(_begin), int(_end)])\n label[\"peaks' labels\"].append([0])\n else:\n border_contains.append([pad_len, min(split_len-1, pad_len + border_len)])\n label[\"peaks' labels\"].append([0])\n\n # 改写json内容\n json_file_name = \"peak_sample_\" + str(counter).zfill(4)\n json_file = os.path.join(signal_save_path, json_file_name) + '.json'\n\n label[\"code\"] = json_file_name\n label[\"number of peaks\"] = len(border_contains)\n label[\"borders\"] = border_contains\n label[\"intensity\"] = data_slice\n label[\"rt\"] = [rt_start, rt_end]\n label[\"mz\"] = data_slice\n with open(json_file, mode=\"w\", encoding=\"utf-8\") as jf:\n json.dump(label, jf)\n \n # plot\n if plot:\n figure.clear()\n ax = figure.add_subplot(111)\n ax.plot(data_slice)\n for i, border in enumerate(label['borders']):\n begin, end = border\n ax.fill_between(range(begin, end + 1), y1=data_slice[begin:end + 1], y2=min(data_slice), alpha=0.5,\n label=f\"peak NO: {i}, borders={begin}-{end}\")\n ax.legend(loc='best')\n fig_save_path = signal_save_path + \"/fig/\"\n if not os.path.exists(fig_save_path):\n os.makedirs(fig_save_path)\n figure.savefig(fig_save_path + \"peak_sample_\" + str(counter).zfill(4) + \".jpg\")\n plt.clf()\n \n counter += 1\n\n print(\"信号生成完成\")\n\n # ---------生成噪声----------- #\n if gen_noise:\n\n # 随机生成10000个噪声起点\n # noise_locs = np.random.randint(100, len(signal_data)*0.5, (10000,)).tolist()\n 
noise_locs = np.random.randint(100, border_data[-1][-1], (10000,)).tolist()  # restrict noise start positions to the annotated range\n        num_of_noise = noise_num_ratio * len(border_data)  # number of noise samples to keep\n        # filter suitable noise start positions against the border positions\n        filtered_noise_locs = []\n        for i, noise_loc in enumerate(noise_locs):\n            count = 0\n            for border in border_data:\n                begin, end = border[0], border[1]\n                if noise_loc <= begin - 2 * split_len or noise_loc >= end + 2 * split_len:\n                    count += 1\n            if count >= len(border_data):\n                filtered_noise_locs.append(noise_loc)\n        print(\"filtered noise has {}\".format(len(filtered_noise_locs)))\n        assert len(filtered_noise_locs) >= num_of_noise, \"filtered noise num less than {0}\".format(num_of_noise)\n        final_noise_locs = filtered_noise_locs[:num_of_noise]\n        # slice the noise data\n        for i, loc in tqdm(enumerate(final_noise_locs)):\n            noise_slice = signal_data[loc:loc + split_len]\n\n            # fill in the json content\n            json_file_name = \"nois_sample_\" + str(i).zfill(4)\n            json_file = os.path.join(noise_save_path, json_file_name) + '.json'\n            \n            label[\"borders\"] = []\n            label[\"label\"] = 0\n            label[\"number of peaks\"] = 0\n            label[\"peaks' labels\"] = []\n            label[\"code\"] = json_file_name\n            label[\"intensity\"] = noise_slice\n            label[\"rt\"] = [loc, loc + split_len]\n            label[\"mz\"] = noise_slice\n            with open(json_file, mode=\"w\", encoding=\"utf-8\") as jf:\n                json.dump(label, jf)\n            \n            # plot\n            if plot:\n                figure.clear()\n                ax = figure.add_subplot(111)\n                random_signal = rt_total[random.randint(0, len(rt_total)-1)][0]\n                ax.plot(signal_data[random_signal:random_signal+split_len])\n                ax.plot(noise_slice)\n                ax.set_title(label[\"code\"])\n                fig_save_path = noise_save_path + \"/fig/\"\n                if not os.path.exists(fig_save_path):\n                    os.makedirs(fig_save_path)\n                figure.savefig(fig_save_path + \"nois_sample_\" + str(i).zfill(4) + \".jpg\")\n                plt.clf()\n\n        print(\"noise generation finished\")\n    \n\nif __name__ == \"__main__\":\n    root_folders = [\n        \"../data/data_collection_20220301/txt_data/01\",\n        \"../data/data_collection_20220301/txt_data/02\",\n        # \"../data/data_collection_20220115/txt_data/03\",\n    ]\n\n    for root_folder in root_folders:\n\n        peak_save_path = root_folder + \"/peak_data/\"\n        noise_save_path = root_folder + \"/noise_data/\"\n        \n        try:\n            shutil.rmtree(peak_save_path)\n            shutil.rmtree(noise_save_path)\n        except OSError:\n            # the output folders may not exist yet\n            pass\n        \n        # make sure both output folders exist\n        os.makedirs(peak_save_path, exist_ok=True)\n        os.makedirs(noise_save_path, exist_ok=True)\n        \n        parse(\n            root_folder, \n            peak_save_path, \n            noise_save_path,\n            plot=True,\n            gen_signal=True,\n            gen_noise=True,\n        )" ]
[ [ "matplotlib.pyplot.clf", "numpy.random.randint", "matplotlib.pyplot.figure" ] ]
tacaswell/pyFAI
[ "fd63c7d9ba35e687ef5c4ec717c01bf46564572a", "fd63c7d9ba35e687ef5c4ec717c01bf46564572a", "fd63c7d9ba35e687ef5c4ec717c01bf46564572a" ]
[ "pyFAI/io/ponifile.py", "pyFAI/test/test_utils_header.py", "pyFAI/utils/mathutil.py" ]
[ "# coding: utf-8\n#\n# Project: Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n# Copyright (C) 2015-2021 European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Jérôme Kieffer ([email protected])\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"Module function to manage poni files.\n\"\"\"\n\n__author__ = \"Jerome Kieffer\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"06/04/2021\"\n__docformat__ = 'restructuredtext'\n\nimport collections\nimport time\nimport json\nimport logging\n_logger = logging.getLogger(__name__)\nimport numpy\nfrom .. import detectors\n\n\nclass PoniFile(object):\n\n def __init__(self, data=None):\n self._detector = None\n self._dist = None\n self._poni1 = None\n self._poni2 = None\n self._rot1 = None\n self._rot2 = None\n self._rot3 = None\n self._wavelength = None\n if data is None:\n pass\n elif isinstance(data, dict):\n self.read_from_dict(data)\n elif isinstance(data, (str,)):\n self.read_from_file(data)\n else:\n self.read_from_duck(data)\n\n def __repr__(self):\n return json.dumps(self.as_dict(), indent=4)\n\n def read_from_file(self, filename):\n data = collections.OrderedDict()\n with open(filename) as opened_file:\n for line in opened_file:\n if line.startswith(\"#\") or (\":\" not in line):\n continue\n words = line.split(\":\", 1)\n\n key = words[0].strip().lower()\n try:\n value = words[1].strip()\n except Exception as error: # IGNORE:W0703:\n _logger.error(\"Error %s with line: %s\", error, line)\n data[key] = value\n self.read_from_dict(data)\n\n def read_from_dict(self, config):\n \"\"\"Initialize this object using a dictionary.\n\n .. 
note:: The dictionary is versioned.\n        \"\"\"\n        version = int(config.get(\"poni_version\", 1))\n\n        if version == 1:\n            # Handle former version of PONI-file\n            if \"detector\" in config:\n                self._detector = detectors.detector_factory(config[\"detector\"])\n            else:\n                self._detector = detectors.Detector()\n\n            if \"pixelsize1\" in config or \"pixelsize2\" in config:\n                if isinstance(self._detector, detectors.NexusDetector):\n                    # NexusDetector is already set\n                    pass\n                elif self._detector.force_pixel and (\"pixelsize1\" in config) and (\"pixelsize2\" in config):\n                    pixel1 = float(config[\"pixelsize1\"])\n                    pixel2 = float(config[\"pixelsize2\"])\n                    self._detector = self._detector.__class__(pixel1=pixel1, pixel2=pixel2)\n                else:\n                    self._detector = detectors.Detector()\n                    if \"pixelsize1\" in config:\n                        self._detector.pixel1 = float(config[\"pixelsize1\"])\n                    if \"pixelsize2\" in config:\n                        self._detector.pixel2 = float(config[\"pixelsize2\"])\n\n            if \"splinefile\" in config:\n                if config[\"splinefile\"].lower() != \"none\":\n                    self._detector.set_splineFile(config[\"splinefile\"])\n\n        elif version == 2:\n            detector_name = config[\"detector\"]\n            detector_config = config[\"detector_config\"]\n            self._detector = detectors.detector_factory(detector_name, detector_config)\n        else:\n            raise RuntimeError(\"PONI file version %s too recent. Upgrade pyFAI.\" % version)\n\n        if \"distance\" in config:\n            self._dist = float(config[\"distance\"])\n        elif \"dist\" in config:\n            self._dist = float(config[\"dist\"])\n        if \"poni1\" in config:\n            self._poni1 = float(config[\"poni1\"])\n        if \"poni2\" in config:\n            self._poni2 = float(config[\"poni2\"])\n        if \"rot1\" in config:\n            self._rot1 = float(config[\"rot1\"])\n        if \"rot2\" in config:\n            self._rot2 = float(config[\"rot2\"])\n        if \"rot3\" in config:\n            self._rot3 = float(config[\"rot3\"])\n        if \"wavelength\" in config:\n            self._wavelength = float(config[\"wavelength\"])\n\n    def read_from_duck(self, duck):\n        \"\"\"Initialize the object using an object providing the same API.\n\n        The duck object must provide dist, poni1, poni2, rot1, rot2, rot3,\n        wavelength, and detector.\n        \"\"\"\n        assert numpy.isreal(duck.dist)\n        self._dist = duck.dist\n        assert numpy.isreal(duck.poni1)\n        self._poni1 = duck.poni1\n        assert numpy.isreal(duck.poni2)\n        self._poni2 = duck.poni2\n        assert numpy.isreal(duck.rot1)\n        self._rot1 = duck.rot1\n        assert numpy.isreal(duck.rot2)\n        self._rot2 = duck.rot2\n        assert numpy.isreal(duck.rot3)\n        self._rot3 = duck.rot3\n        assert numpy.isreal(duck.wavelength)\n        self._wavelength = duck.wavelength\n        self._detector = duck.detector\n\n    def write(self, fd):\n        \"\"\"Write this object to an open stream.\n        \"\"\"\n        fd.write((\"# Nota: C-Order, 1 refers to the Y axis,\"\n                  \" 2 to the X axis \\n\"))\n        fd.write(\"# Calibration done at %s\\n\" % time.ctime())\n        fd.write(\"poni_version: 2\\n\")\n        detector = self.detector\n        fd.write(\"Detector: %s\\n\" % detector.__class__.__name__)\n        fd.write(\"Detector_config: %s\\n\" % json.dumps(detector.get_config()))\n\n        fd.write(\"Distance: %s\\n\" % self._dist)\n        fd.write(\"Poni1: %s\\n\" % self._poni1)\n        fd.write(\"Poni2: %s\\n\" % self._poni2)\n        fd.write(\"Rot1: %s\\n\" % self._rot1)\n        fd.write(\"Rot2: %s\\n\" % self._rot2)\n        fd.write(\"Rot3: %s\\n\" % self._rot3)\n        if self._wavelength is not None:\n            fd.write(\"Wavelength: %s\\n\" % self._wavelength)\n\n    def as_dict(self):\n        config = collections.OrderedDict([(\"poni_version\", 2)])\n        config[\"detector\"] = self.detector.__class__.__name__\n        config[\"detector_config\"] = self.detector.get_config()\n        
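# The keys written below mirror the ones read_from_dict() accepts, so the\n        # output of as_dict() can be fed back to PoniFile(data=...) for a\n        # lossless round-trip.\n        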
config[\"dist\"] = self._dist\n config[\"poni1\"] = self._poni1\n config[\"poni2\"] = self._poni2\n config[\"rot1\"] = self._rot1\n config[\"rot2\"] = self._rot2\n config[\"rot3\"] = self._rot3\n if self._wavelength:\n config[\"wavelength\"] = self._wavelength\n return config\n\n @property\n def detector(self):\n \"\"\":rtype: Union[None,float]\"\"\"\n return self._detector\n\n @property\n def dist(self):\n \"\"\":rtype: Union[None,float]\"\"\"\n return self._dist\n\n @property\n def poni1(self):\n \"\"\":rtype: Union[None,float]\"\"\"\n return self._poni1\n\n @property\n def poni2(self):\n \"\"\":rtype: Union[None,float]\"\"\"\n return self._poni2\n\n @property\n def rot1(self):\n \"\"\":rtype: Union[None,float]\"\"\"\n return self._rot1\n\n @property\n def rot2(self):\n \"\"\":rtype: Union[None,float]\"\"\"\n return self._rot2\n\n @property\n def rot3(self):\n \"\"\":rtype: Union[None,float]\"\"\"\n return self._rot3\n\n @property\n def wavelength(self):\n \"\"\":rtype: Union[None,float]\"\"\"\n return self._wavelength\n", "#!/usr/bin/env python\n# coding: utf-8\n#\n# Project: Azimuthal integration\n# https://github.com/silx-kit/pyFAI\n#\n# Copyright (C) 2015-2018 European Synchrotron Radiation Facility, Grenoble, France\n#\n# Principal author: Jérôme Kieffer ([email protected])\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n#  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n#  THE SOFTWARE.\n\n\"\"\"Test suite for the header utilities (monitor extraction from image headers)\"\"\"\n\n__author__ = \"Jérôme Kieffer\"\n__contact__ = \"[email protected]\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"16/10/2020\"\n\nimport unittest\nimport numpy\nimport logging\nimport shutil\nimport os\nimport fabio\nimport h5py\n\nlogger = logging.getLogger(__name__)\n\nfrom .utilstest import UtilsTest\nfrom ..utils import header_utils\n\n\nclass TestEdfMonitor(unittest.TestCase):\n\n    def setUp(self):\n        header = {\n            \"mon1\": \"100\",\n            \"bad\": \"foo\",\n            \"counter_pos\": \"12 13 14 foo\",\n            \"counter_mne\": \"mon2 mon3 mon4 mon5\",\n            \"bad_size_pos\": \"foo foo foo\",\n            \"bad_size_mne\": \"mon2 mon3 mon4 mon5\",\n            \"mne_not_exists_pos\": \"12 13 14 foo\",\n            \"pos_not_exists_mne\": \"mon2 mon3 mon4 mon5\",\n        }\n        self.image = fabio.numpyimage.numpyimage(numpy.array([]), header)\n\n    def test_monitor(self):\n        result = header_utils._get_monitor_value_from_edf(self.image, \"mon1\")\n        self.assertEqual(100, result)\n\n    def test_monitor_in_counter(self):\n        result = header_utils._get_monitor_value_from_edf(self.image, \"counter/mon3\")\n        self.assertEqual(13, result)\n\n    def test_bad_monitor(self):\n        self.assertRaises(header_utils.MonitorNotFound, header_utils._get_monitor_value_from_edf, self.image, \"bad\")\n\n    def test_bad_monitor_in_counter(self):\n        self.assertRaises(header_utils.MonitorNotFound, header_utils._get_monitor_value_from_edf, self.image, \"counter/mon5\")\n\n    def test_bad_counter_syntax(self):\n        self.assertRaises(header_utils.MonitorNotFound, header_utils._get_monitor_value_from_edf, self.image, \"counter/mon5/1\")\n\n    def test_missing_monitor(self):\n        self.assertRaises(header_utils.MonitorNotFound, header_utils._get_monitor_value_from_edf, self.image, \"not_exists\")\n\n    def test_missing_counter(self):\n        self.assertRaises(header_utils.MonitorNotFound, header_utils._get_monitor_value_from_edf, self.image, \"not_exists/mon\")\n\n    def test_missing_counter_monitor(self):\n        self.assertRaises(header_utils.MonitorNotFound, header_utils._get_monitor_value_from_edf, self.image, \"counter/not_exists\")\n\n    def test_missing_counter_mne(self):\n        self.assertRaises(header_utils.MonitorNotFound, header_utils._get_monitor_value_from_edf, self.image, \"mne_not_exists/mon\")\n\n    def test_missing_counter_pos(self):\n        self.assertRaises(header_utils.MonitorNotFound, header_utils._get_monitor_value_from_edf, self.image, \"pos_not_exists/mon\")\n\n    def test_missing_counter_pos_element(self):\n        self.assertRaises(header_utils.MonitorNotFound, header_utils._get_monitor_value_from_edf, self.image, \"bad_size/mon\")\n\n    def test_edf_file_motor(self):\n        image = fabio.open(UtilsTest.getimage(\"Pilatus1M.edf\"))\n        result = header_utils._get_monitor_value_from_edf(image, \"motor/lx\")\n        self.assertEqual(result, -0.2)\n\n    def test_edf_file_key(self):\n        image = fabio.open(UtilsTest.getimage(\"Pilatus1M.edf\"))\n        result = header_utils._get_monitor_value_from_edf(image, \"scan_no\")\n        self.assertEqual(result, 19)\n\n\nclass TestHdf5Monitor(unittest.TestCase):\n\n    @classmethod\n    def setUpClass(cls):\n        super(TestHdf5Monitor, cls).setUpClass()\n        cls.tempDir = os.path.join(UtilsTest.tempdir, cls.__name__)\n        
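# Fixture layout: one HDF5 file holding a small image stack under /images\n        # plus one monitor dataset per test case under /header/bar/.\n        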
os.makedirs(cls.tempDir)\n        cls.file = os.path.join(cls.tempDir, \"file.h5\")\n\n        h5 = h5py.File(cls.file, mode=\"w\")\n        data = numpy.array([1.0, 1.2, 1.3, 1.4]) + numpy.array([0, 0, 0]).reshape(-1, 1)\n        h5[\"images\"] = data.reshape(-1, 2, 2)\n        h5[\"header/bar/vector\"] = numpy.array([1.0, 1.2, 1.3])\n        h5[\"header/bar/const\"] = 1.5\n        h5[\"header/bar/bad_type\"] = numpy.array([1.0, 1.2j, 1.3]).reshape(1, 3, 1)\n        h5[\"header/bar/bad_shape\"] = numpy.array([1.0, 1.2, 1.3]).reshape(1, 3, 1)\n        h5[\"header/bar/bad_size\"] = numpy.array([1.0, 1.2])\n        h5.close()\n\n    @classmethod\n    def tearDownClass(cls):\n        super(TestHdf5Monitor, cls).tearDownClass()\n        shutil.rmtree(cls.tempDir)\n        cls.tempDir = None\n\n    def test_vector_monitor(self):\n        monitor_key = \"/header/bar/vector\"\n        expected_values = [1.0, 1.2, 1.3]\n        with fabio.open(self.file + \"::/images\") as image:\n            for iframe in range(image.nframes):\n                frame = image.getframe(iframe)\n                result = header_utils.get_monitor_value(frame, monitor_key)\n                self.assertAlmostEqual(result, expected_values[iframe])\n\n    def test_const_monitor(self):\n        monitor_key = \"/header/bar/const\"\n        with fabio.open(self.file + \"::/images\") as image:\n            for iframe in range(image.nframes):\n                frame = image.getframe(iframe)\n                result = header_utils.get_monitor_value(frame, monitor_key)\n                self.assertEqual(1.5, result)\n\n    def test_bad_type_monitor(self):\n        monitor_key = \"/header/bar/bad_type\"\n        with fabio.open(self.file + \"::/images\") as image:\n            frame = image.getframe(0)\n            with self.assertRaises(header_utils.MonitorNotFound):\n                header_utils.get_monitor_value(frame, monitor_key)\n\n    def test_bad_shape_monitor(self):\n        monitor_key = \"/header/bar/bad_shape\"\n        with fabio.open(self.file + \"::/images\") as image:\n            frame = image.getframe(0)\n            with self.assertRaises(header_utils.MonitorNotFound):\n                header_utils.get_monitor_value(frame, monitor_key)\n\n    def test_bad_size_monitor(self):\n        monitor_key = \"/header/bar/bad_size\"\n        expected_values = [1.0, 1.2, header_utils.MonitorNotFound]\n        with fabio.open(self.file + \"::/images\") as image:\n            for iframe in range(image.nframes):\n                frame = image.getframe(iframe)\n                expected_value = expected_values[iframe]\n                if isinstance(expected_value, type(Exception)) and issubclass(expected_value, Exception):\n                    with self.assertRaises(expected_value):\n                        header_utils.get_monitor_value(frame, monitor_key)\n                else:\n                    result = header_utils.get_monitor_value(frame, monitor_key)\n                    self.assertAlmostEqual(result, expected_value)\n\n\ndef suite():\n    loader = unittest.defaultTestLoader.loadTestsFromTestCase\n    testsuite = unittest.TestSuite()\n    testsuite.addTest(loader(TestEdfMonitor))\n    testsuite.addTest(loader(TestHdf5Monitor))\n    return testsuite\n\n\nif __name__ == '__main__':\n    runner = unittest.TextTestRunner()\n    runner.run(suite())\n    UtilsTest.clean_up()\n", "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#    Project: Fast Azimuthal integration\n#             https://github.com/silx-kit/pyFAI\n#\n#    Copyright (C) 2017-2018 European Synchrotron Radiation Facility, Grenoble, France\n#\n#    Principal author:       Jérôme Kieffer ([email protected])\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the 
following conditions:\n# .\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# .\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\nUtilities, mainly for image treatment\n\"\"\"\n\n__author__ = \"Jérôme Kieffer\"\n__contact__ = \"[email protected]\"\n__license__ = \"MIT\"\n__copyright__ = \"European Synchrotron Radiation Facility, Grenoble, France\"\n__date__ = \"25/06/2021\"\n__status__ = \"production\"\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nimport math\nimport numpy\nimport time\nimport scipy\nfrom .decorators import deprecated\n\ntry:\n    from ..ext import relabel as _relabel\nexcept ImportError:\n    logger.debug(\"Backtrace\", exc_info=True)\n    _relabel = None\n\nEPS32 = (1.0 + numpy.finfo(numpy.float32).eps)\n\n\ndef deg2rad(dd, disc=1):\n    \"\"\"\n    Convert degrees to radian in the range [-π->π[ or [0->2π[\n\n    :param dd: angle in degrees\n    :param disc: if 1 (default), use the range [-π:π[; if 0, use [0:2π[\n    :return: angle in radians in the selected range\n    \"\"\"\n    # range [0:2pi[\n    rp = (dd / 180.0) % 2.0\n    if disc:  # range [-pi:pi[\n        if rp >= 1.0:\n            rp -= 2.0\n    return rp * math.pi\n\n\ndef expand2d(vect, size2, vertical=True):\n    \"\"\"\n    This expands a vector to a 2d-array.\n\n    The result is the same as:\n\n    .. code-block:: python\n\n        if vertical:\n            numpy.outer(numpy.ones(size2), vect)\n        else:\n            numpy.outer(vect, numpy.ones(size2))\n\n    This is a ninja optimization: replace \\\\*1 with a memcopy, saves 50% of\n    time at the ms level.\n\n    :param vect: 1d vector\n    :param size2: size of the expanded dimension\n    :param vertical: if False the vector is expanded to the first dimension.\n        If True, it is expanded to the second dimension.\n    \"\"\"\n    size1 = vect.size\n    size2 = int(size2)\n    if vertical:\n        out = numpy.empty((size2, size1), vect.dtype)\n        q = vect.reshape(1, -1)\n        q.strides = 0, vect.strides[0]\n    else:\n        out = numpy.empty((size1, size2), vect.dtype)\n        q = vect.reshape(-1, 1)\n        q.strides = vect.strides[0], 0\n    out[:,:] = q\n    return out\n\n\ndef gaussian(M, std):\n    \"\"\"\n    Return a Gaussian window of length M with standard-deviation std.\n\n    This differs from the scipy.signal.gaussian implementation in that:\n    - The default is sym=False (needed for gaussian filtering without shift)\n    - This implementation is normalized\n\n    :param M: length of the windows (int)\n    :param std: standard deviation sigma\n\n    The FWHM is 2*numpy.sqrt(2 * numpy.log(2))*std\n\n    \"\"\"\n    x = numpy.arange(M) - M / 2.0\n    return numpy.exp(-(x / std) ** 2 / 2.0) / std / numpy.sqrt(2 * numpy.pi)\n\n\ndef gaussian_filter(input_img, sigma, mode=\"reflect\", cval=0.0, use_scipy=True):\n    \"\"\"\n    2-dimensional Gaussian filter implemented with FFT\n\n    :param input_img: input array to filter\n    :type input_img: array-like\n    :param sigma: standard deviation for Gaussian kernel.\n        The standard deviations of the Gaussian filter are given for each axis as a sequence,\n        or as a single number, in which case it is equal for all axes.\n    :type sigma: scalar or sequence of scalars\n    :param mode: {'reflect','constant','nearest','mirror', 'wrap'}, optional\n        The ``mode`` 
parameter determines how the array borders are\n handled, where ``cval`` is the value when mode is equal to\n 'constant'. Default is 'reflect'\n :param cval: scalar, optional\n Value to fill past edges of input if ``mode`` is 'constant'. Default is 0.0\n \"\"\"\n if use_scipy:\n res = scipy.ndimage.filters.gaussian_filter(input_img, sigma, mode=(mode or \"reflect\"))\n else:\n if isinstance(sigma, (list, tuple)):\n sigma = (float(sigma[0]), float(sigma[1]))\n else:\n sigma = (float(sigma), float(sigma))\n k0 = int(math.ceil(4.0 * float(sigma[0])))\n k1 = int(math.ceil(4.0 * float(sigma[1])))\n\n if mode != \"wrap\":\n input_img = expand(input_img, (k0, k1), mode, cval)\n s0, s1 = input_img.shape\n g0 = gaussian(s0, sigma[0])\n g1 = gaussian(s1, sigma[1])\n g0 = numpy.concatenate((g0[s0 // 2:], g0[:s0 // 2])) # faster than fftshift\n g1 = numpy.concatenate((g1[s1 // 2:], g1[:s1 // 2])) # faster than fftshift\n g2 = numpy.outer(g0, g1)\n fftIn = numpy.fft.ifft2(numpy.fft.fft2(input_img) * numpy.fft.fft2(g2).conjugate())\n res = fftIn.real.astype(numpy.float32)\n if mode != \"wrap\":\n res = res[k0:-k0, k1:-k1]\n return res\n\n\ndef shift(input_img, shift_val):\n \"\"\"\n Shift an array like scipy.ndimage.interpolation.shift(input_img, shift_val, mode=\"wrap\", order=0) but faster\n :param input_img: 2d numpy array\n :param shift_val: 2-tuple of integers\n :return: shifted image\n \"\"\"\n re = numpy.zeros_like(input_img)\n s0, s1 = input_img.shape\n d0 = shift_val[0] % s0\n d1 = shift_val[1] % s1\n r0 = (-d0) % s0\n r1 = (-d1) % s1\n re[d0:, d1:] = input_img[:r0,:r1]\n re[:d0, d1:] = input_img[r0:,:r1]\n re[d0:,:d1] = input_img[:r0, r1:]\n re[:d0,:d1] = input_img[r0:, r1:]\n return re\n\n\ndef dog(s1, s2, shape=None):\n \"\"\"\n 2D difference of gaussian\n typically 1 to 10 parameters\n \"\"\"\n if shape is None:\n maxi = max(s1, s2) * 5\n u, v = numpy.ogrid[-maxi:maxi + 1, -maxi:maxi + 1]\n else:\n u, v = numpy.ogrid[-shape[0] // 2:shape[0] - shape[0] // 2, -shape[1] // 2:shape[1] - shape[1] // 2]\n r2 = u * u + v * v\n centered = numpy.exp(-r2 / (2. * s1) ** 2) / 2. / numpy.pi / s1 - numpy.exp(-r2 / (2. * s2) ** 2) / 2. / numpy.pi / s2\n return centered\n\n\ndef dog_filter(input_img, sigma1, sigma2, mode=\"reflect\", cval=0.0):\n \"\"\"\n 2-dimensional Difference of Gaussian filter implemented with FFT\n\n :param input_img: input_img array to filter\n :type input_img: array-like\n :param sigma: standard deviation for Gaussian kernel.\n The standard deviations of the Gaussian filter are given for each axis as a sequence,\n or as a single number, in which case it is equal for all axes.\n :type sigma: scalar or sequence of scalars\n :param mode: {'reflect','constant','nearest','mirror', 'wrap'}, optional\n The ``mode`` parameter determines how the array borders are\n handled, where ``cval`` is the value when mode is equal to\n 'constant'. Default is 'reflect'\n :param cval: scalar, optional\n Value to fill past edges of input if ``mode`` is 'constant'. 
Default is 0.0\n    \"\"\"\n\n    sigma = max(sigma1, sigma2)\n    if mode != \"wrap\":\n        input_img = expand(input_img, sigma, mode, cval)\n    s0, s1 = input_img.shape\n    if isinstance(sigma, (list, tuple)):\n        k0 = int(math.ceil(4.0 * float(sigma[0])))\n        k1 = int(math.ceil(4.0 * float(sigma[1])))\n    else:\n        k0 = k1 = int(math.ceil(4.0 * float(sigma)))\n\n    res = numpy.fft.ifft2(numpy.fft.fft2(input_img.astype(complex)) *\n                          numpy.fft.fft2(shift(dog(sigma1, sigma2, (s0, s1)),\n                                               (s0 // 2, s1 // 2)).astype(complex)).conjugate())\n    if mode == \"wrap\":\n        return res\n    else:\n        return res[k0:-k0, k1:-k1]\n\n\ndef expand(input_img, sigma, mode=\"constant\", cval=0.0):\n    \"\"\"Expand array input_img with its reflection on boundaries\n\n    :param input_img: 2D array\n    :param sigma: float or 2-tuple of floats.\n    :param mode: \"constant\", \"nearest\", \"reflect\" or \"mirror\"\n    :param cval: filling value used for constant, 0.0 by default\n\n    Nota: sigma is the half-width of the kernel. For gaussian convolution it is advised to use 4*sigma_of_gaussian\n    \"\"\"\n    s0, s1 = input_img.shape\n    dtype = input_img.dtype\n    if isinstance(sigma, (list, tuple)):\n        k0 = int(math.ceil(float(sigma[0])))\n        k1 = int(math.ceil(float(sigma[1])))\n    else:\n        k0 = k1 = int(math.ceil(float(sigma)))\n    if k0 > s0 or k1 > s1:\n        raise RuntimeError(\"Makes little sense to apply a kernel (%i,%i) larger than the image (%i,%i)\" % (k0, k1, s0, s1))\n    output = numpy.zeros((s0 + 2 * k0, s1 + 2 * k1), dtype=dtype) + float(cval)\n    output[k0:k0 + s0, k1:k1 + s1] = input_img\n    if (mode == \"mirror\"):\n        # 4 corners\n        output[s0 + k0:, s1 + k1:] = input_img[-2:-k0 - 2:-1, -2:-k1 - 2:-1]\n        output[:k0,:k1] = input_img[k0 - 0:0:-1, k1 - 0:0:-1]\n        output[:k0, s1 + k1:] = input_img[k0 - 0:0:-1, s1 - 2: s1 - k1 - 2:-1]\n        output[s0 + k0:,:k1] = input_img[s0 - 2: s0 - k0 - 2:-1, k1 - 0:0:-1]\n        # 4 sides\n        output[k0:k0 + s0,:k1] = input_img[:s0, k1 - 0:0:-1]\n        output[:k0, k1:k1 + s1] = input_img[k0 - 0:0:-1,:s1]\n        output[-k0:, k1:s1 + k1] = input_img[-2:s0 - k0 - 2:-1,:]\n        output[k0:s0 + k0, -k1:] = input_img[:, -2:s1 - k1 - 2:-1]\n    elif mode == \"reflect\":\n        # 4 corners\n        output[s0 + k0:, s1 + k1:] = input_img[-1:-k0 - 1:-1, -1:-k1 - 1:-1]\n        output[:k0,:k1] = input_img[k0 - 1::-1, k1 - 1::-1]\n        output[:k0, s1 + k1:] = input_img[k0 - 1::-1, s1 - 1: s1 - k1 - 1:-1]\n        output[s0 + k0:,:k1] = input_img[s0 - 1: s0 - k0 - 1:-1, k1 - 1::-1]\n        # 4 sides\n        output[k0:k0 + s0,:k1] = input_img[:s0, k1 - 1::-1]\n        output[:k0, k1:k1 + s1] = input_img[k0 - 1::-1,:s1]\n        output[-k0:, k1:s1 + k1] = input_img[:s0 - k0 - 1:-1,:]\n        output[k0:s0 + k0, -k1:] = input_img[:,:s1 - k1 - 1:-1]\n    elif mode == \"nearest\":\n        # 4 corners\n        output[s0 + k0:, s1 + k1:] = input_img[-1, -1]\n        output[:k0,:k1] = input_img[0, 0]\n        output[:k0, s1 + k1:] = input_img[0, -1]\n        output[s0 + k0:,:k1] = input_img[-1, 0]\n        # 4 sides\n        output[k0:k0 + s0,:k1] = expand2d(input_img[:, 0], k1, False)\n        output[:k0, k1:k1 + s1] = expand2d(input_img[0,:], k0)\n        output[-k0:, k1:s1 + k1] = expand2d(input_img[-1,:], k0)\n        output[k0:s0 + k0, -k1:] = expand2d(input_img[:, -1], k1, False)\n    elif mode == \"wrap\":\n        # 4 corners\n        output[s0 + k0:, s1 + k1:] = input_img[:k0,:k1]\n        output[:k0,:k1] = input_img[-k0:, -k1:]\n        output[:k0, s1 + k1:] = input_img[-k0:,:k1]\n        output[s0 + k0:,:k1] = input_img[:k0, -k1:]\n        # 4 sides\n        output[k0:k0 + s0,:k1] = input_img[:, -k1:]\n        output[:k0, k1:k1 + s1] = input_img[-k0:,:]\n        output[-k0:, k1:s1 + k1] = input_img[:k0,:]\n        output[k0:s0 + k0, -k1:] = input_img[:,:k1]\n    elif mode == 
\"constant\":\n # Nothing to do\n pass\n\n else:\n raise RuntimeError(\"Unknown expand mode: %s\" % mode)\n return output\n\n\ndef relabel(label, data, blured, max_size=None):\n \"\"\"\n Relabel limits the number of region in the label array.\n They are ranked relatively to their max(I0)-max(blur(I0))\n\n :param label: a label array coming out of ``scipy.ndimage.measurement.label``\n :param data: an array containing the raw data\n :param blured: an array containing the blurred data\n :param max_size: the max number of label wanted\n :return: array like label\n \"\"\"\n if _relabel:\n max_label = label.max()\n _a, _b, _c, d = _relabel.countThem(label, data, blured)\n count = d\n sortCount = count.argsort()\n invSortCount = sortCount[-1::-1]\n invCutInvSortCount = numpy.zeros(max_label + 1, dtype=int)\n for i, j in enumerate(list(invSortCount[:max_size])):\n invCutInvSortCount[j] = i\n return invCutInvSortCount[label]\n else:\n logger.warning(\"relabel Cython module is not available...\")\n return label\n\n\ndef binning(input_img, binsize, norm=True):\n \"\"\"\n :param input_img: input ndarray\n :param binsize: int or 2-tuple representing the size of the binning\n :param norm: if False, do average instead of sum\n :return: binned input ndarray\n \"\"\"\n inputSize = input_img.shape\n outputSize = []\n assert(len(inputSize) == 2)\n if isinstance(binsize, int):\n binsize = (binsize, binsize)\n for i, j in zip(inputSize, binsize):\n assert(i % j == 0)\n outputSize.append(i // j)\n\n if numpy.array(binsize).prod() < 50:\n out = numpy.zeros(tuple(outputSize))\n for i in range(binsize[0]):\n for j in range(binsize[1]):\n out += input_img[i::binsize[0], j::binsize[1]]\n else:\n temp = input_img.copy()\n temp.shape = (outputSize[0], binsize[0], outputSize[1], binsize[1])\n out = temp.sum(axis=3).sum(axis=1)\n if not norm:\n out /= binsize[0] * binsize[1]\n return out\n\n\ndef unbinning(binnedArray, binsize, norm=True):\n \"\"\"\n :param binnedArray: input ndarray\n :param binsize: 2-tuple representing the size of the binning\n :param norm: if True (default) decrease the intensity by binning factor. 
If False, it is non-conservative\n    :return: unbinned input ndarray\n    \"\"\"\n    if isinstance(binsize, int):\n        binsize = (binsize, binsize)\n    outputShape = []\n    for i, j in zip(binnedArray.shape, binsize):\n        outputShape.append(i * j)\n    out = numpy.zeros(tuple(outputShape), dtype=binnedArray.dtype)\n    for i in range(binsize[0]):\n        for j in range(binsize[1]):\n            out[i::binsize[0], j::binsize[1]] += binnedArray\n    if norm:\n        out /= binsize[0] * binsize[1]\n    return out\n\n\n@deprecated(replacement=\"unbinning\", since_version=\"0.15\", only_once=True)\ndef unBinning(*args, **kwargs):\n    return unbinning(*args, **kwargs)\n\n\ndef shift_fft(input_img, shift_val, method=\"fft\"):\n    \"\"\"Do shift using FFTs\n\n    Shift an array like scipy.ndimage.interpolation.shift(input, shift, mode=\"wrap\", order=\"infinity\") but faster\n    :param input_img: 2d numpy array\n    :param shift_val: 2-tuple of float\n    :return: shifted image\n    \"\"\"\n    if method == \"fft\":\n        d0, d1 = input_img.shape\n        v0, v1 = shift_val\n        f0 = numpy.fft.ifftshift(numpy.arange(-d0 // 2, d0 // 2))\n        f1 = numpy.fft.ifftshift(numpy.arange(-d1 // 2, d1 // 2))\n        m1, m0 = numpy.meshgrid(f1, f0)\n        e0 = numpy.exp(-2j * numpy.pi * v0 * m0 / float(d0))\n        e1 = numpy.exp(-2j * numpy.pi * v1 * m1 / float(d1))\n        e = e0 * e1\n        out = abs(numpy.fft.ifft2(numpy.fft.fft2(input_img) * e))\n    else:\n        # fall back on spline interpolation; order must be an integer for scipy\n        out = scipy.ndimage.interpolation.shift(input_img, shift_val, mode=\"wrap\", order=3)\n    return out\n\n\n@deprecated(replacement=\"shift_fft\", since_version=\"0.15\", only_once=True)\ndef shiftFFT(*args, **kwargs):\n    return shift_fft(*args, **kwargs)\n\n\ndef maximum_position(img):\n    \"\"\"\n    Same as scipy.ndimage.measurements.maximum_position:\n    Find the position of the maximum of the values of the array.\n\n    :param img: 2-D image\n    :return: 2-tuple of int with the position of the maximum\n    \"\"\"\n    maxarg = numpy.argmax(img)\n    _, s1 = img.shape\n    return (maxarg // s1, maxarg % s1)\n\n\ndef center_of_mass(img):\n    \"\"\"\n    Calculate the center of mass of the array.\n    Like scipy.ndimage.measurements.center_of_mass\n    :param img: 2-D array\n    :return: 2-tuple of float with the center of mass\n    \"\"\"\n    d0, d1 = img.shape\n    a0, a1 = numpy.ogrid[:d0,:d1]\n    img = img.astype(\"float64\")\n    img /= img.sum()\n    return ((a0 * img).sum(), (a1 * img).sum())\n\n\ndef measure_offset(img1, img2, method=\"numpy\", withLog=False, withCorr=False):\n    \"\"\"\n    Measure the actual offset between 2 images\n    :param img1: ndarray, first image\n    :param img2: ndarray, second image, same shape as img1\n    :param withLog: shall we return logs as well ? 
boolean\n :return: tuple of floats with the offsets\n \"\"\"\n method = str(method)\n ################################################################################\n # Start convolutions\n ################################################################################\n shape = img1.shape\n logs = []\n assert img2.shape == shape\n t0 = time.perf_counter()\n i1f = numpy.fft.fft2(img1)\n i2f = numpy.fft.fft2(img2)\n res = numpy.fft.ifft2(i1f * i2f.conjugate()).real\n t1 = time.perf_counter()\n\n ################################################################################\n # END of convolutions\n ################################################################################\n offset1 = maximum_position(res)\n res = shift(res, (shape[0] // 2, shape[1] // 2))\n mean = res.mean(dtype=\"float64\")\n maxi = res.max()\n std = res.std(dtype=\"float64\")\n SN = (maxi - mean) / std\n new = numpy.maximum(numpy.zeros(shape), res - numpy.ones(shape) * (mean + std * SN * 0.9))\n com2 = center_of_mass(new)\n logs.append(\"MeasureOffset: fine result of the centered image: %s %s \" % com2)\n offset2 = ((com2[0] - shape[0] // 2) % shape[0], (com2[1] - shape[1] // 2) % shape[1])\n delta0 = (offset2[0] - offset1[0]) % shape[0]\n delta1 = (offset2[1] - offset1[1]) % shape[1]\n if delta0 > shape[0] // 2:\n delta0 -= shape[0]\n if delta1 > shape[1] // 2:\n delta1 -= shape[1]\n if (abs(delta0) > 2) or (abs(delta1) > 2):\n logs.append(\"MeasureOffset: Raw offset is %s and refined is %s. Please investigate !\" % (offset1, offset2))\n listOffset = list(offset2)\n if listOffset[0] > shape[0] // 2:\n listOffset[0] -= shape[0]\n if listOffset[1] > shape[1] // 2:\n listOffset[1] -= shape[1]\n offset = tuple(listOffset)\n t2 = time.perf_counter()\n logs.append(\"MeasureOffset: fine result: %s %s\" % offset)\n logs.append(\"MeasureOffset: execution time: %.3fs with %.3fs for FFTs\" % (t2 - t0, t1 - t0))\n if withLog:\n if withCorr:\n return offset, logs, new\n else:\n return offset, logs\n else:\n if withCorr:\n return offset, new\n else:\n return offset\n\n\ndef _numpy_backport_percentile(a, q, axis=None, out=None, overwrite_input=False):\n \"\"\"\n Compute the qth percentile of the data along the specified axis.\n\n Returns the qth percentile of the array elements.\n\n Parameters\n ----------\n a : array_like\n Input array or object that can be converted to an array.\n q : float in range of [0,100] (or sequence of floats)\n Percentile to compute which must be between 0 and 100 inclusive.\n axis : int, optional\n Axis along which the percentiles are computed. The default (None)\n is to compute the median along a flattened version of the array.\n out : ndarray, optional\n Alternative output array in which to place the result. It must\n have the same shape and buffer length as the expected output,\n but the type (of the output) will be cast if necessary.\n overwrite_input : bool, optional\n If True, then allow use of memory of input array `a` for\n calculations. The input array will be modified by the call to\n median. This will save memory when you do not need to preserve\n the contents of the input array. Treat the input as undefined,\n but it will probably be fully or partially sorted.\n Default is False. Note that, if `overwrite_input` is True and the\n input is not already an array, an error will be raised.\n\n Returns\n -------\n pcntile : ndarray\n A new array holding the result (unless `out` is specified, in\n which case that array is returned instead). 
If the input contains\n        integers, or floats of smaller precision than 64, then the output\n        data-type is float64. Otherwise, the output data-type is the same\n        as that of the input.\n\n    See Also\n    --------\n    mean, median\n\n    Notes\n    -----\n    Given a vector V of length N, the qth percentile of V is the qth ranked\n    value in a sorted copy of V. A weighted average of the two nearest\n    neighbors is used if the normalized ranking does not match q exactly.\n    The same as the median if ``q=50``, the same as the minimum if ``q=0``\n    and the same as the maximum if ``q=100``.\n\n    Examples\n    --------\n    >>> a = np.array([[10, 7, 4], [3, 2, 1]])\n    >>> a\n    array([[10,  7,  4],\n           [ 3,  2,  1]])\n    >>> np.percentile(a, 50)\n    3.5\n    >>> np.percentile(a, 50, axis=0)\n    array([ 6.5,  4.5,  2.5])\n    >>> np.percentile(a, 50, axis=1)\n    array([ 7.,  2.])\n\n    >>> m = np.percentile(a, 50, axis=0)\n    >>> out = np.zeros_like(m)\n    >>> np.percentile(a, 50, axis=0, out=m)\n    array([ 6.5,  4.5,  2.5])\n    >>> m\n    array([ 6.5,  4.5,  2.5])\n\n    >>> b = a.copy()\n    >>> np.percentile(b, 50, axis=1, overwrite_input=True)\n    array([ 7.,  2.])\n    >>> assert not np.all(a==b)\n    >>> b = a.copy()\n    >>> np.percentile(b, 50, axis=None, overwrite_input=True)\n    3.5\n\n    \"\"\"\n    a = numpy.asarray(a)\n\n    if q == 0:\n        return a.min(axis=axis, out=out)\n    elif q == 100:\n        return a.max(axis=axis, out=out)\n\n    if overwrite_input:\n        if axis is None:\n            sorted_list = a.ravel()\n            sorted_list.sort()\n        else:\n            a.sort(axis=axis)\n            sorted_list = a\n    else:\n        sorted_list = numpy.sort(a, axis=axis)\n    if axis is None:\n        axis = 0\n\n    return _compute_qth_percentile(sorted_list, q, axis, out)\n\n\ndef _compute_qth_percentile(sorted_list, q, axis, out):\n    \"\"\"\n    Handle sequence of q's without calling sort multiple times\n    \"\"\"\n    if not numpy.isscalar(q):\n        p = [_compute_qth_percentile(sorted_list, qi, axis, None)\n             for qi in q]\n\n        if out is not None:\n            out.flat = p\n\n        return p\n\n    q = q / 100.0\n    if (q < 0) or (q > 1):\n        raise ValueError(\"percentile must be in the range [0,100]\")\n\n    indexer = [slice(None)] * sorted_list.ndim\n    Nx = sorted_list.shape[axis]\n    index = q * (Nx - 1)\n    i = int(index)\n    if i == index:\n        indexer[axis] = slice(i, i + 1)\n        weights = numpy.array(1)\n        sumval = 1.0\n    else:\n        indexer[axis] = slice(i, i + 2)\n        j = i + 1\n        weights = numpy.array([(j - index), (index - i)], float)\n        wshape = [1] * sorted_list.ndim\n        wshape[axis] = 2\n        weights.shape = wshape\n        sumval = weights.sum()\n\n    # Use add.reduce in both cases to coerce data type as well as\n    # check and use out array.\n    return numpy.add.reduce(sorted_list[indexer] * weights, axis=axis, out=out) / sumval\n\n\ntry:\n    from numpy import percentile\nexcept ImportError:\n    # backport percentile from numpy 1.6.2\n    logger.debug(\"Backtrace\", exc_info=True)\n    percentile = _numpy_backport_percentile\n\n\ndef round_fft(N):\n    \"\"\"\n    This function returns the integer >= N for which the Fourier analysis is faster (from the FFT point of view)\n\n    Credit: Alessandro Mirone, ESRF, 2012\n\n    :param N: integer on which one would like to do a Fourier transform\n    :return: integer with a better choice\n    \"\"\"\n    FA, FB, FC, FD, FE, FFF = 2, 3, 5, 7, 11, 13\n    DIFF = 9999999999\n    RES = 1\n    AA = 1\n    for _ in range(int(math.log(N) / math.log(FA) + 2)):\n        BB = AA\n        for _ in range(int(math.log(N) / math.log(FB) + 2)):\n            CC = BB\n\n            for _ in range(int(math.log(N) / math.log(FC) + 2)):\n                DD = CC\n\n                for _ in range(int(math.log(N) / math.log(FD) + 2)):\n                    EE = DD\n\n                    for E in range(2):\n                        FF = EE\n\n                        for _ in range(2 - 
E):\n                            if FF >= N and DIFF > abs(N - FF):\n                                DIFF = abs(N - FF)\n                                RES = FF\n                            if FF > N:\n                                break\n                            FF = FF * FFF\n                        if EE > N:\n                            break\n                        EE = EE * FE\n                    if DD > N:\n                        break\n                    DD = DD * FD\n                if CC > N:\n                    break\n                CC = CC * FC\n            if BB > N:\n                break\n            BB = BB * FB\n        if AA > N:\n            break\n        AA = AA * FA\n    return RES\n\n\n@deprecated(replacement=\"round_fft\", since_version=\"0.15\", only_once=True)\ndef roundfft(*args, **kwargs):\n    return round_fft(*args, **kwargs)\n\n\ndef is_far_from_group(pt, lst_pts, d2):\n    \"\"\"\n    Tells if a point is far from a group of points, distance greater than d2 (distance squared)\n\n    :param pt: point of interest\n    :param lst_pts: list of points\n    :param d2: minimum distance squared\n    :return: True if the point is far from all others.\n\n    \"\"\"\n    for apt in lst_pts:\n        dsq = sum((i - j) * (i - j) for i, j in zip(apt, pt))\n        if dsq <= d2:\n            return False\n    return True\n\n\ndef rwp(obt, ref):\n    \"\"\"Compute :math:`\\\\sqrt{\\\\sum \\\\frac{4\\\\cdot(obt-ref)^2}{(obt + ref)^2}}`.\n\n    This is done for symmetry reasons between obt and ref\n\n    :param obt: obtained data\n    :type obt: 2-list of array of the same size\n    :param ref: reference data\n    :type ref: 2-list of array of the same size\n    :return: Rwp value, linearly interpolated\n    \"\"\"\n    ref0, ref1 = ref[:2]\n    obt0, obt1 = obt[:2]\n    big0 = numpy.concatenate((obt0, ref0))\n    big0.sort()\n    big0 = numpy.unique(big0)\n    big_ref = numpy.interp(big0, ref0, ref1, 0.0, 0.0)\n    big_obt = numpy.interp(big0, obt0, obt1, 0.0, 0.0)\n    big_mean = (big_ref + big_obt) / 2.0\n    big_delta = (big_ref - big_obt)\n    non_null = abs(big_mean) > 1e-10\n    return numpy.sqrt(((big_delta[non_null]) ** 2 / ((big_mean[non_null]) ** 2)).sum())\n\n\ndef chi_square(obt, ref):\n    \"\"\"Compute :math:`\\\\frac{1}{N}\\\\sum \\\\frac{2\\\\cdot(obt-ref)^2}{\\\\sigma^2_{obt} + \\\\sigma^2_{ref}}`.\n\n    This is done for symmetry reasons between obt and ref\n\n    :param obt: obtained data\n    :type obt: 3-tuple of array of the same size containing position, intensity, standard deviation\n    :param ref: reference data\n    :type ref: 3-tuple of array of the same size containing position, intensity, standard deviation\n    :return: Chi² value, linearly interpolated\n    \"\"\"\n    ref_pos, ref_int, ref_std = ref\n    obt_pos, obt_int, obt_std = obt\n    big_pos = numpy.concatenate((ref_pos, obt_pos))\n    big_pos.sort()\n    big_pos = numpy.unique(big_pos)\n    big_ref_int = numpy.interp(big_pos, ref_pos, ref_int, 0.0, 0.0)\n    big_obt_int = numpy.interp(big_pos, obt_pos, obt_int, 0.0, 0.0)\n    big_delta_int = (big_ref_int - big_obt_int)\n\n    big_ref_var = numpy.interp(big_pos, ref_pos, ref_std, 0.0, 0.0) ** 2\n    big_obt_var = numpy.interp(big_pos, obt_pos, obt_std, 0.0, 0.0) ** 2\n    big_variance = (big_ref_var + big_obt_var) / 2.0\n    non_null = abs(big_variance) > 1e-10\n    return (big_delta_int[non_null] ** 2 / big_variance[non_null]).mean()\n\n\ndef interp_filter(ary, out=None):\n    \"\"\"Interpolate missing values (nan or infinite) in a 1D array\n\n    :param ary: 1D array\n    :param out: destination array (use ary to avoid allocation)\n    :return: 1D array\n    \"\"\"\n    x = numpy.arange(ary.shape[0])\n    mask_valid = numpy.isfinite(ary)\n    mask_invalid = numpy.logical_not(mask_valid)\n    where = numpy.where(mask_valid)[0]\n    first = ary[where[0]]\n    last = ary[where[-1]]\n    if out is None:\n        out = ary.copy()\n    elif id(out) == id(ary):\n        pass\n    else:\n        out[mask_valid] = ary[mask_valid]\n    out[mask_invalid] = numpy.interp(x[mask_invalid], x[mask_valid], ary[mask_valid],\n                                     left=first, right=last)\n    return out\n" ]
[ [ "numpy.isreal" ], [ "numpy.array" ], [ "numpy.fft.fft2", "scipy.ndimage.filters.gaussian_filter", "numpy.exp", "numpy.finfo", "numpy.where", "numpy.sort", "numpy.outer", "numpy.concatenate", "numpy.zeros_like", "numpy.empty", "numpy.add.reduce", "numpy.interp", "numpy.argmax", "numpy.arange", "numpy.isfinite", "numpy.sqrt", "numpy.array", "numpy.zeros", "numpy.isscalar", "numpy.logical_not", "scipy.ndimage.interpolation.shift", "numpy.asarray", "numpy.ones", "numpy.meshgrid", "numpy.unique" ] ]
VieZhong/pointer-generator-keyphrase
[ "b529d416a0e679411c1a0fac5da46d5f1e63341f" ]
[ "run_summarization.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Modifications Copyright 2017 Abigail See\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"This is the top-level file to train, evaluate or test your summarization model\"\"\"\n# coding=utf-8\nimport sys\nimport time\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\nimport tensorflow as tf\nimport numpy as np\nfrom collections import namedtuple\nfrom data import Vocab, get_stop_word_ids\nfrom batcher import Batcher\nfrom model import SummarizationModel\nfrom decode import BeamSearchDecoder\nimport util\nfrom tensorflow.python import debug as tf_debug\n\nFLAGS = tf.app.flags.FLAGS\n\n# Where to find data\ntf.app.flags.DEFINE_string('data_path', '/tmp/tmp_input.txt', 'Path expression to tf.Example datafiles. Can include wildcards to access multiple datafiles.')\ntf.app.flags.DEFINE_string('vocab_path', '/data/nssd_data/finished_files/vocab', 'Path expression to text vocabulary file.')\ntf.app.flags.DEFINE_string('stop_words_path', '/data/nssd_data/stopword/stopword_cn.txt', 'Path expression to stop words file')\ntf.app.flags.DEFINE_string('ref_dir', '/data/__DATASET__/val_reference/', 'Path to reference words')\n\n# Important settings\ntf.app.flags.DEFINE_string('mode', 'decode', 'must be one of train/eval/decode')\ntf.app.flags.DEFINE_boolean('single_pass', True, 'For decode mode only. If True, run eval on the full dataset using a fixed checkpoint, i.e. take the current checkpoint, and use it to produce one summary for each example in the dataset, write the summaries to file and then get ROUGE scores for the whole dataset. If False (default), run concurrent decoding, i.e. repeatedly load latest checkpoint, use it to produce summaries for randomly-chosen examples and log the results to screen, indefinitely.')\ntf.app.flags.DEFINE_boolean('decode_only', True, 'If True, only decode, do not calculate f1 score. only for chinese, only for special format data_path')\n\n# Where to save output\ntf.app.flags.DEFINE_string('log_root', '/data/nssd_data/model/', 'Root directory for all logging.')\ntf.app.flags.DEFINE_string('exp_name', 'nssd_COPM_experiment', 'Name for experiment. 
Logs will be saved in a directory with this name, under log_root.')\ntf.app.flags.DEFINE_string('language', 'chinese', 'language')\n\n# Encoder and decoder settings\ntf.app.flags.DEFINE_string('cell_type', 'LSTM', 'LSTM or GRU')\ntf.app.flags.DEFINE_float('dropout', 0.0, 'for dropout')\n\n# Hyperparameters\ntf.app.flags.DEFINE_integer('hidden_dim', 256, 'dimension of RNN hidden states')\ntf.app.flags.DEFINE_integer('emb_dim', 128, 'dimension of word embeddings')\ntf.app.flags.DEFINE_integer('batch_size', 16, 'minibatch size')\ntf.app.flags.DEFINE_integer('max_enc_steps', 400, 'max timesteps of encoder (max source text tokens)')\ntf.app.flags.DEFINE_integer('max_dec_steps', 6, 'max timesteps of decoder (max summary tokens)')\ntf.app.flags.DEFINE_integer('beam_size', 50, 'beam size for beam search decoding.')\ntf.app.flags.DEFINE_integer('beam_depth', 6, 'beam depth for beam search decoding')\ntf.app.flags.DEFINE_integer('min_dec_steps', 1, 'Minimum sequence length of generated summary. Applies only for beam search decoding mode')\ntf.app.flags.DEFINE_integer('vocab_size', 50000, 'Size of vocabulary. These will be read from the vocabulary file in order. If the vocabulary file contains fewer words than this number, or if this number is set to 0, will take all words in the vocabulary file.')\ntf.app.flags.DEFINE_integer('max_keyphrase_num', 10, 'max keyphrase number')\ntf.app.flags.DEFINE_integer('max_title_len', 20, 'max title length')\ntf.app.flags.DEFINE_integer('occurrence_window_size', 3, 'window size while calculating occurrence matrix')\ntf.app.flags.DEFINE_float('lr', 0.15, 'learning rate')\ntf.app.flags.DEFINE_float('adagrad_init_acc', 0.1, 'initial accumulator value for Adagrad')\ntf.app.flags.DEFINE_float('rand_unif_init_mag', 0.02, 'magnitude for lstm cells random uniform initialization')\ntf.app.flags.DEFINE_float('trunc_norm_init_std', 1e-4, 'std of trunc norm init, used for initializing everything else')\ntf.app.flags.DEFINE_float('max_grad_norm', 2.0, 'for gradient clipping')\ntf.app.flags.DEFINE_string('optimizer', 'Adagrad', 'Adagrad or Adam')\n\n# Pointer-generator or baseline model\ntf.app.flags.DEFINE_boolean('pointer_gen', True, 'If True, use pointer-generator model. If False, use baseline model.')\n\n# Coverage hyperparameters\ntf.app.flags.DEFINE_boolean('coverage', True, 'Use coverage mechanism. Note, the experiments reported in the ACL paper train WITHOUT coverage until converged, and then train for a short phase WITH coverage afterwards. i.e. to reproduce the results in the ACL paper, turn this off for most of training then turn on for a short phase at the end.')\ntf.app.flags.DEFINE_float('cov_loss_wt', 1.0, 'Weight of coverage loss (lambda in the paper). 
If zero, then no incentive to minimize coverage loss.')\n\n# VieZhong Improvement Hyperparameters\ntf.app.flags.DEFINE_boolean('attention_weighted', False, 'Whether attention mechanism is weighted.')\ntf.app.flags.DEFINE_boolean('coverage_weighted', False, 'Whether coverage mechanism is weighted.')\ntf.app.flags.DEFINE_boolean('coverage_weighted_expansion', False, 'Whether the weighted coverage mechanism uses the expansion variant.')\ntf.app.flags.DEFINE_boolean('co_occurrence', False, 'Whether to use co_occurrence factor.')\ntf.app.flags.DEFINE_boolean('co_occurrence_h', True, 'Whether to use co_occurrence_h factor.')\ntf.app.flags.DEFINE_boolean('co_occurrence_i', False, 'Whether to concat co_occurrence matrix to encoder embeddings.')\ntf.app.flags.DEFINE_boolean('prev_relation', False, 'Whether to use the previous output word to predict the next output word.')\ntf.app.flags.DEFINE_boolean('source_siding_bridge', False, 'Whether to use source siding bridging model.')\ntf.app.flags.DEFINE_boolean('target_siding_bridge', False, 'Whether to use target siding bridging model.')\ntf.app.flags.DEFINE_boolean('markov_attention', False, 'Whether to use markov attention mechanism.')\ntf.app.flags.DEFINE_boolean('markov_attention_contribution', False, 'Whether to use markov attention contribution mechanism.')\ntf.app.flags.DEFINE_boolean('markov_attention_contribution_used_x', False, 'Whether the markov attention contribution mechanism uses x as input.')\ntf.app.flags.DEFINE_boolean('tagger_attention', False, 'Whether to use tagger_attention factor')\ntf.app.flags.DEFINE_boolean('tagger_encoding', False, 'Whether to use tagger_encoding factor')\ntf.app.flags.DEFINE_boolean('title_engaged', False, 'Whether to use title_engaged factor')\ntf.app.flags.DEFINE_boolean('title_guided', False, 'Whether to use title_guided factor')\ntf.app.flags.DEFINE_boolean('top_ten_kept', False, 'Whether to use top_ten_kept factor')\n\ntf.app.flags.DEFINE_boolean('generation_only', False, 'Whether in generation mode only')\ntf.app.flags.DEFINE_boolean('copy_only', False, 'Whether in copy mode only')\n\n# Utility flags, for restoring and changing checkpoints\ntf.app.flags.DEFINE_boolean('convert_to_coverage_model', False, 'Convert a non-coverage model to a coverage model. Turn this on and run in train mode. Your current training model will be copied to a new version (same name with _cov_init appended) that will be ready to run with coverage flag turned on, for the coverage training stage.')\ntf.app.flags.DEFINE_boolean('restore_best_model', False, 'Restore the best model in the eval/ dir and save it in the train/ dir, ready to be used for further training. Useful for early stopping, or if your training checkpoint has become corrupted with e.g. NaN values.')\n\n# Debugging. See https://www.tensorflow.org/programmers_guide/debugger\ntf.app.flags.DEFINE_boolean('debug', False, \"Run in tensorflow's debug mode (watches for NaN/inf values)\")\n\n\n\ndef calc_running_avg_loss(loss, running_avg_loss, summary_writer, step, decay=0.99):\n  \"\"\"Calculate the running average loss via exponential decay.\n  This is used to implement early stopping w.r.t. a more smooth loss curve than the raw loss curve.\n\n  Args:\n    loss: loss on the most recent eval step\n    running_avg_loss: running_avg_loss so far\n    summary_writer: FileWriter object to write for tensorboard\n    step: training iteration step\n    decay: rate of exponential decay, a float between 0 and 1. 
Larger is smoother.\n\n Returns:\n running_avg_loss: new running average loss\n \"\"\"\n if running_avg_loss == 0: # on the first iteration just take the loss\n running_avg_loss = loss\n else:\n running_avg_loss = running_avg_loss * decay + (1 - decay) * loss\n running_avg_loss = min(running_avg_loss, 12) # clip\n loss_sum = tf.Summary()\n tag_name = 'running_avg_loss/decay=%f' % (decay)\n loss_sum.value.add(tag=tag_name, simple_value=running_avg_loss)\n summary_writer.add_summary(loss_sum, step)\n tf.logging.info('running_avg_loss: %f', running_avg_loss)\n return running_avg_loss\n\n\ndef restore_best_model():\n \"\"\"Load bestmodel file from eval directory, add variables for adagrad, and save to train directory\"\"\"\n tf.logging.info(\"Restoring bestmodel for training...\")\n\n # Initialize all vars in the model\n sess = tf.Session(config=util.get_config())\n print(\"Initializing all variables...\")\n sess.run(tf.initialize_all_variables())\n\n # Restore the best model from eval dir\n saver = tf.train.Saver([v for v in tf.all_variables() if \"Adagrad\" not in v.name])\n print(\"Restoring all non-adagrad variables from best model in eval dir...\")\n curr_ckpt = util.load_ckpt(saver, sess, \"eval\")\n print (\"Restored %s.\" % curr_ckpt)\n\n # Save this model to train dir and quit\n new_model_name = curr_ckpt.split(\"/\")[-1].replace(\"bestmodel\", \"model\")\n new_fname = os.path.join(FLAGS.log_root, \"train\", new_model_name)\n print (\"Saving model to %s...\" % (new_fname))\n new_saver = tf.train.Saver() # this saver saves all variables that now exist, including Adagrad variables\n new_saver.save(sess, new_fname)\n print (\"Saved.\")\n exit()\n\n\ndef convert_to_coverage_model():\n \"\"\"Load non-coverage checkpoint, add initialized extra variables for coverage, and save as new checkpoint\"\"\"\n tf.logging.info(\"converting non-coverage model to coverage model..\")\n\n # initialize an entire coverage model from scratch\n sess = tf.Session(config=util.get_config())\n print(\"initializing everything...\")\n sess.run(tf.global_variables_initializer())\n\n # load all non-coverage weights from checkpoint\n saver = tf.train.Saver([v for v in tf.global_variables() if \"coverage\" not in v.name and \"Adagrad\" not in v.name])\n print(\"restoring non-coverage variables...\")\n curr_ckpt = util.load_ckpt(saver, sess)\n print(\"restored.\")\n\n # save this model and quit\n new_fname = curr_ckpt + '_cov_init'\n print(\"saving model to %s...\" % (new_fname))\n new_saver = tf.train.Saver() # this one will save all variables that now exist\n new_saver.save(sess, new_fname)\n print(\"saved.\")\n exit()\n\n\ndef setup_training(model, batcher):\n \"\"\"Does setup before starting training (run_training)\"\"\"\n train_dir = os.path.join(FLAGS.log_root, \"train\")\n if not os.path.exists(train_dir): os.makedirs(train_dir)\n\n model.build_graph() # build the graph\n if FLAGS.convert_to_coverage_model:\n assert FLAGS.coverage, \"To convert your non-coverage model to a coverage model, run with convert_to_coverage_model=True and coverage=True\"\n convert_to_coverage_model()\n if FLAGS.restore_best_model:\n restore_best_model()\n saver = tf.train.Saver(max_to_keep=3, keep_checkpoint_every_n_hours=6) # keep 3 checkpoints at a time\n\n sv = tf.train.Supervisor(logdir=train_dir,\n is_chief=True,\n saver=saver,\n summary_op=None,\n save_summaries_secs=60, # save summaries for tensorboard every 60 secs\n save_model_secs=60, # checkpoint every 60 secs\n global_step=model.global_step)\n summary_writer = 
sv.summary_writer\n tf.logging.info(\"Preparing or waiting for session...\")\n sess_context_manager = sv.prepare_or_wait_for_session(config=util.get_config())\n tf.logging.info(\"Created session.\")\n try:\n run_training(model, batcher, sess_context_manager, sv, summary_writer) # this is an infinite loop until interrupted\n except KeyboardInterrupt:\n tf.logging.info(\"Caught keyboard interrupt on worker. Stopping supervisor...\")\n sv.stop()\n\n\ndef run_training(model, batcher, sess_context_manager, sv, summary_writer):\n \"\"\"Repeatedly runs training iterations, logging loss to screen and writing summaries\"\"\"\n tf.logging.info(\"starting run_training\")\n with sess_context_manager as sess:\n if FLAGS.debug: # start the tensorflow debugger\n sess = tf_debug.LocalCLIDebugWrapperSession(sess)\n sess.add_tensor_filter(\"has_inf_or_nan\", tf_debug.has_inf_or_nan)\n while True: # repeats until interrupted\n batch = batcher.next_batch()\n\n tf.logging.info('running training step...')\n t0=time.time()\n results = model.run_train_step(sess, batch)\n t1=time.time()\n tf.logging.info('seconds for training step: %.3f', t1-t0)\n\n loss = results['loss']\n tf.logging.info('loss: %f', loss) # print the loss to screen\n\n if not np.isfinite(loss):\n raise Exception(\"Loss is not finite. Stopping.\")\n # continue\n\n if FLAGS.coverage:\n coverage_loss = results['coverage_loss']\n tf.logging.info(\"coverage_loss: %f\", coverage_loss) # print the coverage loss to screen\n\n # get the summaries and iteration number so we can write summaries to tensorboard\n summaries = results['summaries'] # we will write these summaries to tensorboard using summary_writer\n train_step = results['global_step'] # we need this to update our running average loss\n\n summary_writer.add_summary(summaries, train_step) # write the summaries\n if train_step % 100 == 0: # flush the summary writer every so often\n summary_writer.flush()\n\n\ndef run_eval(model, batcher, vocab):\n \"\"\"Repeatedly runs eval iterations, logging to screen and writing summaries. 
Saves the model with the best loss seen so far.\"\"\"\n model.build_graph() # build the graph\n saver = tf.train.Saver(max_to_keep=3) # we will keep 3 best checkpoints at a time\n sess = tf.Session(config=util.get_config())\n eval_dir = os.path.join(FLAGS.log_root, \"eval\") # make a subdir of the root dir for eval data\n bestmodel_save_path = os.path.join(eval_dir, 'bestmodel') # this is where checkpoints of best models are saved\n summary_writer = tf.summary.FileWriter(eval_dir)\n running_avg_loss = 0 # the eval job keeps a smoother, running average loss to tell it when to implement early stopping\n best_loss = None # will hold the best loss achieved so far\n\n while True:\n _ = util.load_ckpt(saver, sess) # load a new checkpoint\n batch = batcher.next_batch() # get the next batch\n\n # run eval on the batch\n t0=time.time()\n results = model.run_eval_step(sess, batch)\n t1=time.time()\n tf.logging.info('seconds for batch: %.2f', t1-t0)\n\n # print the loss and coverage loss to screen\n loss = results['loss']\n tf.logging.info('loss: %f', loss)\n if FLAGS.coverage:\n coverage_loss = results['coverage_loss']\n tf.logging.info(\"coverage_loss: %f\", coverage_loss)\n\n # add summaries\n summaries = results['summaries']\n train_step = results['global_step']\n summary_writer.add_summary(summaries, train_step)\n\n # calculate running avg loss\n running_avg_loss = calc_running_avg_loss(np.asscalar(loss), running_avg_loss, summary_writer, train_step)\n\n # If running_avg_loss is best so far, save this checkpoint (early stopping).\n # These checkpoints will appear as bestmodel-<iteration_number> in the eval dir\n if best_loss is None or running_avg_loss < best_loss:\n tf.logging.info('Found new best model with %.3f running_avg_loss. Saving to %s', running_avg_loss, bestmodel_save_path)\n saver.save(sess, bestmodel_save_path, global_step=train_step, latest_filename='checkpoint_best')\n best_loss = running_avg_loss\n\n # flush the summary writer every so often\n if train_step % 100 == 0:\n summary_writer.flush()\n\n\ndef main(unused_argv):\n if len(unused_argv) != 1: # prints a message if you've entered flags incorrectly\n raise Exception(\"Problem with flags: %s\" % unused_argv)\n\n tf.logging.set_verbosity(tf.logging.INFO) # choose what level of logging you want\n tf.logging.info('Starting seq2seq_attention in %s mode...', (FLAGS.mode))\n\n # Change log_root to FLAGS.log_root/FLAGS.exp_name and create the dir if necessary\n FLAGS.log_root = os.path.join(FLAGS.log_root, FLAGS.exp_name)\n if not os.path.exists(FLAGS.log_root):\n if FLAGS.mode==\"train\":\n os.makedirs(FLAGS.log_root)\n else:\n raise Exception(\"Logdir %s doesn't exist. 
Run in train mode to create it.\" % (FLAGS.log_root))\n\n vocab = Vocab(FLAGS.vocab_path, FLAGS.vocab_size) # create a vocabulary\n\n stop_word_ids = get_stop_word_ids(FLAGS.stop_words_path, vocab) if FLAGS.pointer_gen and (FLAGS.co_occurrence or FLAGS.prev_relation or FLAGS.co_occurrence_h or FLAGS.co_occurrence_i or (FLAGS.coverage and FLAGS.coverage_weighted)) or FLAGS.attention_weighted or FLAGS.markov_attention or FLAGS.markov_attention_contribution else None\n\n # If in decode mode, set batch_size = beam_size\n # Reason: in decode mode, we decode one example at a time.\n # On each step, we have beam_size-many hypotheses in the beam, so we need to make a batch of these hypotheses.\n if FLAGS.mode == 'decode':\n FLAGS.batch_size = FLAGS.beam_size\n\n # If single_pass=True, check we're in decode mode\n if FLAGS.single_pass and FLAGS.mode!='decode':\n raise Exception(\"The single_pass flag should only be True in decode mode\")\n\n # if FLAGS.prev_relation and not FLAGS.co_occurrence:\n # raise Exception(\"The co_occurrence flag should be True when the prev_relation flag is True\")\n\n # Make a namedtuple hps, containing the values of the hyperparameters that the model needs\n hparam_list = ['top_ten_kept', 'decode_only', 'generation_only', 'copy_only', 'occurrence_window_size', 'max_title_len', 'title_engaged', 'title_guided', 'ref_dir', 'tagger_encoding', 'tagger_attention', 'source_siding_bridge', 'target_siding_bridge', 'language', 'dropout', 'optimizer', 'mode', 'lr', 'adagrad_init_acc', 'rand_unif_init_mag', 'trunc_norm_init_std', 'max_grad_norm', 'hidden_dim', 'emb_dim', 'batch_size', 'beam_depth', 'max_dec_steps', 'max_enc_steps', 'max_keyphrase_num', 'attention_weighted', 'coverage', 'coverage_weighted', 'coverage_weighted_expansion', 'co_occurrence', 'prev_relation', 'co_occurrence_h', 'co_occurrence_i', 'cov_loss_wt', 'pointer_gen', 'cell_type', 'markov_attention', 'markov_attention_contribution', 'markov_attention_contribution_used_x']\n hps_dict = {}\n for key,val in FLAGS.__flags.items(): # for each flag\n if key in hparam_list: # if it's in the list\n hps_dict[key] = val # add it to the dict\n hps = namedtuple(\"HParams\", hps_dict.keys())(**hps_dict)\n\n # Create a batcher object that will create minibatches of data\n batcher = Batcher(FLAGS.data_path, vocab, hps, single_pass=FLAGS.single_pass, stop_words=stop_word_ids)\n\n tf.set_random_seed(111) # a seed value for randomness\n\n if hps.mode == 'train':\n print(\"creating model...\")\n model = SummarizationModel(hps, vocab)\n setup_training(model, batcher)\n elif hps.mode == 'eval':\n model = SummarizationModel(hps, vocab)\n run_eval(model, batcher, vocab)\n elif hps.mode == 'decode':\n decode_model_hps = hps # This will be the hyperparameters for the decoder model\n decode_model_hps = hps._replace(max_dec_steps=1) # The model is configured with max_dec_steps=1 because we only ever run one step of the decoder at a time (to do beam search). Note that the batcher is initialized with max_dec_steps equal to e.g. 100 because the batches need to contain the full summaries\n model = SummarizationModel(decode_model_hps, vocab)\n decoder = BeamSearchDecoder(model, batcher, vocab)\n decoder.decode() # decode indefinitely (unless single_pass=True, in which case decode the dataset exactly once)\n else:\n raise ValueError(\"The 'mode' flag must be one of train/eval/decode\")\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.global_variables_initializer", "tensorflow.Summary", "tensorflow.set_random_seed", "tensorflow.train.Saver", "tensorflow.logging.info", "tensorflow.global_variables", "tensorflow.python.debug.LocalCLIDebugWrapperSession", "numpy.asscalar", "numpy.isfinite", "tensorflow.app.run", "tensorflow.train.Supervisor", "tensorflow.all_variables", "tensorflow.logging.set_verbosity", "tensorflow.initialize_all_variables", "tensorflow.app.flags.DEFINE_integer", "tensorflow.app.flags.DEFINE_string", "tensorflow.app.flags.DEFINE_boolean", "tensorflow.app.flags.DEFINE_float", "tensorflow.summary.FileWriter" ] ]
voxl-ai/u2net
[ "612d6f07b16e3def38515a59cc47a6189f080ffd" ]
[ "u2net_test.py" ]
[ "import os\nfrom skimage import io, transform\nimport torch\nimport torchvision\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms # , utils\n\n# import torch.optim as optim\n\nimport numpy as np\nfrom PIL import Image\nimport glob\n\nfrom data_loader import RescaleT\nfrom data_loader import ToTensor\nfrom data_loader import ToTensorLab\nfrom data_loader import SalObjDataset\n\nfrom model import U2NET # full size version 173.6 MB\nfrom model import U2NETP # small version u2net 4.7 MB\n\n# normalize the predicted SOD probability map\ndef normPRED(d):\n ma = torch.max(d)\n mi = torch.min(d)\n\n dn = (d - mi) / (ma - mi)\n\n return dn\n\n\ndef save_output(image_name, pred, d_dir):\n\n predict = pred\n predict = predict.squeeze()\n predict_np = predict.cpu().data.numpy()\n\n im = Image.fromarray(predict_np * 255).convert(\"RGB\")\n img_name = image_name.split(os.sep)[-1]\n image = io.imread(image_name)\n imo = im.resize((image.shape[1], image.shape[0]), resample=Image.BILINEAR)\n\n pb_np = np.array(imo)\n\n aaa = img_name.split(\".\")\n bbb = aaa[0:-1]\n imidx = bbb[0]\n for i in range(1, len(bbb)):\n imidx = imidx + \".\" + bbb[i]\n\n imo.save(d_dir + imidx + \".png\")\n\n\ndef main():\n\n # --------- 1. get image path and name ---------\n model_name = \"u2net\" # u2netp\n\n image_dir = os.path.join(os.getcwd(), \"test_data\", \"test_images\")\n prediction_dir = os.path.join(\n os.getcwd(), \"test_data\", model_name + \"_results\" + os.sep\n )\n model_dir = os.path.join(\n os.getcwd(), \"saved_models\", model_name, model_name + \".pth\"\n )\n\n img_name_list = glob.glob(image_dir + os.sep + \"*\")\n print(img_name_list)\n\n # --------- 2. dataloader ---------\n # 1. dataloader\n test_salobj_dataset = SalObjDataset(\n img_name_list=img_name_list,\n lbl_name_list=[],\n transform=transforms.Compose([RescaleT(320), ToTensorLab(flag=0)]),\n )\n test_salobj_dataloader = DataLoader(\n test_salobj_dataset, batch_size=1, shuffle=False, num_workers=1\n )\n\n # --------- 3. model define ---------\n if model_name == \"u2net\":\n print(\"...load U2NET---173.6 MB\")\n net = U2NET(3, 1)\n elif model_name == \"u2netp\":\n print(\"...load U2NEP---4.7 MB\")\n net = U2NETP(3, 1)\n net.load_state_dict(torch.load(model_dir))\n if torch.cuda.is_available():\n net.cuda()\n net.eval()\n\n # --------- 4. inference for each image ---------\n for i_test, data_test in enumerate(test_salobj_dataloader):\n\n print(\"inferencing:\", img_name_list[i_test].split(os.sep)[-1])\n\n inputs_test = data_test[\"image\"]\n inputs_test = inputs_test.type(torch.FloatTensor)\n\n if torch.cuda.is_available():\n inputs_test = Variable(inputs_test.cuda())\n else:\n inputs_test = Variable(inputs_test)\n\n d1, d2, d3, d4, d5, d6, d7 = net(inputs_test)\n\n # normalization\n pred = d1[:, 0, :, :]\n pred = normPRED(pred)\n\n # save results to test_results folder\n if not os.path.exists(prediction_dir):\n os.makedirs(prediction_dir, exist_ok=True)\n save_output(img_name_list[i_test], pred, prediction_dir)\n\n del d1, d2, d3, d4, d5, d6, d7\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array", "torch.min", "torch.max", "torch.autograd.Variable", "torch.cuda.is_available", "torch.utils.data.DataLoader", "torch.load" ] ]
artjoms-formulevics/Hip-Hop-Analytics
[ "346a313871eb686435ee1d5c6ae9028f7725c5e4" ]
[ "helper_functions.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 3 22:36:21 2021\n\n@author: afo\n\"\"\"\n\nimport pandas as pd\nimport json\nfrom os.path import isfile, join\nfrom os import listdir\n\n# Function to get all the json file names in 3 subdirectories of given rapper - albums, eps, mixtapes\ndef load_album_names(paths, i, to_collect=2):\n \n # Placeholer lists\n files_alb = []\n files_ep = []\n files_mix = []\n \n # If 0 - collect albums, if 1 - albums+eps, if 2 - albums+eps+mixtapes (all)\n if to_collect >= 0:\n \n # Get list of files from albums\n files_alb = [f for f in listdir(paths[i]+'albums/') if isfile(join(paths[i]+'/albums', f))]\n try :\n files_alb.remove('.DS_Store')\n except ValueError:\n print()\n \n files_alb = [k for k in files_alb if '.json' in k]\n \n if to_collect >= 1:\n \n # Get list of files from eps\n files_ep = [f for f in listdir(paths[i]+'eps/') if isfile(join(paths[i]+'/eps', f))]\n try :\n files_ep.remove('.DS_Store')\n except ValueError:\n print()\n \n files_ep = [k for k in files_ep if '.json' in k]\n \n if to_collect >= 2:\n \n # Get list of files from mixtapes\n files_mix = [f for f in listdir(paths[i]+'mixtapes/') if isfile(join(paths[i]+'/mixtapes', f))]\n try :\n files_mix.remove('.DS_Store')\n except ValueError:\n print()\n \n files_mix = [k for k in files_mix if '.json' in k]\n \n files = files_alb + files_ep + files_mix # make single list\n \n return files\n\n# Function to load the json album. Path is based on type from the dataframe\ndef load_album(df, file, i, paths):\n\n # Determine the folder\n if df.iloc[file, 3] == 'Album':\n filepath = paths[i]+'albums/'+df.iloc[file,1]+'.json'\n elif df.iloc[file, 3] == 'EP':\n filepath = paths[i]+'eps/'+df.iloc[file,1]+'.json'\n else:\n filepath = paths[i]+'mixtapes/'+df.iloc[file,1]+'.json'\n \n # Load file\n with open(filepath) as f:\n data = json.load(f)\n \n return data\n\n# Function to write files for the word_count script. Writes album list, unique word count and full used lyric to files\ndef write_to_files(df, unique_lyric, joined_lyric):\n \n df.to_csv('data/'+df.iloc[0,0]+\"/albums.csv\", index=False)\n \n with open('data/'+df.iloc[0,0]+'/unque.txt', 'w') as the_file:\n the_file.write(str(unique_lyric))\n \n with open('data/'+df.iloc[0,0]+'/full.txt', 'w') as the_file:\n the_file.write(joined_lyric)\n \n print('Data is written to files!')\n\n# Function to load the saved artist album dataframe \ndef load_dataframe(paths, i):\n \n df = pd.read_csv(paths[i]+'/albums.csv')\n df['Year'] = pd.to_numeric(df['Year'])\n df = df.sort_values(by=['Type', 'Year']) # sort by year and type to make sure old albums go first, then EPs, then Mixtapes\n df['used'] = 0 # counter of used songs from album for word count\n df['total songs'] = 0 # counter of total songs in album for word count\n #df = df[df['Type'] == 'Album']\n\n return df\n\n\n\n\n\n\n\n\n\n\n" ]
[ [ "pandas.read_csv", "pandas.to_numeric" ] ]
yseokchoi/KMAwithBERTs
[ "72000c620c227f11d3c2cd041b67398c481ac1ae" ]
[ "optimizers.py" ]
[ "import torch\nimport torch.optim as optim\nfrom torch.nn.utils import clip_grad_norm_\n\nfrom math import sqrt\nimport functools\n\n\ndef build_torch_optimizer_for_bert(model, opt):\n \"\"\"\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n encoder_params = [\n {\n \"params\": [p for n, p in model.encoder.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n {\n \"params\": [p for n, p in model.encoder.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n }\n ]\n decoder_params = [\n {\n \"params\": [p for n, p in model.decoder.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n {\n \"params\": [p for n, p in model.decoder.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n }]\n etc_params = [p for p in model.generator.parameters() if p.requires_grad] + [p for p in model.separator.parameters() if p.requires_grad]\n \"\"\"\n encoder_params = [p for p in model.encoder.parameters() if p.requires_grad]\n # decoder_params = [p for n, p in model.decoder.named_parameters() if p.requires_grad and 'cross' not in n]\n # etc_params = [p for n, p in model.decoder.named_parameters() if p.requires_grad and 'cross' in n] + [p for p in model.generator.parameters() if p.requires_grad] + [p for p in model.separator.parameters() if p.requires_grad]\n decoder_params = [p for n, p in model.decoder.named_parameters() if p.requires_grad]\n etc_params = [p for p in model.generator.parameters() if p.requires_grad] + [p for p in model.separator.parameters() if p.requires_grad]\n\n betas = [opt.adam_beta1, opt.adam_beta2]\n encoder_lr = opt.learning_rate if opt.enc_learning_rate == 0.0 else opt.enc_learning_rate\n decoder_lr = opt.learning_rate if opt.dec_learning_rate == 0.0 else opt.dec_learning_rate\n etc_lr = opt.learning_rate if opt.etc_learning_rate == 0.0 else opt.etc_learning_rate\n\n if opt.optim == 'sgd':\n if len(encoder_params) > 0 and len(decoder_params) > 0:\n optimizer = {\n \"encoder\": optim.SGD(encoder_params, lr=encoder_lr),\n \"decoder\": optim.SGD(decoder_params, lr=decoder_lr),\n \"etc\": optim.SGD(etc_params, lr=etc_lr)\n }\n elif len(decoder_params) > 0:\n optimizer = {\n \"decoder\": optim.SGD(decoder_params, lr=decoder_lr),\n \"etc\": optim.SGD(etc_params, lr=etc_lr)\n }\n else:\n optimizer = {\n \"etc\": optim.SGD(etc_params, lr=etc_lr)\n }\n elif opt.optim == 'adagrad':\n if len(encoder_params) > 0 and len(decoder_params) > 0:\n optimizer = {\n \"encoder\": optim.Adagrad(\n encoder_params,\n lr=encoder_lr,\n initial_accumulator_value=opt.adagrad_accumlator_init),\n \"decoder\": optim.Adagrad(\n decoder_params,\n lr=decoder_lr,\n initial_accumulator_value=opt.adagrad_accumlator_init),\n \"etc\": optim.Adagrad(\n etc_params,\n lr=etc_lr,\n initial_accumulator_value=opt.adagrad_accumlator_init)\n }\n elif len(decoder_params) > 0:\n optimizer = {\n \"decoder\": optim.Adagrad(\n decoder_params,\n lr=decoder_lr,\n initial_accumulator_value=opt.adagrad_accumlator_init),\n \"etc\": optim.Adagrad(\n etc_params,\n lr=etc_lr,\n initial_accumulator_value=opt.adagrad_accumlator_init)\n }\n else:\n optimizer = {\n \"etc\": optim.Adagrad(\n etc_params,\n lr=etc_lr,\n initial_accumulator_value=opt.adagrad_accumlator_init)\n }\n elif opt.optim == 'adadelta':\n if llen(encoder_params) > 0 and len(decoder_params) > 0:\n optimizer = {\n \"encoder\": optim.Adadelta(encoder_params, lr=encoder_lr),\n \"decoder\": optim.Adadelta(decoder_params, lr=decoder_lr),\n \"etc\": 
optim.Adadelta(etc_params, lr=etc_lr)\n }\n elif len(decoder_params) > 0:\n optimizer = {\n \"decoder\": optim.Adadelta(decoder_params, lr=decoder_lr),\n \"etc\": optim.Adadelta(etc_params, lr=etc_lr)\n }\n else:\n optimizer = {\n \"etc\": optim.Adadelta(etc_params, lr=etc_lr)\n }\n elif opt.optim == 'adam':\n if len(encoder_params) > 0 and len(decoder_params) > 0:\n optimizer = {\n \"encoder\": optim.Adam(encoder_params, lr=encoder_lr, betas=betas, eps=1e-12),\n \"decoder\": optim.Adam(decoder_params, lr=decoder_lr, betas=betas, eps=1e-12),\n \"etc\": optim.Adam(etc_params, lr=etc_lr, betas=betas, eps=1e-12)\n }\n elif len(decoder_params) > 0:\n optimizer = {\n \"decoder\": optim.Adam(decoder_params, lr=decoder_lr, betas=betas, eps=1e-12),\n \"etc\": optim.Adam(etc_params, lr=etc_lr, betas=betas, eps=1e-12)\n }\n else:\n optimizer = {\n \"etc\": optim.Adam(etc_params, lr=etc_lr, betas=betas, eps=1e-12)\n }\n else:\n raise ValueError(\"Invalid optimizer type: \" + opt.optim)\n\n return optimizer\n\n\ndef make_learning_rate_decay_fn(opt):\n \"\"\"Returns the learning decay function from options.\"\"\"\n if opt.decay_method == 'noam':\n return functools.partial(\n noam_decay,\n warmup_steps=opt.warmup_steps,\n model_size=opt.rnn_size)\n elif opt.decay_method == 'noamwd':\n return functools.partial(\n noamwd_decay,\n warmup_steps=opt.warmup_steps,\n model_size=opt.rnn_size,\n rate=opt.learning_rate_decay,\n decay_steps=opt.decay_steps,\n start_step=opt.start_decay_steps)\n elif opt.decay_method == 'rsqrt':\n return functools.partial(\n rsqrt_decay, warmup_steps=opt.warmup_steps)\n elif opt.start_decay_steps is not None:\n return functools.partial(\n exponential_decay,\n rate=opt.learning_rate_decay,\n decay_steps=opt.decay_steps,\n start_step=opt.start_decay_steps)\n else:\n return None\n\n\ndef noam_decay(step, warmup_steps, model_size):\n \"\"\"Learning rate schedule described in\n https://arxiv.org/pdf/1706.03762.pdf.\n \"\"\"\n return (\n model_size ** (-0.5) *\n min(step ** (-0.5), step * warmup_steps**(-1.5)))\n\n\ndef noamwd_decay(step, warmup_steps,\n model_size, rate, decay_steps, start_step=0):\n \"\"\"Learning rate schedule optimized for huge batches\n \"\"\"\n return (\n model_size ** (-0.5) *\n min(step ** (-0.5), step * warmup_steps**(-1.5)) *\n rate ** (max(step - start_step + decay_steps, 0) // decay_steps))\n\n\ndef exponential_decay(step, rate, decay_steps, start_step=0):\n \"\"\"A standard exponential decay, scaling the learning rate by :obj:`rate`\n every :obj:`decay_steps` steps.\n \"\"\"\n return rate ** (max(step - start_step + decay_steps, 0) // decay_steps)\n\n\ndef rsqrt_decay(step, warmup_steps):\n \"\"\"Decay based on the reciprocal of the step square root.\"\"\"\n return 1.0 / sqrt(max(step, warmup_steps))\n\nclass BertOptimizer(object):\n def __init__(self, optimizer, learning_rate, learning_rate_decay_fn=None, max_grad_norm=None):\n self._optimizer = optimizer\n self._learning_rate = learning_rate\n self._learning_rate_decay_fn = learning_rate_decay_fn\n self._max_grad_norm = max_grad_norm or 0\n self._training_step = 1\n self._decay_step = 1\n\n @classmethod\n def from_opt(cls, model, opt, checkpoint=None):\n optim_opt = opt\n optim_state_dict = None\n\n if opt.train_from and checkpoint is not None:\n optim = checkpoint[\"optim\"]\n ckpt_opt = checkpoint[\"opt\"]\n ckpt_state_dict = {}\n if isinstance(optim, BertOptimizer):\n ckpt_state_dict[\"training_step\"] = optim._training_step + 1\n ckpt_state_dict[\"decay_step\"] = optim._decay_step + 1\n ckpt_state_dict[\"optimizer\"] 
= {stack: o.state_dict() for stack, o in optim._optimizer.items()}\n else:\n ckpt_state_dict = optim\n\n if opt.reset_optim == 'none':\n optim_opt = ckpt_opt\n optim_state_dict = ckpt_state_dict\n elif opt.reset_optim == 'all':\n pass\n elif opt.reset_optim == 'states':\n optim_opt = ckpt_opt\n optim_state_dict = ckpt_state_dict\n del optim_state_dict[\"optimizer\"]\n elif opt.reset_optim == 'keep_states':\n optim_state_dict = ckpt_state_dict\n\n lr = {\n \"encoder\": optim_opt.learning_rate if optim_opt.enc_learning_rate == 0.0 else optim_opt.enc_learning_rate,\n \"decoder\": optim_opt.learning_rate if optim_opt.dec_learning_rate == 0.0 else optim_opt.dec_learning_rate,\n \"etc\": optim_opt.learning_rate if optim_opt.etc_learning_rate == 0.0 else optim_opt.etc_learning_rate\n }\n optimizer = cls(\n build_torch_optimizer_for_bert(model, optim_opt),\n lr,\n learning_rate_decay_fn=make_learning_rate_decay_fn(optim_opt),\n max_grad_norm=optim_opt.max_grad_norm\n )\n if optim_state_dict:\n optimizer.load_state_dict(optim_state_dict)\n\n return optimizer\n\n @property\n def training_step(self):\n return self._training_step\n\n def set_learning_rate(self, lr):\n self._learning_rate[\"encoder\"] = lr[\"encoder\"]\n self._learning_rate[\"decoder\"] = lr[\"decoder\"]\n self._learning_rate[\"etc\"] = lr[\"etc\"]\n\n def get_learning_rate(self):\n return self._learning_rate\n\n def learning_rate(self, rate=None):\n if rate is not None:\n if self._learning_rate_decay_fn is None:\n return rate\n scale = self._learning_rate_decay_fn(self._decay_step)\n lr = scale * rate\n return lr\n else:\n if self._learning_rate_decay_fn is None:\n return self._learning_rate\n scale = self._learning_rate_decay_fn(self._decay_step)\n lr = {stack: scale * r if stack in self._optimizer.keys() else r for stack, r in self._learning_rate.items()}\n return lr\n\n def state_dict(self):\n return {\n \"training_step\": self._training_step,\n \"decay_step\": self._decay_step,\n \"optimizer\": {stack:optimizer.state_dict() for stack, optimizer in self._optimizer.items()}\n }\n\n def load_state_dict(self, state_dict):\n self._training_step = state_dict['training_step']\n # State can be partially restored.\n if 'decay_step' in state_dict:\n self._decay_step = state_dict['decay_step']\n if 'optimizer' in state_dict:\n for stack, optimizer in self._optimizer.items():\n self._optimizer[stack].load_state_dict(state_dict['optimizer'][stack])\n\n def zero_grad(self):\n for stack, optimizer in self._optimizer.items():\n optimizer.zero_grad()\n\n def backward(self, loss):\n loss.backward()\n\n def step(self):\n for stack, optimizer in self._optimizer.items():\n learning_rate = self.learning_rate(self._learning_rate[stack])\n for group in optimizer.param_groups:\n group['lr'] = learning_rate\n if self._max_grad_norm > 0:\n clip_grad_norm_(group['params'], self._max_grad_norm)\n optimizer.step()\n self._decay_step += 1\n self._training_step += 1" ]
[ [ "torch.optim.Adagrad", "torch.nn.utils.clip_grad_norm_", "torch.optim.optimizer.state_dict", "torch.optim.SGD", "torch.optim.Adam", "torch.optim.Adadelta" ] ]
breisfeld/pandas
[ "f1fd50bb8e7603042fe93e01e862766673e33450" ]
[ "pandas/tests/test_reshape.py" ]
[ "from pandas import DataFrame\n\nimport numpy as np\n\nfrom pandas.core.reshape import melt, convert_dummies\nimport pandas.util.testing as tm\n\ndef test_melt():\n df = tm.makeTimeDataFrame()[:10]\n df['id1'] = (df['A'] > 0).astype(int)\n df['id2'] = (df['B'] > 0).astype(int)\n\n molten1 = melt(df)\n molten2 = melt(df, id_vars=['id1'])\n molten3 = melt(df, id_vars=['id1', 'id2'])\n\ndef test_convert_dummies():\n df = DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'foo', 'foo'],\n 'B' : ['one', 'one', 'two', 'three',\n 'two', 'two', 'one', 'three'],\n 'C' : np.random.randn(8),\n 'D' : np.random.randn(8)})\n\n result = convert_dummies(df, ['A', 'B'])\n result2 = convert_dummies(df, ['A', 'B'], prefix_sep='.')\n\n expected = DataFrame({'A_foo' : [1, 0, 1, 0, 1, 0, 1, 1],\n 'A_bar' : [0, 1, 0, 1, 0, 1, 0, 0],\n 'B_one' : [1, 1, 0, 0, 0, 0, 1, 0],\n 'B_two' : [0, 0, 1, 0, 1, 1, 0, 0],\n 'B_three' : [0, 0, 0, 1, 0, 0, 0, 1],\n 'C' : df['C'].values,\n 'D' : df['D'].values},\n columns=result.columns, dtype=float)\n expected2 = expected.rename(columns=lambda x: x.replace('_', '.'))\n\n tm.assert_frame_equal(result, expected)\n tm.assert_frame_equal(result2, expected2)\n\nif __name__ == '__main__':\n import nose\n nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],\n exit=False)\n\n" ]
[ [ "pandas.core.reshape.melt", "pandas.util.testing.assert_frame_equal", "pandas.util.testing.makeTimeDataFrame", "pandas.DataFrame", "numpy.random.randn", "pandas.core.reshape.convert_dummies" ] ]
nlapier2/MiniScrub
[ "2fb70c85a3737ac13d3db8346a12f31e5cb07534" ]
[ "pileup.py" ]
[ "import argparse, gc, gzip, multiprocessing, random, sys, time, traceback\nimport numpy as np\nimport scipy.misc\n\n\nstart = time.time()\n\n\ndef echo(msg):\n\tglobal start\n\tseconds = time.time() - start\n\tm, s = divmod(seconds, 60)\n\th, m = divmod(m, 60)\n\thms = \"%02d:%02d:%02d\" % (h, m, s)\n\tprint('['+hms+'] ' + msg)\n\n\ndef parse_args(): # handle user arguments\n\tparser = argparse.ArgumentParser(description='Create pileups from .paf read-to-read mapping and fastq reads.')\n\tparser.add_argument('--color', default='rgb', choices=['bw', 'rgb'], help='Color mode (bw or rgb).')\n\tparser.add_argument('--compression', default='none', choices=['none', 'gzip'], help='Compression format, or none')\n\tparser.add_argument('--debug', action='store_true', help='Debug mode.')\n\tparser.add_argument('-k', type=int, default=13, help='K-mer size of minimizers. Required.')\n\tparser.add_argument('--limit_fastq', type=int, default=0, help='Limit number of reads to scan from fastq file.')\n\tparser.add_argument('--limit_reads', type=int, default=0, help='Limit number of reads to generate pileups for.')\n\tparser.add_argument('--mapping', required=True, help='Path to the .paf file of read-to-read mappings.')\n\tparser.add_argument('--maxdepth', type=int, default=48, help='Maximum number of matched reads per pileup image.')\n\tparser.add_argument('--mode', default='minimizers', choices=['whole', 'minimizers'], help='Whole read or minimizers-only.')\n\tparser.add_argument('--plotdir', default='./', help='If --saveplots is used, directory path to save plots in.')\n\tparser.add_argument('--processes', type=int, default=1, help='Number of multiple processes to run concurrently.')\n\tparser.add_argument('--reads', required=True, help='Path to the .fastq reads file.')\n\tparser.add_argument('--saveplots', action='store_true', help='If used, will plot the pileups and save them.')\n\tparser.add_argument('--verbose', action='store_true', help='Verbose output option.')\n\targs = parser.parse_args()\n\treturn args\n\n\ndef process_reads(reads, compression, limit, verbose): # using fastq file, map read names to sequence and quality scores\n\tif compression == 'gzip':\n\t\treads_file = gzip.open(reads, 'r')\n\telse:\n\t\treads_file = open(reads, 'r')\n\treads_df, num, current = {}, -1, ''\n\tread_count = 0\n\tfor line in reads_file:\n\t\tif args.compression == 'gzip':\n\t\t\tline = line.decode('utf8').strip()\n\t\tnum = (num + 1) % 4\n\t\tif num == 0:\n\t\t\tcurrent = line[1:].split(' ')[0]\n\t\t\tread_count += 1\n\t\t\tif read_count % 10000 == 0 and verbose:\n\t\t\t\techo('Finished scanning ' + str(read_count) + ' reads')\n\t\t#elif num == 1:\n\t\t#\treads_df[current] = [line.upper()]\n\t\telif num == 3:\n\t\t\t#scores = [int((ord(ch)-33)*2.75) for ch in line]\n\t\t\t#scores = [(ord(ch)-33) for ch in line]\n\t\t\tscores = [ord(ch) for ch in line]\n\t\t\treads_df[current] = [scores, np.mean(scores)]\n\t\t\tif limit > 0 and read_count > limit:\n\t\t\t\tbreak\n\treads_file.close()\n\treturn reads_df\n\n\ndef stretch_factor_whole(startpos, selection):\n\tref_mins, match_mins = selection[12], selection[13]\n\tstart_index = len([i for i in ref_mins if i < startpos]) - 1\n\tif start_index >= len(ref_mins) or start_index < 0:\n\t\treturn startpos, 1.0\n\tref_start, ref_end = ref_mins[start_index], ref_mins[start_index+1] \n\tmatch_start, match_end = match_mins[start_index], match_mins[start_index+1]\n\tstretch = abs(match_end - match_start) / (ref_end - ref_start)\n\treturn ref_end, stretch\n\n\ndef 
stretch_factor_minimizers(startpos, line, all_mins, selection, color='rgb'):\n\tref_mins, match_mins = selection[12], selection[13]\n\tmatch_start = selection[13][selection[12].index(all_mins[startpos])]\n\tfor endpos in range(startpos+1, len(line)):\n\t\tif color == 'rgb' and line[endpos][0] == 255.0:\n\t\t\tmatch_end = selection[13][selection[12].index(all_mins[endpos])]\n\t\t\tstretch = (match_end - match_start) / (endpos - startpos)\n\t\t\treturn endpos, min(255, round(abs(stretch)*10))\n\t\telif color == 'bw' and line[endpos] >= 128.0:\n\t\t\tmatch_end = selection[13][selection[12].index(all_mins[endpos])]\n\t\t\tstretch = (match_end - match_start) / (endpos - startpos)\n\t\t\treturn endpos, min(127.0, round(abs(stretch)*5))\n\treturn len(line), 0\n\n\ndef make_pileup_bw_whole(pid, readname, readqual, readlen, matches, args):\n\ttry: \n\t\tk = args.k\n\t\treadqual = np.mean(readqual)\n\t\tmaxdepth, saveplots, plotdir, debug, pileup = args.maxdepth, args.saveplots, args.plotdir, args.debug, []\n\t\tminimizers = matches[0][12]\n\t\tdel matches[0]\n\t\tavg_stretch = 64.0\n\t\tpileup.append([128.0 + avg_stretch] * readlen) # the reference read\n\t\tfor i in range(maxdepth):\n\t\t\tpileup.append([0.0]) # fill in placeholder lines\n\n\t\tdepth_order, depth_index, num = list(range(1, maxdepth+1)), 0, 0\n\t\tfor s in matches:\n\t\t\tselection = matches[s]\n\t\t\tprefix = [0.0] * int(selection[2])\n\t\t\tsuffix = [0.0] * int(readlen-int(selection[3]))\n\t\t\treadqual = 0.1#np.mean(selection[14])\n\t\t\tpixels = [readqual] * (readlen - len(prefix) - len(suffix))\n\t\t\tseq = prefix + pixels + suffix\n\n\t\t\tfor i in range(len(minimizers)):\n\t\t\t\tif minimizers[i] < selection[12][0] or minimizers[i] > selection[12][-1]: # read does not cover this minimizer\n\t\t\t\t\tcontinue\n\t\t\t\tif minimizers[i] in selection[12]: # if that minimizer matched by this read\n\t\t\t\t\tif minimizers[i]+k < len(seq):\n\t\t\t\t\t\tseq[minimizers[i]:minimizers[i]+k] = [i+128.0 if i < 128.0 else i for i in seq[minimizers[i]:minimizers[i]+k]]\n\t\t\t\t\telse:\n\t\t\t\t\t\tseq[minimizers[i]:len(seq)] = [i+128.0 if i < 128.0 else i for i in seq[minimizers[i]:len(seq)]]\n\n\t\t\tpix = 0\n\t\t\twhile pix < len(seq):\n\t\t\t\tif seq[pix] < 128.0 and seq[pix] > 0.0:\n\t\t\t\t\tendpos, stretch = stretch_factor_whole(pix, selection)\n\t\t\t\t\tstretch = min(126.9, avg_stretch * (stretch ** 5))\n\t\t\t\t\tseq[pix:endpos] = [i+stretch for i in seq[pix:endpos]]\n\t\t\t\t\tif endpos <= pix:\n\t\t\t\t\t\tpix += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tpix = endpos\n\t\t\t\telse:\n\t\t\t\t\tpix += 1\n\n\t\t\tpileup[depth_order[depth_index]] = seq\n\t\t\tdepth_index += 1\n\t\t\tif depth_index >= maxdepth:\n\t\t\t\tbreak\n\n\t\tfor line in range(len(pileup)):\n\t\t\tpileup[line].extend([0.0] * (readlen - len(pileup[line])))\n\t\tpileup = np.array(pileup)\n\t\tif saveplots:\n\t\t\tscipy.misc.toimage(pileup, cmin=0.0, cmax=255.0).save(plotdir+readname+'.png')\n\t\treturn 0\n\texcept:\n\t\tprint('Error in process ' + str(pid))\n\t\terr = sys.exc_info()\n\t\ttb = traceback.format_exception(err[0], err[1], err[2])\n\t\tprint(''.join(tb) + '\\n')\n\t\treturn 1\n\n\ndef make_pileup_bw_minimizers(pid, readname, readqual, readlen, matches, args):\n\ttry: \n\t\tk = args.k\n\t\treadqual = np.mean(readqual)\n\t\tmaxdepth, saveplots, plotdir, pileup = args.maxdepth, args.saveplots, args.plotdir, []\n\t\tminimizers = matches[0][12]\n\t\tdel matches[0]\n\t\tseq = [128.0 + ((minimizers[i+1] - minimizers[i])*5.0) for i in list(range(len(minimizers)-1))] 
+ [128.0 + ((readlen - minimizers[-1])*5.0)]\n\t\tpileup.append(seq)\n\t\tfor i in range(maxdepth):\n\t\t\tpileup.append([0.0]) # fill in placeholder lines\n\n\t\tdepth_order, depth_index, num = list(range(1, maxdepth+1)), 0, 0\n\t\tfor s in matches:\n\t\t\tselection = matches[s]\n\t\t\tseq = [1.0] * len(minimizers)\n\t\t\tfor i in range(len(minimizers)):\n\t\t\t\tif minimizers[i] < selection[12][0] or minimizers[i] > selection[12][-1]: # read does not cover this minimizer\n\t\t\t\t\tseq[i] = 0.0\n\t\t\t\telif minimizers[i] in selection[12]: # if that minimizer matched by this read\n\t\t\t\t\tseq[i] += 128.0\n\n\t\t\tpix = 0\n\t\t\twhile pix + 1 < len(seq):\n\t\t\t\tif seq[pix] != 0.0:\n\t\t\t\t\tendpos, stretch = stretch_factor_minimizers(pix, seq, minimizers, selection, color='bw')\n\t\t\t\t\tseq[pix:endpos] = [stretch+i for i in seq[pix:endpos]]\n\t\t\t\t\tif endpos <= pix:\n\t\t\t\t\t\tpix += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tpix = endpos\n\t\t\t\telse:\n\t\t\t\t\tpix += 1\n\n\t\t\tpileup[depth_order[depth_index]] = seq\n\t\t\tdepth_index += 1\n\t\t\tif depth_index >= maxdepth:\n\t\t\t\tbreak\n\n\t\tfor line in range(len(pileup)):\n\t\t\tpileup[line].extend([0.0] * (len(minimizers) - len(pileup[line])))\n\t\tpileup = np.array(pileup)\n\t\tif saveplots:\n\t\t\tscipy.misc.toimage(pileup, cmin=0.0, cmax=255.0).save(plotdir+readname+'.png')\n\t\treturn 0\n\texcept:\n\t\tprint('Error in process ' + str(pid))\n\t\terr = sys.exc_info()\n\t\ttb = traceback.format_exception(err[0], err[1], err[2])\n\t\tprint(''.join(tb) + '\\n')\n\t\treturn 1\n\n\ndef make_pileup_rgb_whole(pid, readname, readqual, readlen, matches, args):\n\ttry: \n\t\tk = args.k\n\t\tmaxdepth, saveplots, plotdir, pileup = args.maxdepth, args.saveplots, args.plotdir, []\n\t\tminimizers = matches[0][12]\n\t\tdel matches[0]\n\t\tavg_stretch = 128.0\n\t\tseq = [[255.0, i*2.0, avg_stretch] for i in readqual]\n\t\tpileup.append(seq)\n\t\tfor i in range(maxdepth):\n\t\t\tpileup.append([[0.0,0.0,0.0]]) # fill in placeholder lines\n\n\t\tdepth_order, depth_index, num = list(range(1, maxdepth+1)), 0, 0\n\t\tfor s in matches:\n\t\t\tselection = matches[s]\n\t\t\tmeanqual = np.mean(selection[14])*2.0\n\t\t\tprefix = [[0.0,0.0,0.0]] * int(selection[2])\n\t\t\tsuffix = [[0.0,0.0,0.0]] * int(readlen-int(selection[3]))\n\t\t\tpixels = [[70.0, meanqual, avg_stretch] for i in range(int(selection[2]), int(selection[3]))]\n\t\t\tseq = prefix + pixels + suffix\n\n\t\t\tfor i in range(len(minimizers)):\n\t\t\t\tif minimizers[i] < selection[12][0] or minimizers[i] > selection[12][-1]: # read does not cover this minimizer\n\t\t\t\t\tcontinue\n\t\t\t\tif minimizers[i] in selection[12]: # if that minimizer matched by this read\n\t\t\t\t\tmatch_loc = selection[13][selection[12].index(minimizers[i])]\n\t\t\t\t\tif minimizers[i]+k < len(seq):\n\t\t\t\t\t\treadquals = [selection[14][j] * 2.0 if j < len(selection[14]) else 0.0 for j in list(range(match_loc, match_loc+k))]\n\t\t\t\t\t\tseq[minimizers[i]:minimizers[i]+k] = [[255.0, readquals[j-minimizers[i]], seq[j][2]] if seq[j][0] == 70.0 else seq[j] for j in list(range(minimizers[i],minimizers[i]+k))]\n\t\t\t\t\telse:\n\t\t\t\t\t\treadquals = [selection[14][j] * 2.0 if j < len(selection[14]) else 0.0 for j in list(range(match_loc, match_loc+k))]\n\t\t\t\t\t\tseq[minimizers[i]:len(seq)] = [[255.0, readquals[j-minimizers[i]], seq[j][2]] if seq[j][0] == 70.0 else seq[j] for j in list(range(minimizers[i],len(seq)))]\n\n\t\t\tpix = 0\n\t\t\twhile pix < len(seq):\n\t\t\t\tif seq[pix][0] != 255.0 and seq[pix][2] != 
0.0:\n\t\t\t\t\tendpos, stretch = stretch_factor_whole(pix, selection)\n\t\t\t\t\tstretch = min(255.0, avg_stretch * (stretch ** 5))\n\t\t\t\t\tseq[pix:endpos] = [[i[0], i[1], stretch] for i in seq[pix:endpos]]\n\t\t\t\t\tif endpos <= pix:\n\t\t\t\t\t\tpix += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tpix = endpos\n\t\t\t\telse:\n\t\t\t\t\tpix += 1\n\n\t\t\tpileup[depth_order[depth_index]] = seq\n\t\t\tdepth_index += 1\n\t\t\tif depth_index >= maxdepth:\n\t\t\t\tbreak\n\n\t\tfor line in range(len(pileup)):\n\t\t\tpileup[line].extend([[0.0,0.0,0.0]] * (readlen - len(pileup[line])))\n\n\t\tpileup = np.array(pileup)\n\t\tif saveplots:\n\t\t\tscipy.misc.toimage(pileup, cmin=0.0, cmax=255.0, mode='RGB').save(plotdir+readname+'.png')\n\t\treturn 0\n\texcept:\n\t\tprint('Error in process ' + str(pid))\n\t\terr = sys.exc_info()\n\t\ttb = traceback.format_exception(err[0], err[1], err[2])\n\t\tprint(''.join(tb) + '\\n')\n\t\treturn 1\n\n\ndef make_pileup_rgb_minimizers(pid, readname, readqual, readlen, matches, args):\n\ttry: \n\t\tk = args.k\n\t\tmaxdepth, saveplots, plotdir, pileup = args.maxdepth, args.saveplots, args.plotdir, []\n\t\tminimizers = matches[0][12]\n\t\tdel matches[0]\n\t\tavg_stretch = 1.0#128.0\n\t\tseq = [[255.0, np.mean(readqual[i:i+k])*2.0, avg_stretch] if i+k <= len(readqual) else [255.0, np.mean(readqual[i:len(readqual)])*2.0, avg_stretch] for i in minimizers]\n\t\tfor i in range(len(minimizers)-1):\n\t\t\tseq[i][2] = min(255, (minimizers[i+1] - minimizers[i])*10)\n\t\tseq[-1][2] = min(255, (readlen - minimizers[-1])*10)\n\t\tpileup.append(seq)\n\t\tfor i in range(maxdepth):\n\t\t\tpileup.append([[0.0,0.0,0.0]]) # fill in placeholder lines\n\n\t\tdepth_order, depth_index, num = list(range(1, maxdepth+1)), 0, 0\n\t\tfor s in matches:\n\t\t\tselection = matches[s]\n\t\t\tmeanqual = np.mean(selection[14])*2.0\n\t\t\tmatch_start, match_end = minimizers.index(selection[12][0]), minimizers.index(selection[12][-1])\n\t\t\tseq = [[255.0, meanqual, avg_stretch] if minimizers[i] in selection[12] else [70.0, meanqual, avg_stretch] for i in range(match_start, match_end+1)]\n\t\t\tseq = ([[0.0, 0.0, 0.0]] * match_start) + seq + ([[0.0, 0.0, 0.0]] * (len(minimizers) - match_end - 1))\n\n\t\t\tfor pix in range(len(seq)):\n\t\t\t\tif seq[pix][0] == 255.0 and seq[pix][2] != 0.0:\n\t\t\t\t\tmatchind = selection[13][selection[12].index(minimizers[pix])]\n\t\t\t\t\tseq[pix][1] = np.mean(selection[14][matchind:matchind+k])*2.0 if matchind+k < len(selection[14]) else np.mean(selection[14][matchind:len(selection[14])])*2.0\n\n\t\t\tpix = 0\n\t\t\twhile pix + 1 < len(seq):\n\t\t\t\tif seq[pix][2] != 0.0:\n\t\t\t\t\tendpos, stretch = stretch_factor_minimizers(pix, seq, minimizers, selection)\n\t\t\t\t\tseq[pix:endpos] = [[i[0], i[1], stretch] for i in seq[pix:endpos]]\n\t\t\t\t\tif endpos <= pix:\n\t\t\t\t\t\tpix += 1\n\t\t\t\t\telse:\n\t\t\t\t\t\tpix = endpos\n\t\t\t\telse:\n\t\t\t\t\tpix += 1\n\n\t\t\tpileup[depth_order[depth_index]] = seq\n\t\t\tdepth_index += 1\n\t\t\tif depth_index >= maxdepth:\n\t\t\t\tbreak\n\n\t\tfor line in range(len(pileup)):\n\t\t\tpileup[line].extend([[0.0,0.0,0.0]] * (len(minimizers) - len(pileup[line])))\n\t\tpileup = np.array(pileup)\n\t\tif saveplots:\n\t\t\tscipy.misc.toimage(pileup, cmin=0.0, cmax=255.0, mode='RGB').save(plotdir+readname+'.png')\n\t\treturn 0\n\texcept:\n\t\tprint('Error in process ' + str(pid))\n\t\terr = sys.exc_info()\n\t\ttb = traceback.format_exception(err[0], err[1], err[2])\n\t\tprint(''.join(tb) + '\\n')\n\t\treturn 1\n\n\nif __name__ == 
\"__main__\":\n\targs = parse_args()\n\tif not args.plotdir.endswith('/'):\n\t\targs.plotdir += '/'\n\tif args.maxdepth < 1:\n\t\tprint('Error: --maxdepth must be at least 1')\n\t\tsys.exit()\n\targs.maxdepth -= 1\n\tif args.debug:\n\t\targs.limit_fastq, args.limit_reads, args.saveplots = 10000, 10, True\n\tread_count, line_count, window_size = 0, 0, 200\n\techo('Scanning reads file...')\n\treads_df = process_reads(args.reads, args.compression, args.limit_fastq, args.verbose)\n\treads_list = list(reads_df)\n\techo('Done scanning reads file. Beginning pileup generation procresses...')\n\n\tcontext = multiprocessing.get_context(\"spawn\")\n\tpool = context.Pool(processes=args.processes)#, maxtasksperchild=100)\n\t#pool = multiprocessing.Pool(processes=args.processes, maxtasksperchild=100)\n\n\tread_data, cur_read = {}, ''\n\tif args.compression == 'gzip':\n\t\tf = gzip.open(args.mapping, 'r')\n\telse:\n\t\tf = open(args.mapping, 'r')\n\tfor line in f:\n\t\tif args.compression == 'gzip':\n\t\t\tline = line.decode('utf8')\n\t\tsplits = line.strip().split('\\t')\n\t\tsplits = splits[:12] + splits[13:15]\n\t\tfor i in (1,2,3,6,7,8,9,10,11):\n\t\t\tsplits[i] = float(splits[i])\n\t\tif splits[12][5] == 'I':\n\t\t\tsplits[12] = splits[12][1:]; splits[13] = splits[13][1:]\n\t\tsplits[12] = [int(i) for i in splits[12][5:].split(',')]\n\t\tsplits[13] = [int(i) for i in splits[13][5:].split(',')]\n\t\tif splits[0] not in reads_df or splits[5] not in reads_df:\n\t\t\tcontinue\n\t\tsplits.append(reads_df[splits[5]][0])\n\t\tif args.limit_fastq > 0 and (splits[0] not in reads_list or splits[5] not in reads_list):\n\t\t\tcontinue\n\t\tif read_data != {} and cur_read != splits[0]:\n\t\t\treadqual, readlen = reads_df[cur_read][0], len(reads_df[cur_read][0])\n\t\t\tselections = list(range(1,len(read_data)))\n\t\t\trandom.shuffle(selections)\n\t\t\tselections = selections[:args.maxdepth] + [0]\n\t\t\tread_data = {i:read_data[i] for i in selections}\n\n\t\t\tif args.debug:\n\t\t\t\tif args.mode == 'whole':\n\t\t\t\t\tif args.color == 'bw':\n\t\t\t\t\t\tmake_pileup_bw_whole(read_count, cur_read, readqual, readlen, read_data, args)\n\t\t\t\t\telse:\n\t\t\t\t\t\tmake_pileup_rgb_whole(read_count, cur_read, readqual, readlen, read_data, args)\n\t\t\t\telse:\n\t\t\t\t\tif args.color == 'bw':\n\t\t\t\t\t\tmake_pileup_bw_minimizers(read_count, cur_read, readqual, readlen, read_data, args)\n\t\t\t\t\telse:\n\t\t\t\t\t\tmake_pileup_rgb_minimizers(read_count, cur_read, readqual, readlen, read_data, args)\n\t\t\telse:\n\t\t\t\tif args.mode == 'whole':\n\t\t\t\t\tif args.color == 'bw':\n\t\t\t\t\t\tpool.apply_async(make_pileup_bw_whole, (read_count, cur_read, readqual, readlen, read_data, args,))\n\t\t\t\t\telse:\n\t\t\t\t\t\tpool.apply_async(make_pileup_rgb_whole, (read_count, cur_read, readqual, readlen, read_data, args,))\n\t\t\t\telse:\n\t\t\t\t\tif args.color == 'bw':\n\t\t\t\t\t\tpool.apply_async(make_pileup_bw_minimizers, (read_count, cur_read, readqual, readlen, read_data, args,))\n\t\t\t\t\telse:\n\t\t\t\t\t\tpool.apply_async(make_pileup_rgb_minimizers, (read_count, cur_read, readqual, readlen, read_data, args,))\n\t\t\tread_count += 1\n\t\t\tif read_count % 1000 == 0 and args.verbose:\n\t\t\t\techo('Finished pileups for ' + str(read_count) + ' lines')\n\t\t\tif args.limit_reads > 0 and read_count >= args.limit_reads:\n\t\t\t\tbreak\n\t\t\tread_data, line_count = {}, 0\n\n\t\tread_data[line_count] = splits\n\t\tcur_read = splits[0]\n\t\tline_count += 1\n\n\tif read_data != {} and (read_count < args.limit_reads or 
args.limit_reads == 0):\n\t\treadqual, readlen = reads_df[cur_read][0], len(reads_df[cur_read][0])\n\t\tif args.debug:\n\t\t\tif args.mode == 'whole':\n\t\t\t\tif args.color == 'bw':\n\t\t\t\t\tmake_pileup_bw_whole(read_count, cur_read, readqual, readlen, read_data, args)\n\t\t\t\telse:\n\t\t\t\t\tmake_pileup_rgb_whole(read_count, cur_read, readqual, readlen, read_data, args)\n\t\t\telse:\n\t\t\t\tif args.color == 'bw':\n\t\t\t\t\tmake_pileup_bw_minimizers(read_count, cur_read, readqual, readlen, read_data, args)\n\t\t\t\telse:\n\t\t\t\t\tmake_pileup_rgb_minimizers(read_count, cur_read, readqual, readlen, read_data, args)\n\t\telse:\n\t\t\tif args.mode == 'whole':\n\t\t\t\tif args.color == 'bw':\n\t\t\t\t\tpool.apply_async(make_pileup_bw_whole, (read_count, cur_read, readqual, readlen, read_data, args,))\n\t\t\t\telse:\n\t\t\t\t\tpool.apply_async(make_pileup_rgb_whole, (read_count, cur_read, readqual, readlen, read_data, args,))\n\t\t\telse:\n\t\t\t\tif args.color == 'bw':\n\t\t\t\t\tpool.apply_async(make_pileup_bw_minimizers, (read_count, cur_read, readqual, readlen, read_data, args,))\n\t\t\t\telse:\n\t\t\t\t\tpool.apply_async(make_pileup_rgb_minimizers, (read_count, cur_read, readqual, readlen, read_data, args,))\n\t\tread_count += 1\n\t\tif read_count % 1000 == 0 and args.verbose:\n\t\t\techo('Finished pileups for ' + str(read_count) + ' lines')\t\n\n\tf.close()\n\tpool.close()\n\tpool.join()\n\tprint(''); echo('Done')\n#" ]
[ [ "numpy.array", "numpy.mean" ] ]
ian-katsuno/chainer-fast-neuralstyle
[ "9a05d7838a539d01dab396e7693b085d828e80ae" ]
[ "train.py" ]
[ "from __future__ import print_function, division\nimport numpy as np\nimport os, re\nimport argparse\nfrom PIL import Image\n\nfrom chainer import cuda, Variable, optimizers, serializers\nfrom net import *\n\ndef load_image(path, size):\n image = Image.open(path).convert('RGB')\n w,h = image.size\n if w < h:\n if w < size:\n image = image.resize((size, size*h//w))\n w, h = image.size\n else:\n if h < size:\n image = image.resize((size*w//h, size))\n w, h = image.size\n image = image.crop(((w-size)*0.5, (h-size)*0.5, (w+size)*0.5, (h+size)*0.5))\n return xp.asarray(image, dtype=np.float32).transpose(2, 0, 1)\n\ndef gram_matrix(y):\n b, ch, h, w = y.data.shape\n features = F.reshape(y, (b, ch, w*h))\n gram = F.batch_matmul(features, features, transb=True)/np.float32(ch*w*h)\n return gram\n\ndef total_variation(x):\n xp = cuda.get_array_module(x.data)\n b, ch, h, w = x.data.shape\n wh = Variable(xp.asarray([[[[1], [-1]], [[0], [0]], [[0], [0]]], [[[0], [0]], [[1], [-1]], [[0], [0]]], [[[0], [0]], [[0], [0]], [[1], [-1]]]], dtype=np.float32), volatile=x.volatile)\n ww = Variable(xp.asarray([[[[1, -1]], [[0, 0]], [[0, 0]]], [[[0, 0]], [[1, -1]], [[0, 0]]], [[[0, 0]], [[0, 0]], [[1, -1]]]], dtype=np.float32), volatile=x.volatile)\n return F.sum(F.convolution_2d(x, W=wh) ** 2) + F.sum(F.convolution_2d(x, W=ww) ** 2)\n\nparser = argparse.ArgumentParser(description='Real-time style transfer')\nparser.add_argument('--gpu', '-g', default=-1, type=int,\n help='GPU ID (negative value indicates CPU)')\nparser.add_argument('--dataset', '-d', default='dataset', type=str,\n help='dataset directory path (according to the paper, use MSCOCO 80k images)')\nparser.add_argument('--style_image', '-s', type=str, required=True,\n help='style image path')\nparser.add_argument('--batchsize', '-b', type=int, default=1,\n help='batch size (default value is 1)')\nparser.add_argument('--initmodel', '-i', default=None, type=str,\n help='initialize the model from given file')\nparser.add_argument('--vggmodel', '-v', default='vgg16.model', type=str,\n help='path to vgg16.model')\nparser.add_argument('--resume', '-r', default=None, type=str,\n help='resume the optimization from snapshot')\nparser.add_argument('--output', '-o', default=None, type=str,\n help='output model file path without extension')\nparser.add_argument('--lambda_tv', default=1e-6, type=float,\n help='weight of total variation regularization according to the paper to be set between 10e-4 and 10e-6.')\nparser.add_argument('--lambda_feat', default=1.0, type=float)\nparser.add_argument('--lambda_style', default=5.0, type=float)\nparser.add_argument('--epoch', '-e', default=2, type=int)\nparser.add_argument('--lr', '-l', default=1e-3, type=float)\nparser.add_argument('--checkpoint', '-c', default=0, type=int)\nparser.add_argument('--image_size', default=256, type=int)\nargs = parser.parse_args()\n\nbatchsize = args.batchsize\n\nimage_size = args.image_size\nn_epoch = args.epoch\nlambda_tv = args.lambda_tv\nlambda_f = args.lambda_feat\nlambda_s = args.lambda_style\nstyle_prefix, _ = os.path.splitext(os.path.basename(args.style_image))\noutput = style_prefix if args.output == None else args.output\nfs = os.listdir(args.dataset)\nimagepaths = []\nfor fn in fs:\n base, ext = os.path.splitext(fn)\n if ext == '.jpg' or ext == '.png':\n imagepath = os.path.join(args.dataset,fn)\n imagepaths.append(imagepath)\nn_data = len(imagepaths)\nprint('num traning images:', n_data)\nn_iter = n_data // batchsize\nprint(n_iter, 'iterations,', n_epoch, 'epochs')\n\nmodel = 
FastStyleNet()\nvgg = VGG()\nserializers.load_npz(args.vggmodel, vgg)\nif args.initmodel:\n print('load model from', args.initmodel)\n serializers.load_npz(args.initmodel, model)\nif args.gpu >= 0:\n cuda.get_device(args.gpu).use()\n model.to_gpu()\n vgg.to_gpu()\nxp = np if args.gpu < 0 else cuda.cupy\n\nO = optimizers.Adam(alpha=args.lr)\nO.setup(model)\nif args.resume:\n print('load optimizer state from', args.resume)\n serializers.load_npz(args.resume, O)\n\nstyle = vgg.preprocess(np.asarray(Image.open(args.style_image).convert('RGB').resize((image_size,image_size)), dtype=np.float32))\nstyle = xp.asarray(style, dtype=xp.float32)\nstyle_b = xp.zeros((batchsize,) + style.shape, dtype=xp.float32)\nfor i in range(batchsize):\n style_b[i] = style\nfeature_s = vgg(Variable(style_b, volatile=True))\ngram_s = [gram_matrix(y) for y in feature_s]\n\nfor epoch in range(n_epoch):\n print('epoch', epoch)\n for i in range(n_iter):\n model.zerograds()\n vgg.zerograds()\n\n indices = range(i * batchsize, (i+1) * batchsize)\n x = xp.zeros((batchsize, 3, image_size, image_size), dtype=xp.float32)\n for j in range(batchsize):\n x[j] = load_image(imagepaths[i*batchsize + j], image_size)\n\n xc = Variable(x.copy(), volatile=True)\n x = Variable(x)\n\n y = model(x)\n\n xc -= 120\n y -= 120\n\n feature = vgg(xc)\n feature_hat = vgg(y)\n\n L_feat = lambda_f * F.mean_squared_error(Variable(feature[2].data), feature_hat[2]) # compute for only the output of layer conv3_3\n\n L_style = Variable(xp.zeros((), dtype=np.float32))\n for f, f_hat, g_s in zip(feature, feature_hat, gram_s):\n L_style += lambda_s * F.mean_squared_error(gram_matrix(f_hat), Variable(g_s.data))\n\n L_tv = lambda_tv * total_variation(y)\n L = L_feat + L_style + L_tv\n\n print('(epoch {}) batch {}/{}... training loss is...{}'.format(epoch, i, n_iter, L.data))\n\n L.backward()\n O.update()\n\n if args.checkpoint > 0 and i % args.checkpoint == 0:\n serializers.save_npz('models/{}_{}_{}.model'.format(output, epoch, i), model)\n serializers.save_npz('models/{}_{}_{}.state'.format(output, epoch, i), O)\n\n print('save \"style.model\"')\n serializers.save_npz('models/{}_{}.model'.format(output, epoch), model)\n serializers.save_npz('models/{}_{}.state'.format(output, epoch), O)\n\nserializers.save_npz('models/{}.model'.format(output), model)\nserializers.save_npz('models/{}.state'.format(output), O)\n" ]
[ [ "numpy.float32" ] ]
TGarfield17/FIRESONG
[ "ad9e3688ed88563cfdb81b9f25aaa63850cc99f9" ]
[ "firesong/Legend.py" ]
[ "#!/usr/bin/python\n# Authors: Chris Tung\n# Ignacio Taboada\n#\n\n\"\"\"Example script that simulates a population of sources with a luminosity\n distribution that is dependent on redshift\"\"\"\n\n# General imports\n# from __future__ import division\nimport argparse\n# Numpy / Scipy\nimport numpy as np\n# Firesong code\nfrom firesong.Evolution import get_LEvolution\nfrom firesong.input_output import output_writer, print_config_LEGEND, get_outputdir\nfrom firesong.sampling import InverseCDF\n\n\ndef legend_simulation(outputdir,\n filename='LEGEND.out',\n L_Evolution=\"HA2012BL\",\n zmax=10.,\n bins=10000,\n index=2.13,\n emin=1e4,\n emax=1e7,\n lmin=38,\n lmax=48,\n seed=None,\n verbose=True):\n \"\"\"\n Simulate a universe of neutrino sources with luminosity distribution \n dependent on redshift\n\n Args:\n outputdir (str or None): path to write output. If None, return results\n without writing a file\n filename (str): name of the output file. \n L_Evolution (str): Name of luminosity evolution model\n zmax (float, optional, default=10.): Farthest redshift to consider\n bins (int, optional, default=1000): Number of bins used when creating\n the redshift PDF\n fluxnorm (float, optional, default=0.9e-8): Normalization on the total\n astrophysical diffuse flux, E^2dPhi/dE. Units of GeV s^-1 sr^-1\n index (float, optional, default=2.13): Spectral index of diffuse flux\n emin (float, optional, default=1e4): Minimum neutrino energy in GeV\n emax (float, optional, default=1e7): Maximum neutrino energy in GeV\n lmin (float, optional, default=38): Minimum log10 luminosity in erg/s\n lmax (float, optional, default=38): Maximum log10 luminosity in erg/s\n seed (int or None, optional, default=None): random number seed\n verbose (bool, optional, default=True): print simulation paramaters\n if True else suppress printed output\n\n Returns:\n dict: keys contain simulation results, including the input params\n as well as the sources. 
Only returned if filename is None\n \"\"\"\n\n LE_model = get_LEvolution(L_Evolution, lmin, lmax)\n\n N_sample = int(LE_model.Nsources(zmax))\n\n delta_gamma = 2 - index\n if verbose:\n print_config_LEGEND(L_Evolution, lmin, lmax, N_sample)\n\n ##################################################\n # Simulation starts here\n ##################################################\n\n rng = np.random.RandomState(seed)\n\n # Prepare CDF for redshift generation\n redshift_bins = np.arange(0.0005, zmax, zmax / float(bins))\n RedshiftPDF = [LE_model.RedshiftDistribution(redshift_bins[i])\n for i in range(0, len(redshift_bins))]\n invCDF = InverseCDF(redshift_bins, RedshiftPDF)\n\n # Prepare a luminosity CDF as a function of redshift\n luminosity_bins = np.arange(lmin, lmax, (lmax - lmin) / 1000.)\n LE_model.L_CDF(redshift_bins, luminosity_bins)\n \n if filename is not None:\n out = output_writer(outputdir, filename)\n else:\n results = {}\n\n # Generate redshift\n zs = invCDF(rng.uniform(low=0.0, high=1.0, size=N_sample))\n # Generate luminosity as function of z\n lumis = LE_model.Luminosity_Sampling(zs)\n if np.ndim(lumis) < 1:\n lumis = np.array([lumis] * N_sample)\n # Calculate the flux of each source\n fluxes = LE_model.Lumi2Flux(lumis, index, emin, emax, zs)\n # Random declination over the entire sky\n sinDecs = rng.uniform(-1, 1, size=N_sample)\n declins = np.degrees(np.arcsin(sinDecs))\n # Random ra over the sky\n ras = rng.uniform(0.,360., size=N_sample)\n TotalFlux = np.sum(fluxes)\n\n # Write out\n if filename is not None:\n out.write(declins, ras, zs, fluxes)\n out.finish(TotalFlux)\n else:\n results['dec'] = declins\n results['ra'] = ras\n results['z'] = zs\n results['flux'] = fluxes\n\n # print before finish\n if verbose:\n print(\"Actual diffuse flux simulated :\")\n log = \"E^2 dNdE = {TotalFlux} (E/100 TeV)^({delta_gamma}) [GeV/cm^2.s.sr]\"\n print(log.format(**locals()))\n\n if filename is None:\n return results\n\n\nif __name__ == \"__main__\":\n outputdir = get_outputdir()\n\n # Process command line options\n parser = argparse.ArgumentParser()\n parser.add_argument('-o', action='store', dest='filename',\n default='Legend.out', help='Output filename')\n parser.add_argument(\"--Levolution\", action=\"store\",\n dest=\"Evolution\", default='HA2012BL',\n help=\"Source evolution options: HA2012BL\")\n parser.add_argument(\"--zmax\", action=\"store\", type=float,\n dest=\"zmax\", default=10.,\n help=\"Highest redshift to be simulated\")\n parser.add_argument(\"--index\", action=\"store\", dest='index',\n type=float, default=2.19,\n help=\"Spectral index of the output flux\")\n parser.add_argument(\"--lmin\", action=\"store\", dest=\"lmin\",\n type=float, default=41.5,\n help=\"log10 of the minimum luminosity in erg/s\")\n parser.add_argument(\"--lmax\", action=\"store\", dest=\"lmax\",\n type=float, default=41.5,\n help=\"log10 of the maximum luminosity in erg/s\")\n options = parser.parse_args()\n\n legend_simulation(outputdir,\n filename=options.filename,\n L_Evolution=options.Evolution,\n zmax=options.zmax,\n index=options.index,\n lmin=options.lmin,\n lmax=options.lmax)\n" ]
[ [ "numpy.array", "numpy.random.RandomState", "numpy.arcsin", "numpy.sum", "numpy.arange", "numpy.ndim" ] ]
pzzhang/ASL
[ "156d5986d74099e0941139e2699d44381d924e6c" ]
[ "infer.py" ]
[ "import torch\nfrom src.helper_functions.helper_functions import parse_args\nfrom src.loss_functions.losses import AsymmetricLoss, AsymmetricLossOptimized\nfrom src.models import create_model\nimport argparse\nimport matplotlib\n\n# matplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport numpy as np\n\nparser = argparse.ArgumentParser(description='ASL MS-COCO Inference on a single image')\n\nparser.add_argument('--model_path', type=str, default='./models_local/TRresNet_L_448_86.6.pth')\nparser.add_argument('--pic_path', type=str, default='./pics/000000000885.jpg')\nparser.add_argument('--model_name', type=str, default='tresnet_l')\nparser.add_argument('--input_size', type=int, default=448)\nparser.add_argument('--dataset_type', type=str, default='MS-COCO')\nparser.add_argument('--th', type=float, default=None)\n\n\ndef main():\n print('ASL Example Inference code on a single image')\n\n # parsing args\n args = parse_args(parser)\n\n # setup model\n print('creating and loading the model...')\n state = torch.load(args.model_path, map_location='cpu')\n args.num_classes = state['num_classes']\n model = create_model(args).cuda()\n model.load_state_dict(state['model'], strict=True)\n model.eval()\n classes_list = np.array(list(state['idx_to_class'].values()))\n print('done\\n')\n\n # doing inference\n print('loading image and doing inference...')\n im = Image.open(args.pic_path)\n im_resize = im.resize((args.input_size, args.input_size))\n np_img = np.array(im_resize, dtype=np.uint8)\n tensor_img = torch.from_numpy(np_img).permute(2, 0, 1).float() / 255.0 # HWC to CHW\n tensor_batch = torch.unsqueeze(tensor_img, 0).cuda()\n output = torch.squeeze(torch.sigmoid(model(tensor_batch)))\n np_output = output.cpu().detach().numpy()\n detected_classes = classes_list[np_output > args.th]\n print('done\\n')\n\n # example loss calculation\n output = model(tensor_batch)\n loss_func1 = AsymmetricLoss()\n loss_func2 = AsymmetricLossOptimized()\n target = output.clone()\n target[output < 0] = 0 # mockup target\n target[output >= 0] = 1\n loss1 = loss_func1(output, target)\n loss2 = loss_func2(output, target)\n assert abs((loss1.item() - loss2.item())) < 1e-6\n\n # displaying image\n print('showing image on screen...')\n output_file = args.pic_path.replace('.jpg', '_tag.jpg')\n fig = plt.figure()\n plt.imshow(im)\n plt.axis('off')\n plt.axis('tight')\n # plt.rcParams[\"axes.titlesize\"] = 10\n plt.title(\"detected classes: {}\".format(detected_classes))\n\n # plt.show()\n plt.savefig(output_file)\n print('done\\n')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.array", "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure", "torch.unsqueeze", "torch.from_numpy", "torch.load", "matplotlib.pyplot.axis", "matplotlib.pyplot.imshow" ] ]
JessevanKempen/nutils
[ "a10ae3ca9f71b81ac5d64179555ef2cddf6658da" ]
[ "examples/platewithhole-nurbs.py" ]
[ "#!/usr/bin/env python3\n#\n# In this script we solve the same infinite plane strain problem as in\n# :ref:`examples/platewithhole.py`, but instead of using FCM to create the hole\n# we use a NURBS-based mapping. A detailed description of the testcase can be\n# found in Hughes et al., `Isogeometric analysis: CAD, finite elements, NURBS,\n# exact geometry and mesh refinement`, Computer Methods in Applied Mechanics\n# and Engineering, Elsevier, 2005, 194, 4135-4195.\n\nfrom nutils import mesh, function, solver, export, cli, testing\nimport numpy, treelog\n\ndef main(nrefine:int, traction:float, radius:float, poisson:float):\n '''\n Horizontally loaded linear elastic plate with IGA hole.\n\n .. arguments::\n\n nrefine [2]\n Number of uniform refinements starting from 1x2 base mesh.\n traction [.1]\n Far field traction (relative to Young's modulus).\n radius [.5]\n Cut-out radius.\n poisson [.3]\n Poisson's ratio, nonnegative and strictly smaller than 1/2.\n '''\n\n # create the coarsest level parameter domain\n domain, geom0 = mesh.rectilinear([1, 2])\n bsplinebasis = domain.basis('spline', degree=2)\n controlweights = numpy.ones(12)\n controlweights[1:3] = .5 + .25 * numpy.sqrt(2)\n weightfunc = bsplinebasis.dot(controlweights)\n nurbsbasis = bsplinebasis * controlweights / weightfunc\n\n # create geometry function\n indices = [0,2], [1,2], [2,1], [2,0]\n controlpoints = numpy.concatenate([\n numpy.take([0, 2**.5-1, 1], indices) * radius,\n numpy.take([0, .3, 1], indices) * (radius+1) / 2,\n numpy.take([0, 1, 1], indices)])\n geom = (nurbsbasis[:,numpy.newaxis] * controlpoints).sum(0)\n\n radiuserr = domain.boundary['left'].integral((function.norm2(geom) - radius)**2 * function.J(geom0), degree=9).eval()**.5\n treelog.info('hole radius exact up to L2 error {:.2e}'.format(radiuserr))\n\n # refine domain\n if nrefine:\n domain = domain.refine(nrefine)\n bsplinebasis = domain.basis('spline', degree=2)\n controlweights = domain.project(weightfunc, onto=bsplinebasis, geometry=geom0, ischeme='gauss9')\n nurbsbasis = bsplinebasis * controlweights / weightfunc\n\n ns = function.Namespace()\n ns.x = geom\n ns.lmbda = 2 * poisson\n ns.mu = 1 - poisson\n ns.ubasis = nurbsbasis.vector(2)\n ns.u_i = 'ubasis_ni ?lhs_n'\n ns.X_i = 'x_i + u_i'\n ns.strain_ij = '(d(u_i, x_j) + d(u_j, x_i)) / 2'\n ns.stress_ij = 'lmbda strain_kk δ_ij + 2 mu strain_ij'\n ns.r2 = 'x_k x_k'\n ns.R2 = radius**2 / ns.r2\n ns.k = (3-poisson) / (1+poisson) # plane stress parameter\n ns.scale = traction * (1+poisson) / 2\n ns.uexact_i = 'scale (x_i ((k + 1) (0.5 + R2) + (1 - R2) R2 (x_0^2 - 3 x_1^2) / r2) - 2 δ_i1 x_1 (1 + (k - 1 + R2) R2))'\n ns.du_i = 'u_i - uexact_i'\n\n sqr = domain.boundary['top,bottom'].integral('(u_i n_i)^2 d:x' @ ns, degree=9)\n cons = solver.optimize('lhs', sqr, droptol=1e-15)\n sqr = domain.boundary['right'].integral('du_k du_k d:x' @ ns, degree=20)\n cons = solver.optimize('lhs', sqr, droptol=1e-15, constrain=cons)\n\n # construct residual\n res = domain.integral('d(ubasis_ni, x_j) stress_ij d:x' @ ns, degree=9)\n\n # solve system\n lhs = solver.solve_linear('lhs', res, constrain=cons)\n\n # vizualize result\n bezier = domain.sample('bezier', 9)\n X, stressxx = bezier.eval(['X_i', 'stress_00'] @ ns, lhs=lhs)\n export.triplot('stressxx.png', X, stressxx, tri=bezier.tri, hull=bezier.hull, clim=(numpy.nanmin(stressxx), numpy.nanmax(stressxx)))\n\n # evaluate error\n err = domain.integral('<du_k du_k, d(du_i, x_j) d(du_i, x_j)>_n d:x' @ ns, degree=9).eval(lhs=lhs)**.5\n treelog.user('errors: L2={:.2e}, 
H1={:.2e}'.format(*err))\n\n return err, cons, lhs\n\n# If the script is executed (as opposed to imported), :func:`nutils.cli.run`\n# calls the main function with arguments provided from the command line. For\n# example, to keep with the default arguments simply run :sh:`python3\n# platewithhole-nurbs.py`.\n\nif __name__ == '__main__':\n cli.run(main)\n\n# Once a simulation is developed and tested, it is good practice to save a few\n# strategic return values for regression testing. The :mod:`nutils.testing`\n# module, which builds on the standard :mod:`unittest` framework, facilitates\n# this by providing :func:`nutils.testing.TestCase.assertAlmostEqual64` for the\n# embedding of desired results as compressed base64 data.\n\nclass test(testing.TestCase):\n\n @testing.requires('matplotlib')\n def test0(self):\n err, cons, lhs = main(nrefine=0, traction=.1, radius=.5, poisson=.3)\n with self.subTest('l2-error'):\n self.assertAlmostEqual(err[0], .00199, places=5)\n with self.subTest('h1-error'):\n self.assertAlmostEqual(err[1], .02269, places=5)\n with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''\n eNpjYGBoQIIggMZXOKdmnHRe3vjh+cvGDAwA6w0LgQ==''')\n with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''\n eNpjYJh07qLhhnOTjb0vTDdmAAKVcy/1u85lGYforQDzFc6pGSedlzd+eP4ykA8AvkQRaA==''')\n\n @testing.requires('matplotlib')\n def test2(self):\n err, cons, lhs = main(nrefine=2, traction=.1, radius=.5, poisson=.3)\n with self.subTest('l2-error'):\n self.assertAlmostEqual(err[0], .00009, places=5)\n with self.subTest('h1-error'):\n self.assertAlmostEqual(err[1], .00286, places=5)\n with self.subTest('constraints'): self.assertAlmostEqual64(cons, '''\n eNpjYGBoIAKCwCBXp3kuysDjnLXR+3NPjTzPqxrnAnHeeQvjk+dTjZ9d2GG85soJYwYGAPkhPtE=''')\n with self.subTest('left-hand side'): self.assertAlmostEqual64(lhs, '''\n eNpjYOg890mv85yM4axz0kYHz+00Yj6vZJxzPtWY+0KPMffFucaml+caMwBB5LlCvYhzCw0qzu0wPHyu\n 0sjlPIsx14VoY/6LvcaxlxYZz7myCKzO+dwWPZdzBwzqz20z/Hguxmj2+TtGHRdsjHdfbDB2v7zUeMXV\n pWB1VucC9B3OORmuOCdhZHR+ktGu87eNbC6oGstfLDA+eWm1seG19WB1Buf+6ruce2p469wco9Dzb4wm\n n2c23nZe3djqQqpx88XNxrOv7gOr0zwXZeBxztro/bmnRp7nVY1zgTjvvIXxSaBfnl3YYbzmygmgOgDU\n Imlr''')\n" ]
[ [ "numpy.ones", "numpy.nanmin", "numpy.take", "numpy.sqrt", "numpy.nanmax" ] ]
ebouteillon/advent-of-code-2021
[ "dd433af29a6a377f2890d041f0d004e56704e3c0" ]
[ "day-05/part1.py" ]
[ "\"\"\"https://adventofcode.com/2021/day/5\"\"\"\n\nimport numpy as np\n\ndata = open(\"day-05/input.txt\").read().splitlines()\ndata = [x.replace(' -> ', ',').split(',') for x in data]\ndata = [list(map(int, x)) for x in data]\n\nsize = max(max(x) for x in data) + 1\ndiagram = np.zeros((size, size), dtype=int)\n\nfor x1, y1, x2, y2 in data:\n x1, y1, x2, y2 = min(x1, x2), min(y1, y2), max(x1, x2), max(y1, y2)\n if x1 == x2 or y1 == y2:\n diagram[y1: y2+1, x1: x2+1] += 1\n\nprint(np.sum(diagram > 1))\n# 6283\n" ]
[ [ "numpy.sum", "numpy.zeros" ] ]
HenryKenlay/graph_adversarial_attack
[ "5282d1269aa637ecafb0af239c53fa8396e5ef66" ]
[ "code/data_generator/gen_er_components.py" ]
[ "import sys\nimport cPickle as cp\nimport random\nimport numpy as np\nimport networkx as nx\nfrom tqdm import tqdm\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--save_dir', help='Save directory.')\nparser.add_argument('--max_n', type=int, help='Upper bound on graph size.')\nparser.add_argument('--min_n', type=int, help='Lower bound on graph size.')\nparser.add_argument('--num_graph', type=int, help='Number of graphs to generate')\nparser.add_argument('--p', type=float, help='Connectivity parameter.')\nparser.add_argument('--n_comp', type=int, help='Number of connected components.')\nargs = parser.parse_args()\n\n\ndef get_component():\n \"\"\"Generate a connected ER component with min_n <= n <= max_n.\"\"\"\n cur_n = np.random.randint(max_n - min_n + 1) + min_n\n g = nx.erdos_renyi_graph(n=cur_n, p=p)\n\n comps = [c for c in nx.connected_component_subgraphs(g)]\n random.shuffle(comps)\n for i in range(1, len(comps)):\n x = random.choice(comps[i - 1].nodes())\n y = random.choice(comps[i].nodes())\n g.add_edge(x, y)\n assert nx.is_connected(g)\n return g\n\n\nif __name__ == '__main__':\n max_n = args.max_n\n min_n = args.min_n\n p = args.p\n n_comp = args.n_comp\n\n fout_name = '%s/ncomp-%d-nrange-%d-%d-n_graph-%d-p-%.2f.pkl' % (args.save_dir, n_comp, min_n, max_n, args.num_graph, p)\n print('Final Output: ' + fout_name)\n print(\"Generating graphs...\")\n min_n = min_n // n_comp\n max_n = max_n // n_comp\n\n for i in tqdm(range(args.num_graph)):\n\n for j in range(n_comp):\n g = get_component()\n \n if j == 0:\n g_all = g\n else:\n g_all = nx.disjoint_union(g_all, g)\n assert nx.number_connected_components(g_all) == n_comp\n\n with open(fout_name, 'ab') as fout:\n cp.dump(g_all, fout, cp.HIGHEST_PROTOCOL)\n" ]
[ [ "numpy.random.randint" ] ]
johnlime/cleanrl
[ "66f6f8ba12a559a812dc77aaa8f41e09ccd6f800" ]
[ "cleanrl/experiments/vdqn_atari.py" ]
[ "# https://github.com/facebookresearch/torchbeast/blob/master/torchbeast/core/environment.py\n\nimport numpy as np\nfrom collections import deque\nimport gym\nfrom gym import spaces\nimport cv2\ncv2.ocl.setUseOpenCL(False)\n\n\nclass NoopResetEnv(gym.Wrapper):\n def __init__(self, env, noop_max=30):\n \"\"\"Sample initial states by taking random number of no-ops on reset.\n No-op is assumed to be action 0.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.noop_max = noop_max\n self.override_num_noops = None\n self.noop_action = 0\n assert env.unwrapped.get_action_meanings()[0] == 'NOOP'\n\n def reset(self, **kwargs):\n \"\"\" Do no-op action for a number of steps in [1, noop_max].\"\"\"\n self.env.reset(**kwargs)\n if self.override_num_noops is not None:\n noops = self.override_num_noops\n else:\n noops = self.unwrapped.np_random.randint(1, self.noop_max + 1) #pylint: disable=E1101\n assert noops > 0\n obs = None\n for _ in range(noops):\n obs, _, done, _ = self.env.step(self.noop_action)\n if done:\n obs = self.env.reset(**kwargs)\n return obs\n\n def step(self, ac):\n return self.env.step(ac)\n\nclass FireResetEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Take action on reset for environments that are fixed until firing.\"\"\"\n gym.Wrapper.__init__(self, env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3\n\n def reset(self, **kwargs):\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(1)\n if done:\n self.env.reset(**kwargs)\n obs, _, done, _ = self.env.step(2)\n if done:\n self.env.reset(**kwargs)\n return obs\n\n def step(self, ac):\n return self.env.step(ac)\n\nclass EpisodicLifeEnv(gym.Wrapper):\n def __init__(self, env):\n \"\"\"Make end-of-life == end-of-episode, but only reset on true game over.\n Done by DeepMind for the DQN and co. 
since it helps value estimation.\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n self.was_real_done = done\n # check current lives, make loss of life terminal,\n # then update lives to handle bonus lives\n lives = self.env.unwrapped.ale.lives()\n if lives < self.lives and lives > 0:\n # for Qbert sometimes we stay in lives == 0 condition for a few frames\n # so it's important to keep lives > 0, so that we only reset once\n # the environment advertises done.\n done = True\n self.lives = lives\n return obs, reward, done, info\n\n def reset(self, **kwargs):\n \"\"\"Reset only when lives are exhausted.\n This way all states are still reachable even though lives are episodic,\n and the learner need not know about any of this behind-the-scenes.\n \"\"\"\n if self.was_real_done:\n obs = self.env.reset(**kwargs)\n else:\n # no-op step to advance from terminal/lost life state\n obs, _, _, _ = self.env.step(0)\n self.lives = self.env.unwrapped.ale.lives()\n return obs\n\nclass MaxAndSkipEnv(gym.Wrapper):\n def __init__(self, env, skip=4):\n \"\"\"Return only every `skip`-th frame\"\"\"\n gym.Wrapper.__init__(self, env)\n # most recent raw observations (for max pooling across time steps)\n self._obs_buffer = np.zeros((2,)+env.observation_space.shape, dtype=np.uint8)\n self._skip = skip\n\n def step(self, action):\n \"\"\"Repeat action, sum reward, and max over last observations.\"\"\"\n total_reward = 0.0\n done = None\n for i in range(self._skip):\n obs, reward, done, info = self.env.step(action)\n if i == self._skip - 2: self._obs_buffer[0] = obs\n if i == self._skip - 1: self._obs_buffer[1] = obs\n total_reward += reward\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn't matter\n max_frame = self._obs_buffer.max(axis=0)\n\n return max_frame, total_reward, done, info\n\n def reset(self, **kwargs):\n return self.env.reset(**kwargs)\n\nclass ClipRewardEnv(gym.RewardWrapper):\n def __init__(self, env):\n gym.RewardWrapper.__init__(self, env)\n\n def reward(self, reward):\n \"\"\"Bin reward to {+1, 0, -1} by its sign.\"\"\"\n return np.sign(reward)\n\n\nclass WarpFrame(gym.ObservationWrapper):\n def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):\n \"\"\"\n Warp frames to 84x84 as done in the Nature paper and later work.\n If the environment uses dictionary observations, `dict_space_key` can be specified which indicates which\n observation should be warped.\n \"\"\"\n super().__init__(env)\n self._width = width\n self._height = height\n self._grayscale = grayscale\n self._key = dict_space_key\n if self._grayscale:\n num_colors = 1\n else:\n num_colors = 3\n\n new_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=(self._height, self._width, num_colors),\n dtype=np.uint8,\n )\n if self._key is None:\n original_space = self.observation_space\n self.observation_space = new_space\n else:\n original_space = self.observation_space.spaces[self._key]\n self.observation_space.spaces[self._key] = new_space\n assert original_space.dtype == np.uint8 and len(original_space.shape) == 3\n\n def observation(self, obs):\n if self._key is None:\n frame = obs\n else:\n frame = obs[self._key]\n\n if self._grayscale:\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)\n frame = cv2.resize(\n frame, (self._width, self._height), interpolation=cv2.INTER_AREA\n )\n if self._grayscale:\n frame = np.expand_dims(frame, -1)\n\n if self._key is 
None:\n obs = frame\n else:\n obs = obs.copy()\n obs[self._key] = frame\n return obs\n\n\nclass FrameStack(gym.Wrapper):\n def __init__(self, env, k):\n \"\"\"Stack k last frames.\n Returns lazy array, which is much more memory efficient.\n See Also\n --------\n baselines.common.atari_wrappers.LazyFrames\n \"\"\"\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(low=0, high=255, shape=((shp[0] * k,)+shp[1:]), dtype=env.observation_space.dtype)\n\n def reset(self):\n ob = self.env.reset()\n for _ in range(self.k):\n self.frames.append(ob)\n return self._get_ob()\n\n def step(self, action):\n ob, reward, done, info = self.env.step(action)\n self.frames.append(ob)\n return self._get_ob(), reward, done, info\n\n def _get_ob(self):\n assert len(self.frames) == self.k\n return LazyFrames(list(self.frames))\n\nclass ScaledFloatFrame(gym.ObservationWrapper):\n def __init__(self, env):\n gym.ObservationWrapper.__init__(self, env)\n self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)\n\n def observation(self, observation):\n # careful! This undoes the memory optimization, use\n # with smaller replay buffers only.\n return np.array(observation).astype(np.float32) / 255.0\n\nclass LazyFrames(object):\n def __init__(self, frames):\n \"\"\"This object ensures that common frames between the observations are only stored once.\n It exists purely to optimize memory usage which can be huge for DQN's 1M frames replay\n buffers.\n This object should only be converted to numpy array before being passed to the model.\n You'd not believe how complex the previous solution was.\"\"\"\n self._frames = frames\n self._out = None\n\n def _force(self):\n if self._out is None:\n self._out = np.concatenate(self._frames, axis=0)\n self._frames = None\n return self._out\n\n def __array__(self, dtype=None):\n out = self._force()\n if dtype is not None:\n out = out.astype(dtype)\n return out\n\n def __len__(self):\n return len(self._force())\n\n def __getitem__(self, i):\n return self._force()[i]\n\n def count(self):\n frames = self._force()\n return frames.shape[frames.ndim - 1]\n\n def frame(self, i):\n return self._force()[..., i]\n\ndef wrap_atari(env, max_episode_steps=None):\n assert 'NoFrameskip' in env.spec.id\n env = NoopResetEnv(env, noop_max=30)\n env = MaxAndSkipEnv(env, skip=4)\n\n assert max_episode_steps is None\n\n return env\n\nclass ImageToPyTorch(gym.ObservationWrapper):\n \"\"\"\n Image shape to channels x weight x height\n \"\"\"\n\n def __init__(self, env):\n super(ImageToPyTorch, self).__init__(env)\n old_shape = self.observation_space.shape\n self.observation_space = gym.spaces.Box(\n low=0,\n high=255,\n shape=(old_shape[-1], old_shape[0], old_shape[1]),\n dtype=np.uint8,\n )\n\n def observation(self, observation):\n return np.transpose(observation, axes=(2, 0, 1))\n\ndef wrap_deepmind(env, episode_life=True, clip_rewards=True, frame_stack=False, scale=False):\n \"\"\"Configure environment for DeepMind-style Atari.\n \"\"\"\n if episode_life:\n env = EpisodicLifeEnv(env)\n if 'FIRE' in env.unwrapped.get_action_meanings():\n env = FireResetEnv(env)\n env = WarpFrame(env)\n if scale:\n env = ScaledFloatFrame(env)\n if clip_rewards:\n env = ClipRewardEnv(env)\n env = ImageToPyTorch(env)\n if frame_stack:\n env = FrameStack(env, 4)\n return env\n\n# https://github.com/openai/baselines/blob/master/baselines/deepq/replay_buffer.py\nimport 
operator\nclass SegmentTree(object):\n def __init__(self, capacity, operation, neutral_element):\n \"\"\"Build a Segment Tree data structure.\n https://en.wikipedia.org/wiki/Segment_tree\n Can be used as regular array, but with two\n important differences:\n a) setting item's value is slightly slower.\n It is O(lg capacity) instead of O(1).\n b) user has access to an efficient ( O(log segment size) )\n `reduce` operation which reduces `operation` over\n a contiguous subsequence of items in the array.\n Paramters\n ---------\n capacity: int\n Total size of the array - must be a power of two.\n operation: lambda obj, obj -> obj\n and operation for combining elements (eg. sum, max)\n must form a mathematical group together with the set of\n possible values for array elements (i.e. be associative)\n neutral_element: obj\n neutral element for the operation above. eg. float('-inf')\n for max and 0 for sum.\n \"\"\"\n assert capacity > 0 and capacity & (capacity - 1) == 0, \"capacity must be positive and a power of 2.\"\n self._capacity = capacity\n self._value = [neutral_element for _ in range(2 * capacity)]\n self._operation = operation\n\n def _reduce_helper(self, start, end, node, node_start, node_end):\n if start == node_start and end == node_end:\n return self._value[node]\n mid = (node_start + node_end) // 2\n if end <= mid:\n return self._reduce_helper(start, end, 2 * node, node_start, mid)\n else:\n if mid + 1 <= start:\n return self._reduce_helper(start, end, 2 * node + 1, mid + 1, node_end)\n else:\n return self._operation(\n self._reduce_helper(start, mid, 2 * node, node_start, mid),\n self._reduce_helper(mid + 1, end, 2 * node + 1, mid + 1, node_end)\n )\n\n def reduce(self, start=0, end=None):\n \"\"\"Returns result of applying `self.operation`\n to a contiguous subsequence of the array.\n self.operation(arr[start], operation(arr[start+1], operation(... arr[end])))\n Parameters\n ----------\n start: int\n beginning of the subsequence\n end: int\n end of the subsequences\n Returns\n -------\n reduced: obj\n result of reducing self.operation over the specified range of array elements.\n \"\"\"\n if end is None:\n end = self._capacity\n if end < 0:\n end += self._capacity\n end -= 1\n return self._reduce_helper(start, end, 1, 0, self._capacity - 1)\n\n def __setitem__(self, idx, val):\n # index of the leaf\n idx += self._capacity\n self._value[idx] = val\n idx //= 2\n while idx >= 1:\n self._value[idx] = self._operation(\n self._value[2 * idx],\n self._value[2 * idx + 1]\n )\n idx //= 2\n\n def __getitem__(self, idx):\n assert 0 <= idx < self._capacity\n return self._value[self._capacity + idx]\n\n\nclass SumSegmentTree(SegmentTree):\n def __init__(self, capacity):\n super(SumSegmentTree, self).__init__(\n capacity=capacity,\n operation=operator.add,\n neutral_element=0.0\n )\n\n def sum(self, start=0, end=None):\n \"\"\"Returns arr[start] + ... + arr[end]\"\"\"\n return super(SumSegmentTree, self).reduce(start, end)\n\n def find_prefixsum_idx(self, prefixsum):\n \"\"\"Find the highest index `i` in the array such that\n sum(arr[0] + arr[1] + ... 
+ arr[i - i]) <= prefixsum\n if array values are probabilities, this function\n allows to sample indexes according to the discrete\n probability efficiently.\n Parameters\n ----------\n perfixsum: float\n upperbound on the sum of array prefix\n Returns\n -------\n idx: int\n highest index satisfying the prefixsum constraint\n \"\"\"\n assert 0 <= prefixsum <= self.sum() + 1e-5\n idx = 1\n while idx < self._capacity: # while non-leaf\n if self._value[2 * idx] > prefixsum:\n idx = 2 * idx\n else:\n prefixsum -= self._value[2 * idx]\n idx = 2 * idx + 1\n return idx - self._capacity\n\n\nclass MinSegmentTree(SegmentTree):\n def __init__(self, capacity):\n super(MinSegmentTree, self).__init__(\n capacity=capacity,\n operation=min,\n neutral_element=float('inf')\n )\n\n def min(self, start=0, end=None):\n \"\"\"Returns min(arr[start], ..., arr[end])\"\"\"\n\n return super(MinSegmentTree, self).reduce(start, end)\n\nclass ReplayBuffer(object):\n def __init__(self, size):\n \"\"\"Create Replay buffer.\n Parameters\n ----------\n size: int\n Max number of transitions to store in the buffer. When the buffer\n overflows the old memories are dropped.\n \"\"\"\n self._storage = []\n self._maxsize = size\n self._next_idx = 0\n\n def __len__(self):\n return len(self._storage)\n\n def add(self, obs_t, action, reward, obs_tp1, done):\n data = (obs_t, action, reward, obs_tp1, done)\n\n if self._next_idx >= len(self._storage):\n self._storage.append(data)\n else:\n self._storage[self._next_idx] = data\n self._next_idx = (self._next_idx + 1) % self._maxsize\n\n def _encode_sample(self, idxes):\n obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []\n for i in idxes:\n data = self._storage[i]\n obs_t, action, reward, obs_tp1, done = data\n obses_t.append(np.array(obs_t, copy=False))\n actions.append(np.array(action, copy=False))\n rewards.append(reward)\n obses_tp1.append(np.array(obs_tp1, copy=False))\n dones.append(done)\n return np.array(obses_t), np.array(actions), np.array(rewards), np.array(obses_tp1), np.array(dones)\n\n def sample(self, batch_size):\n \"\"\"Sample a batch of experiences.\n Parameters\n ----------\n batch_size: int\n How many transitions to sample.\n Returns\n -------\n obs_batch: np.array\n batch of observations\n act_batch: np.array\n batch of actions executed given obs_batch\n rew_batch: np.array\n rewards received as results of executing act_batch\n next_obs_batch: np.array\n next set of observations seen after executing act_batch\n done_mask: np.array\n done_mask[i] = 1 if executing act_batch[i] resulted in\n the end of an episode and 0 otherwise.\n \"\"\"\n idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]\n return self._encode_sample(idxes)\n\n\nclass PrioritizedReplayBuffer(ReplayBuffer):\n def __init__(self, size, alpha):\n \"\"\"Create Prioritized Replay buffer.\n Parameters\n ----------\n size: int\n Max number of transitions to store in the buffer. 
When the buffer\n overflows the old memories are dropped.\n alpha: float\n how much prioritization is used\n (0 - no prioritization, 1 - full prioritization)\n See Also\n --------\n ReplayBuffer.__init__\n \"\"\"\n super(PrioritizedReplayBuffer, self).__init__(size)\n assert alpha >= 0\n self._alpha = alpha\n\n it_capacity = 1\n while it_capacity < size:\n it_capacity *= 2\n\n self._it_sum = SumSegmentTree(it_capacity)\n self._it_min = MinSegmentTree(it_capacity)\n self._max_priority = 1.0\n\n def add(self, *args, **kwargs):\n \"\"\"See ReplayBuffer.store_effect\"\"\"\n idx = self._next_idx\n super().add(*args, **kwargs)\n self._it_sum[idx] = self._max_priority ** self._alpha\n self._it_min[idx] = self._max_priority ** self._alpha\n\n def _sample_proportional(self, batch_size):\n res = []\n p_total = self._it_sum.sum(0, len(self._storage) - 1)\n every_range_len = p_total / batch_size\n for i in range(batch_size):\n mass = random.random() * every_range_len + i * every_range_len\n idx = self._it_sum.find_prefixsum_idx(mass)\n res.append(idx)\n return res\n\n def sample(self, batch_size, beta):\n \"\"\"Sample a batch of experiences.\n compared to ReplayBuffer.sample\n it also returns importance weights and idxes\n of sampled experiences.\n Parameters\n ----------\n batch_size: int\n How many transitions to sample.\n beta: float\n To what degree to use importance weights\n (0 - no corrections, 1 - full correction)\n Returns\n -------\n obs_batch: np.array\n batch of observations\n act_batch: np.array\n batch of actions executed given obs_batch\n rew_batch: np.array\n rewards received as results of executing act_batch\n next_obs_batch: np.array\n next set of observations seen after executing act_batch\n done_mask: np.array\n done_mask[i] = 1 if executing act_batch[i] resulted in\n the end of an episode and 0 otherwise.\n weights: np.array\n Array of shape (batch_size,) and dtype np.float32\n denoting importance weight of each sampled transition\n idxes: np.array\n Array of shape (batch_size,) and dtype np.int32\n idexes in buffer of sampled experiences\n \"\"\"\n assert beta > 0\n\n idxes = self._sample_proportional(batch_size)\n\n weights = []\n p_min = self._it_min.min() / self._it_sum.sum()\n max_weight = (p_min * len(self._storage)) ** (-beta)\n\n for idx in idxes:\n p_sample = self._it_sum[idx] / self._it_sum.sum()\n weight = (p_sample * len(self._storage)) ** (-beta)\n weights.append(weight / max_weight)\n weights = np.array(weights)\n encoded_sample = self._encode_sample(idxes)\n return tuple(list(encoded_sample) + [weights, idxes])\n\n def update_priorities(self, idxes, priorities):\n \"\"\"Update priorities of sampled transitions.\n sets priority of transition at index idxes[i] in buffer\n to priorities[i].\n Parameters\n ----------\n idxes: [int]\n List of idxes of sampled transitions\n priorities: [float]\n List of updated priorities corresponding to\n transitions at the sampled idxes denoted by\n variable `idxes`.\n \"\"\"\n assert len(idxes) == len(priorities)\n for idx, priority in zip(idxes, priorities):\n assert priority > 0\n assert 0 <= idx < len(self._storage)\n self._it_sum[idx] = priority ** self._alpha\n self._it_min[idx] = priority ** self._alpha\n\n self._max_priority = max(self._max_priority, priority)\n\n def add_with_priority(self, priority, *args, **kwargs):\n \"\"\"See ReplayBuffer.store_effect\"\"\"\n idx = self._next_idx\n super().add(*args, **kwargs)\n self._it_sum[idx] = priority ** self._alpha\n self._it_min[idx] = priority ** self._alpha\n\nimport 
torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.distributions.categorical import Categorical\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport argparse\nfrom distutils.util import strtobool\nimport numpy as np\nimport gym\nfrom gym.wrappers import TimeLimit, Monitor\nimport pybullet_envs\nfrom gym.spaces import Discrete, Box, MultiBinary, MultiDiscrete, Space\nimport time\nimport random\nimport os\nfrom stable_baselines3.common.atari_wrappers import (\n NoopResetEnv, MaxAndSkipEnv, EpisodicLifeEnv, FireResetEnv, WarpFrame, ClipRewardEnv)\nfrom stable_baselines3.common.vec_env import DummyVecEnv, SubprocVecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env import VecFrameStack\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='DQN agent')\n # Common arguments\n parser.add_argument('--exp-name', type=str, default=os.path.basename(__file__).rstrip(\".py\"),\n help='the name of this experiment')\n parser.add_argument('--gym-id', type=str, default=\"BreakoutNoFrameskip-v4\",\n help='the id of the gym environment')\n parser.add_argument('--learning-rate', type=float, default=1e-4,\n help='the learning rate of the optimizer')\n parser.add_argument('--seed', type=int, default=2,\n help='seed of the experiment')\n parser.add_argument('--total-timesteps', type=int, default=10000000,\n help='total timesteps of the experiments')\n parser.add_argument('--torch-deterministic', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,\n help='if toggled, `torch.backends.cudnn.deterministic=False`')\n parser.add_argument('--cuda', type=lambda x:bool(strtobool(x)), default=True, nargs='?', const=True,\n help='if toggled, cuda will not be enabled by default')\n parser.add_argument('--prod-mode', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,\n help='run the script in production mode and use wandb to log outputs')\n parser.add_argument('--capture-video', type=lambda x:bool(strtobool(x)), default=False, nargs='?', const=True,\n help='weather to capture videos of the agent performances (check out `videos` folder)')\n parser.add_argument('--wandb-project-name', type=str, default=\"cleanRL\",\n help=\"the wandb's project name\")\n parser.add_argument('--wandb-entity', type=str, default=None,\n help=\"the entity (team) of wandb's project\")\n \n # Algorithm specific arguments\n parser.add_argument('--num-envs', type=int, default=8,\n help='the number of parallel game environment')\n parser.add_argument('--buffer-size', type=int, default=1000000,\n help='the replay memory buffer size')\n parser.add_argument('--pr-alpha', type=float, default=0.6,\n help='alpha parameter for prioritized replay buffer')\n parser.add_argument('--pr-beta0', type=float, default=0.4,\n help='initial value of beta for prioritized replay buffer')\n parser.add_argument('--pr-eps', type=float, default=1e-6,\n help='epsilon to add to the TD errors when updating priorities.')\n parser.add_argument('--gamma', type=float, default=0.99,\n help='the discount factor gamma')\n parser.add_argument('--target-network-frequency', type=int, default=1000,\n help=\"the timesteps it takes to update the target network\")\n parser.add_argument('--max-grad-norm', type=float, default=0.5,\n help='the maximum norm for the gradient clipping')\n parser.add_argument('--batch-size', type=int, default=32,\n help=\"the batch size of sample from the reply memory\")\n parser.add_argument('--start-e', type=float, default=1.,\n 
help=\"the starting epsilon for exploration\")\n parser.add_argument('--end-e', type=float, default=0.02,\n help=\"the ending epsilon for exploration\")\n parser.add_argument('--exploration-fraction', type=float, default=0.10,\n help=\"the fraction of `total-timesteps` it takes from start-e to go end-e\")\n parser.add_argument('--learning-starts', type=int, default=80000,\n help=\"timestep to start learning\")\n parser.add_argument('--train-frequency', type=int, default=4,\n help=\"the frequency of training\")\n args = parser.parse_args()\n if not args.seed:\n args.seed = int(time.time())\n\n# TRY NOT TO MODIFY: setup the environment\nexperiment_name = f\"{args.gym_id}__{args.exp_name}__{args.seed}__{int(time.time())}\"\nwriter = SummaryWriter(f\"runs/{experiment_name}\")\nwriter.add_text('hyperparameters', \"|param|value|\\n|-|-|\\n%s\" % (\n '\\n'.join([f\"|{key}|{value}|\" for key, value in vars(args).items()])))\nif args.prod_mode:\n import wandb\n wandb.init(project=args.wandb_project_name, entity=args.wandb_entity, sync_tensorboard=True, config=vars(args), name=experiment_name, monitor_gym=True, save_code=True)\n writer = SummaryWriter(f\"/tmp/{experiment_name}\")\n\n# TRY NOT TO MODIFY: seeding\ndevice = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')\nrandom.seed(args.seed)\nnp.random.seed(args.seed)\ntorch.manual_seed(args.seed)\ntorch.backends.cudnn.deterministic = args.torch_deterministic\ndef make_env(gym_id, seed, idx):\n def thunk():\n env = gym.make(gym_id)\n env = wrap_atari(env)\n env = gym.wrappers.RecordEpisodeStatistics(env) # records episode reward in `info['episode']['r']`\n env = wrap_deepmind(\n env,\n clip_rewards=True,\n frame_stack=True,\n scale=False,\n )\n env.seed(args.seed)\n env.action_space.seed(args.seed)\n env.observation_space.seed(args.seed)\n random.seed(args.seed)\n return env\n return thunk\nenvs = DummyVecEnv([make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)])\nassert isinstance(envs.action_space, Discrete), \"only discrete action space is supported\"\n\n# ALGO LOGIC: initialize agent here:\nclass Scale(nn.Module):\n def __init__(self, scale):\n super().__init__()\n self.scale = scale\n\n def forward(self, x):\n return x * self.scale\nclass QNetwork(nn.Module):\n def __init__(self, env, frames=4):\n super(QNetwork, self).__init__()\n self.network = nn.Sequential(\n Scale(1/255),\n nn.Conv2d(frames, 32, 8, stride=4),\n nn.ReLU(),\n nn.Conv2d(32, 64, 4, stride=2),\n nn.ReLU(),\n nn.Conv2d(64, 64, 3, stride=1),\n nn.ReLU(),\n nn.Flatten(),\n nn.Linear(3136, 512),\n nn.ReLU(),\n nn.Linear(512, env.action_space.n)\n )\n\n def forward(self, x):\n return self.network(x)\n\ndef linear_schedule(start_e: float, end_e: float, duration: int, t: int):\n slope = (end_e - start_e) / duration\n return max(slope * t + start_e, end_e)\n\nrb = PrioritizedReplayBuffer(args.buffer_size, args.pr_alpha)\nq_network = QNetwork(envs).to(device)\ntarget_network = QNetwork(envs).to(device)\ntarget_network.load_state_dict(q_network.state_dict())\noptimizer = optim.Adam(q_network.parameters(), lr=args.learning_rate)\nloss_fn = nn.MSELoss()\nprint(device.__repr__())\nprint(q_network)\n\n# TRY NOT TO MODIFY: start the game\n# c stands for combined\nc_obs = np.zeros((envs.num_envs+args.batch_size,)+envs.observation_space.shape, dtype=np.float32)\nc_actions = np.zeros((envs.num_envs+args.batch_size,), dtype=np.float32)\nc_rewards = np.zeros((envs.num_envs+args.batch_size,), dtype=np.float32)\nc_next_obses = 
np.zeros((envs.num_envs+args.batch_size,)+envs.observation_space.shape, dtype=np.float32)\nc_dones = np.zeros((envs.num_envs+args.batch_size,), dtype=np.float32)\n\nupdate_step = 0\nobs = envs.reset()\nfor global_step in range(0, args.total_timesteps, args.num_envs):\n update_step += 1\n epsilon = linear_schedule(args.start_e, args.end_e, args.exploration_fraction*args.total_timesteps, global_step)\n beta = linear_schedule(args.pr_beta0, 1.0, args.total_timesteps, global_step)\n \n if global_step < args.learning_starts:\n action = [envs.action_space.sample() for _ in range(envs.num_envs)]\n next_obs, reward, done, infos = envs.step(action)\n # TRY NOT TO MODIFY: record rewards for plotting purposes\n for info in infos:\n if 'episode' in info.keys():\n print(f\"global_step={global_step}, episode_reward={info['episode']['r']}\")\n writer.add_scalar(\"charts/episode_reward\", info['episode']['r'], global_step)\n writer.add_scalar(\"charts/epsilon\", epsilon, global_step)\n break\n for o, a, r, n, d in zip(obs, action, reward, next_obs, done):\n rb.add(o, a, r, n, d)\n obs = next_obs\n continue\n\n # batch the sampled obs and the environment obs together to make it faster\n experience = rb.sample(args.batch_size, beta=beta)\n (s_obs, s_actions, s_rewards, s_next_obses, s_dones, s_weights, s_batch_idxes) = experience\n c_obs[:args.batch_size], c_obs[args.batch_size:] = s_obs, obs\n logits = q_network.forward(torch.from_numpy(c_obs).to(device))\n _, env_obs_logits = logits[:args.batch_size], logits[args.batch_size:]\n \n # env step\n action = torch.argmax(env_obs_logits, dim=1)\n random_action = torch.randint(0, envs.action_space.n, (envs.num_envs,), device=device)\n random_action_flag = torch.rand(envs.num_envs, device=device) > epsilon\n action = torch.where(random_action_flag, action, random_action)\n cpu_action = action.cpu().numpy()\n next_obs, reward, done, infos = envs.step(cpu_action)\n\n\n c_actions[:args.batch_size], c_actions[args.batch_size:] = s_actions, cpu_action\n c_rewards[:args.batch_size], c_rewards[args.batch_size:] = s_rewards, reward\n c_next_obses[:args.batch_size], c_next_obses[args.batch_size:] = s_next_obses, next_obs\n c_dones[:args.batch_size], c_dones[args.batch_size:] = s_dones, done\n\n with torch.no_grad():\n # target_max = torch.max(target_network.forward(s_next_obses), dim=1)[0]\n temp_c_next_obses = torch.from_numpy(c_next_obses).to(device)\n current_value = q_network.forward(temp_c_next_obses)\n target_value = target_network.forward(temp_c_next_obses)\n target_max = target_value.gather(1, torch.max(current_value, 1)[1].unsqueeze(1)).squeeze(1)\n td_target = torch.from_numpy(c_rewards).to(device) + args.gamma * target_max * (1 - torch.Tensor(c_dones).to(device))\n\n old_val = logits.gather(1, torch.LongTensor(c_actions).view(-1,1).to(device)).squeeze()\n td_errors = td_target - old_val\n \n loss = (td_errors[:args.batch_size] ** 2).mean()\n writer.add_scalar(\"losses/td_loss\", loss, global_step)\n if update_step % args.train_frequency == 0:\n \n # update the weights in the prioritized replay\n new_priorities = np.abs(td_errors[:args.batch_size].tolist()) + args.pr_eps\n rb.update_priorities(s_batch_idxes, new_priorities)\n \n # optimize the midel\n optimizer.zero_grad()\n loss.backward()\n nn.utils.clip_grad_norm_(list(q_network.parameters()), args.max_grad_norm)\n optimizer.step()\n \n # update the target network\n if update_step % args.target_network_frequency == 0:\n print(\"updated\")\n target_network.load_state_dict(q_network.state_dict())\n\n # TRY NOT 
TO MODIFY: record rewards for plotting purposes\n for info in infos:\n if 'episode' in info.keys():\n print(f\"global_step={global_step}, episode_reward={info['episode']['r']}\")\n writer.add_scalar(\"charts/episode_reward\", info['episode']['r'], global_step)\n writer.add_scalar(\"charts/epsilon\", epsilon, global_step)\n break\n\n for p, o, a, r, n, d in zip(np.abs(td_errors[args.batch_size:].tolist()), obs, cpu_action, reward, next_obs, done):\n rb.add_with_priority(p, o, a, r, n, d)\n obs = next_obs\n\nenvs.close()\nwriter.close()" ]
[ [ "torch.nn.Linear", "numpy.sign", "torch.cuda.is_available", "torch.LongTensor", "torch.where", "numpy.concatenate", "torch.manual_seed", "torch.randint", "numpy.transpose", "torch.Tensor", "torch.utils.tensorboard.SummaryWriter", "numpy.expand_dims", "torch.nn.Flatten", "numpy.array", "numpy.zeros", "torch.max", "torch.nn.ReLU", "torch.nn.Conv2d", "torch.argmax", "torch.rand", "torch.nn.MSELoss", "numpy.random.seed", "torch.no_grad", "torch.from_numpy" ] ]
TOPO-EPFL/DDLoc
[ "120eaf0de08609d10b17ceb3a78523d062040924" ]
[ "training/train_initial_coord_regressor_C.py" ]
[ "import os, time, sys\nimport math\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler \nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport torchvision\nfrom torchvision import datasets, models, transforms\nfrom torchvision.utils import make_grid\nfrom tensorboardX import SummaryWriter\n\nfrom models.depth_generator_networks import _UNetGenerator, init_weights, _UNet_coord_down_8_skip_layer, _UNet_coord_down_8_skip_layer_ft\n\nfrom utils.metrics import *\nfrom utils.image_pool import ImagePool\n\nfrom training.base_model import set_requires_grad, base_model\n\nimport warnings # ignore warnings\nwarnings.filterwarnings(\"ignore\")\n\n\ndef get_pixel_grid(SUBSAMPLE):\n \"\"\"\n Generate grid of target reprojection pixel positions (tensor)\n \"\"\"\n pixel_grid = torch.zeros((2,\n math.ceil(1080 / SUBSAMPLE),\n # 1200px is max limit of image size, increase if needed\n math.ceil(1080 / SUBSAMPLE)))\n\n for x in range(0, pixel_grid.size(2)):\n for y in range(0, pixel_grid.size(1)):\n pixel_grid[0, y, x] = x * SUBSAMPLE + SUBSAMPLE / 2\n pixel_grid[1, y, x] = y * SUBSAMPLE + SUBSAMPLE / 2\n\n pixel_grid = pixel_grid.cuda()\n return pixel_grid\n\ndef get_cam_mat(width, height, focal_length):\n \"\"\"\n Get intrinsic camera matrix (tensor)\n \"\"\"\n cam_mat = torch.eye(3)\n cam_mat[0, 0] = focal_length\n cam_mat[1, 1] = focal_length\n cam_mat[0, 2] = width / 2\n cam_mat[1, 2] = height / 2\n cam_mat = cam_mat.cuda()\n return cam_mat\n\ndef coords_world_to_cam(scene_coords, gt_coords, gt_poses):\n \"\"\"\n Transform the scene coordinates to camera coordinates.\n @param scene_coords [B, 3, N] Predicted scene coords tensor.\n @param gt_coords [B, 3, N] Ground-truth scene coords tensor.\n @param gt_poses [B, 4, 4] cam-to-world matrix.\n @return camera_coords [B, 3, N] camera coords tensor corresponding to scene_coords.\n @return target_camera_coords [B, 3, N] camera coords tensor corresponding to gt_coords.\n \"\"\"\n gt_pose_inv = gt_poses.inverse()[:, 0:3, :] # [B, 3, 4], world to camera matrix\n ones = torch.ones((scene_coords.size(0), 1, scene_coords.size(2))).cuda()\n\n scene_coords_ = torch.cat([scene_coords, ones], dim=1) # [B, 4, N]\n gt_coords_ = torch.cat([gt_coords, ones], dim=1) # [B, 4, N]\n\n camera_coords = torch.bmm(gt_pose_inv, scene_coords_) # [B, 3, N] = [B, 3, 4] * [B, 4, N]\n target_camera_coords = torch.bmm(gt_pose_inv, gt_coords_) # [B, 3, N] = [B, 3, 4] * [B, 4, N]\n\n return camera_coords, target_camera_coords\n\ndef pick_valid_points(coord_input, nodata_value, boolean=False):\n \"\"\"\n Pick valid 3d points from provided ground-truth labels.\n @param coord_input [B, C, N] or [C, N] tensor for 3D labels such as scene coordinates or depth.\n @param nodata_value Scalar to indicate NODATA element of ground truth 3D labels.\n @param boolean Return boolean variable or explicit index.\n @return val_points [B, N] or [N, ] Boolean tensor or valid points index.\n \"\"\"\n batch_mode = True\n if len(coord_input.shape) == 2:\n # coord_input shape is [C, N], let's make it compatible\n batch_mode = False\n coord_input = coord_input.unsqueeze(0) # [B, C, N], with B = 1\n\n val_points = torch.sum(coord_input == nodata_value, dim=1) == 0 # [B, N]\n val_points = val_points.to(coord_input.device)\n if not batch_mode:\n val_points = val_points.squeeze(0) # [N, ]\n if boolean:\n pass\n else:\n val_points = torch.nonzero(val_points, as_tuple=True) # a tuple for rows 
and columns indices\n return val_points\n\ndef check_constraints(camera_coords, reproj_error, cam_coords_reg_error, mask_gt_coords_nodata,\n min_depth = 0.1, max_reproj_error = 10., max_coords_reg_error = 50.0):\n\t\"\"\"\n\tCheck constraints on network prediction.\n\t@param camera_coords [B, 3, N] tensor for camera coordinates.\n\t@param reproj_error [B, N] tensor for reprojection errors.\n\t@param cam_coords_reg_error [B, N] tensor for scene coordinate regression raw errors including invalid points.\n\t@param mask_gt_coords_nodata [B, N] tensor indicating points w/o valid scene coords labels.\n\t@param min_depth Scalar, threshold of minimum depth before camera panel in meter.\n\t@param max_reproj_error Scalar, threshold of maximum reprojection error in pixel.\n\t@param max_coords_reg_error Scalar, threshold of maximum scene coords regression error in meter.\n\t@return valid_sc [B, N] Pixels w/ valid scene coords prediction, goes for reprojection error.\n\t\"\"\"\n\t# check predicted scene coordinate for various constraints\n\tinvalid_min_depth = camera_coords[:, 2] < min_depth # [B, N], behind or too close to camera plane\n\tinvalid_repro = reproj_error > max_reproj_error # [B, N], very large reprojection errors\n\n\t# check for additional constraints regarding ground truth scene coordinates\n\tinvalid_gt_distance = cam_coords_reg_error > max_coords_reg_error # [B, N] too far from ground truth scene coordinates\n\tinvalid_gt_distance[mask_gt_coords_nodata] = 0 # [B, N], filter out unknown ground truth scene coordinates\n\n\t# print('mean reprojection error',reproj_error.mean())\n\t# print('1+2',torch.sum((invalid_min_depth + invalid_repro) == 0,dim=1))\n\t# print('2+3',torch.sum((invalid_repro + invalid_gt_distance) == 0,dim=1))\n\t# print('1+3',torch.sum((invalid_min_depth + invalid_gt_distance) == 0,dim=1))\n\t# print('1+2+3',torch.sum((invalid_min_depth + invalid_repro + invalid_gt_distance) == 0,dim=1))\n\n\t# combine all constraints\n\tvalid_sc = (invalid_min_depth + invalid_repro + invalid_gt_distance) == 0 # [B, N]\n\n\treturn valid_sc\n\ndef get_repro_err(camera_coords, cam_mat, pixel_grid_crop, min_depth=0.1):\n \"\"\"\n Get reprojection error for each pixel.\n @param camera_coords [B, 3, N] tensor for camera coordinates.\n @param cam_mat [3, 3] tensor for intrinsic camera matrix.\n @param pixel_grid_crop [2, N] tensor for pixel grid.\n @param min_depth Scalar for minimum reprojected depth.\n @return reprojection_error [B, N] tensor for reprojection error in pixel.\n \"\"\"\n batch_size = camera_coords.size(0)\n reprojection_error = torch.bmm(cam_mat.expand(batch_size, -1, -1), camera_coords) # [B, 3, H_ds*W_ds]\n reprojection_error[:, 2].clamp_(min=min_depth) # avoid division by zero\n reprojection_error = reprojection_error[:, 0:2] / reprojection_error[:, 2:] # [B, 2, H_ds*W_ds]\n\n reprojection_error = reprojection_error - pixel_grid_crop[None, :, :]\n reprojection_error = reprojection_error.norm(p=2, dim=1).clamp(min=1.e-7) # [B, H_ds*W*ds]\n return reprojection_error\n\nclass train_initial_coord_regressor_C(base_model):\n\tdef __init__(self, args, dataloaders_xLabels_joint):\n\t\tsuper(train_initial_coord_regressor_C, self).__init__(args)\n\t\tself._initialize_training()\n\t\t\n\t\tself.dataloaders_xLabels_joint = dataloaders_xLabels_joint\n\n\t\tif args.data_augment:\n\t\t\tself.coordRegressor = _UNet_coord_down_8_skip_layer_ft(input_nc = 3, output_nc = 3, norm='instance')\n\t\telse:\n\t\t\tself.coordRegressor = _UNet_coord_down_8_skip_layer(input_nc = 3, output_nc = 3, 
norm='instance')\n\t\t\n\t\tself.model_name = ['coordRegressor']\n\t\tself.L1loss = nn.L1Loss()\n\n\t\t######\n\t\tself.softclamp = args.softclamp\n\t\tself.hardclamp = args.hardclamp\n\t\tself.isdownsample = args.isdownsample # True\n\t\tif self.isdownsample:\n\t\t\tself.pixel_grid = get_pixel_grid(8)\n\t\telse:\n\t\t\tself.pixel_grid = get_pixel_grid(1)\n\t\tself.start_epoch = args.start_epoch\n\t\tself.data_augment = args.data_augment\n\n\n\t\tif self.isTrain:\n\t\t\tself.coord_optimizer = optim.Adam(self.coordRegressor.parameters(), lr=self.task_lr)\n\t\t\tself.optim_name = ['coord_optimizer']\n\t\t\tself._get_scheduler(optim_type='linear',constant_ratio = 0.4)\n\t\t\tself._initialize_networks()\n\n\t\t\tif self.start_epoch > 0:\n\t\t\t\tself._load_models(self.model_name, self.start_epoch, isTrain=True,model_path=self.save_dir)\n\t\t\t\t# take step in optimizer\n\t\t\t\tfor scheduler in self.scheduler_list:\n\t\t\t\t\tfor _ in range(self.start_epoch):\n\t\t\t\t\t\tscheduler.step()\n\t\t\t\tfor optimizer in self.optim_name:\t\t\t\t\n\t\t\t\t\tlr = getattr(self, optimizer).param_groups[0]['lr']\n\t\t\t\t\tlr_update = 'Start with epoch {}/{} Optimizaer: {} learning rate = {:.7f} '.format(\n\t\t\t\t\t\tself.start_epoch+1, self.total_epoch_num, optimizer, lr)\n\t\t\t\t\tprint(lr_update)\n\n\t\tself.EVAL_best_loss = float('inf')\n\t\tself.EVAL_best_model_epoch = 0\n\t\tself.EVAL_all_results = {}\n\n\t\tself._check_parallel()\n\n\tdef _get_project_name(self):\n\t\treturn 'train_initial_coord_regressor_C'\n\n\tdef _initialize_networks(self):\n\t\tfor name in self.model_name:\n\t\t\tgetattr(self, name).train().to(self.device)\n\t\t\tinit_weights(getattr(self, name), net_name=name, init_type='normal', gain=0.02)\n\n\tdef compute_coord_loss(self, image, coord_gt, gt_poses, focal_length, size, size_adapt=False, nodata_value=-1.):\n\t\t\n\t\tif size_adapt:\n\t\t\t# feed with size information when data augmentation is activated\n\t\t\t# size = [[img_h, img_w], [coords_h, coords_w]]\n\t\t\tcoord_pred = self.coordRegressor(image, size[0][0], size[0][1], size[1][0], size[1][1])\n\t\telse:\n\t\t\tcoord_pred = self.coordRegressor(image)\n\t\t\n\t\tcam_mat = get_cam_mat(image.size(3), image.size(2), focal_length)\n\t\tpixel_grid_crop = self.pixel_grid[:, 0:coord_gt.size(2), 0:coord_gt.size(3)].clone().view(2, -1)\n\n\t\tcoord_pred = coord_pred.view(coord_pred.size(0), 3, -1) # [B, 3, H_ds*W_ds]\n\t\tcoord_gt = coord_gt.view(coord_gt.size(0), 3, -1) # [B, 3, H_ds*W_ds]\n\n\t\tcamera_coords, target_camera_coords = coords_world_to_cam(coord_pred, coord_gt, gt_poses) # [B, 3, H_ds*W_ds]*2\n\t\tcamera_coords_reg_error = torch.norm(camera_coords - target_camera_coords, dim=1, p=2) # [B, H_ds*W_ds]\n\n\t\treprojection_error = get_repro_err(camera_coords, cam_mat, pixel_grid_crop) # [B, H_ds*W_ds]\n\n\t\t# check for invalid/unknown ground truth scene coordinates\n\t\tmask_gt_coords_valdata = pick_valid_points(coord_gt[:, :3, :], nodata_value, boolean=True) # [B, H_ds*W_ds]\n\t\tmask_gt_coords_nodata = torch.logical_not(mask_gt_coords_valdata) # [B, H_ds*W_ds]\n\n\t\t# [B, H_ds*W_ds], warning: it is not coupled with mask_gt_coords_valdata!\n\t\tvalid_scene_coordinates = check_constraints(camera_coords, reprojection_error, camera_coords_reg_error,\n\t\t\t\t\t\t\t\t\t\t\t\t\tmask_gt_coords_nodata, max_reproj_error = self.hardclamp) \n\t\t\n\t\tinvalid_scene_coordinates = torch.logical_not(valid_scene_coordinates) # [B, H_ds*W_ds]\n\t\tnum_valid_sc = valid_scene_coordinates.sum(dim=1).cpu().numpy() # 
[B]\n\t\tnum_pixels_batch = valid_scene_coordinates.numel() # number of all pixels in the batch\n\t\tnum_pixels_instance = valid_scene_coordinates[0].numel() # number of pixels in one data point\n\n\t\tvalid_pred_rate = float(num_valid_sc.sum() / num_pixels_batch) # scalar\n\t\t# assemble loss\n\t\tloss = 0\n\n\t\t\"\"\"Reprojection error for all valid scene coordinates\"\"\"\n\t\tif num_valid_sc.sum() > 0:\n\t\t\t# calculate soft clamped l1 loss of reprojection error\n\t\t\treprojection_error = reprojection_error * valid_scene_coordinates # [B, H_ds*W_ds]\n\t\t\tloss_l1 = torch.sum(reprojection_error * (reprojection_error <= self.softclamp), dim=1).clamp(min=1.e-7) # [B]\n\t\t\tloss_sqrt = reprojection_error * (reprojection_error > self.softclamp) # [B, H_ds*W_ds]\n\t\t\tloss_sqrt = torch.sum(torch.sqrt(self.softclamp * loss_sqrt + 1.e-7), dim=1).clamp(min=1.e-7) # [B]\n\t\t\tloss += loss_l1 + loss_sqrt # [B]\n\n\t\t\"\"\"3D distance loss for all invalid scene coordinates where the ground truth is known\"\"\"\n\t\tinvalid_scene_coordinates[mask_gt_coords_nodata] = 0 # filter out pixels w/o valid labels\n\n\t\tloss_3d = torch.sum(camera_coords_reg_error * invalid_scene_coordinates,\n\t\t\t\t\t\t\tdim=1) # [B], applied to invalid pixels w/ valid labels\n\t\tloss += loss_3d\n\t\t\n\t\tloss = loss.sum() # scalar, mean over each pixels within the batch\t\n\t\tloss /= num_pixels_batch\n\n\t\tloss_3d = loss_3d.sum()\n\t\tloss_3d /= num_pixels_batch\n\n\t\treturn loss, valid_pred_rate\n\n\tdef train(self):\n\t\tphase = 'train'\n\t\tsince = time.time()\n\n\t\ttensorboardX_iter_count = 0\n\t\tfor epoch in range(self.start_epoch,self.total_epoch_num):\n\t\t\tprint('\\nEpoch {}/{}'.format(epoch+1, self.total_epoch_num))\n\t\t\tprint('-' * 10)\n\t\t\tfn = open(self.train_log,'a')\n\t\t\tfn.write('\\nEpoch {}/{}\\n'.format(epoch+1, self.total_epoch_num))\n\t\t\tfn.write('--'*5+'\\n')\n\t\t\tfn.close()\n\n\t\t\tself._set_models_train(['coordRegressor'])\n\t\t\titerCount = 0\n\n\t\t\tsta_list = []\n\t\t\tstart_time = time.time()\n\t\t\tfor sample_dict in self.dataloaders_xLabels_joint:\n\t\t\t\tif self.data_augment:\n\t\t\t\t\timage_real, coord_real, gt_poses_real, focal_length_real, img_h_real, img_w_real, coords_h_real, coords_w_real = sample_dict['real']\n\t\t\t\t\timage_syn, coord_syn, gt_poses_syn, focal_length_syn, img_h_syn, img_w_syn, coords_h_syn, coords_w_syn = sample_dict['syn']\n\t\t\t\t\tsize_real = [[img_h_real, img_w_real], [coords_h_real, coords_w_real]]\n\t\t\t\t\tsize_syn = [[img_h_syn, img_w_syn], [coords_h_syn, coords_w_syn]]\n\t\t\t\telse:\n\t\t\t\t\timage_real, coord_real, gt_poses_real, focal_length_real = sample_dict['real']\n\t\t\t\t\timage_syn, coord_syn, gt_poses_syn, focal_length_syn = sample_dict['syn']\n\t\t\t\t\tsize_real = [[0,0],[0,0]] #useless if no data_aug\n\t\t\t\t\tsize_syn = [[0,0],[0,0]]\n\n\n\t\t\t\tfocal_length_real = float(focal_length_real.view(-1)[0])\n\t\t\t\tfocal_length_syn = float(focal_length_syn.view(-1)[0])\n\n\t\t\t\timage_real = image_real.to(self.device)\n\t\t\t\tcoord_real = coord_real.to(self.device)\n\t\t\t\tgt_poses_real = gt_poses_real.to(self.device)\n\n\t\t\t\timage_syn = image_syn.to(self.device)\n\t\t\t\tcoord_syn = coord_syn.to(self.device)\n\t\t\t\tgt_poses_syn = gt_poses_syn.to(self.device)\n\n\n\t\t\t\twith torch.set_grad_enabled(phase=='train'):\n\t\t\t\t\ttotal_loss = 0.\n\t\t\t\t\tself.coordRegressor.zero_grad()\n\t\t\t\t\treal_coord_loss, valid_rate_real = self.compute_coord_loss(image_real, coord_real, gt_poses_real, 
focal_length_real,size_real,size_adapt=self.data_augment)\n\t\t\t\t\tsyn_coord_loss, valid_rate_syn = self.compute_coord_loss(image_syn, coord_syn, gt_poses_syn, focal_length_syn,size_syn,size_adapt=self.data_augment)\n\t\t\t\t\ttotal_loss += (real_coord_loss + syn_coord_loss)\n\n\t\t\t\t\ttotal_loss.backward()\n\n\t\t\t\t\tself.coord_optimizer.step()\n\n\t\t\t\tsta_list.append([float(total_loss),float(real_coord_loss),float(valid_rate_real),float(syn_coord_loss),float(valid_rate_syn)])\n\t\t\t\titerCount += 1\n\n\t\t\t\tif iterCount % 5 == 0:\n\t\t\t\t\tloss_summary = '\\t{}/{} total_loss: {:.7f}, real_coord_loss: {:.7f}, syn_coord_loss: {:.7f}'.format(\n\t\t\t\t\t\titerCount, len(self.dataloaders_xLabels_joint), total_loss, real_coord_loss, syn_coord_loss)\n\n\t\t\t\t\tprint(loss_summary)\n\t\t\t\t\tprint('valid prediction rate for real data is {:.7f}, valid prediction rate for syn data is {:.7f}'.format(valid_rate_real, valid_rate_syn))\n\t\t\t\t\tfn = open(self.train_log,'a')\n\t\t\t\t\tfn.write(loss_summary)\n\t\t\t\t\tfn.close()\n\n\t\t\tif self.use_tensorboardX:\t\n\t\t\t\tsta_list = np.mean(sta_list,axis=0)\n\t\t\t\tprint(sta_list)\n\t\t\t\t# add loss values\n\t\t\t\tloss_val_list = [sta_list[0],sta_list[1],sta_list[2],sta_list[3],sta_list[4]]\n\t\t\t\tloss_name_list = ['total_loss', 'real_coord_loss', 'valid_rate_real', 'syn_coord_loss','valid_rate_syn']\n\t\t\t\tself.write_2_tensorboardX(self.train_SummaryWriter, loss_val_list, name=loss_name_list, mode='scalar', count=tensorboardX_iter_count)\n\n\t\t\t\ttensorboardX_iter_count += 1\n\t\t\tprint('\\t average total loss: {:.7f}, real_coord_loss: {:.7f}, valid_rate_real: {:.7f}, syn_coord_loss: {:.7f}, valid_rate_syn: {:.7f}'.format(\n\t\t\t\tsta_list[0],sta_list[1],sta_list[2],sta_list[3],sta_list[4]))\n\t\t\tfn = open(self.train_log,'a')\n\t\t\tfn.write('\\t average total loss: {:.7f}, real_coord_loss: {:.7f}, valid_rate_real: {:.7f}, syn_coord_loss: {:.7f}, valid_rate_syn: {:.7f}'.format(\n\t\t\t\tsta_list[0],sta_list[1],sta_list[2],sta_list[3],sta_list[4]))\n\t\t\tfn.close()\n\t\t\t# take step in optimizer\n\t\t\tfor scheduler in self.scheduler_list:\n\t\t\t\tscheduler.step()\n\t\t\tfor optim in self.optim_name:\t\t\t\t\n\t\t\t\tlr = getattr(self, optim).param_groups[0]['lr']\n\t\t\t\ttime_elapsed = time.time() - start_time\n\t\t\t\tlr_update = 'Epoch {}/{} finished: {} learning rate = {:.7f} time taken: {:.0f}m {:.0f}s'.format(\n\t\t\t\t\tepoch+1, self.total_epoch_num, optim, lr, time_elapsed // 60, time_elapsed % 60)\n\t\t\t\tprint(lr_update)\n\t\t\t\t\n\t\t\t\tfn = open(self.train_log,'a')\n\t\t\t\tfn.write(lr_update)\n\t\t\t\tfn.close()\n\n\t\t\tif (epoch+1) % self.save_steps == 0:\n\t\t\t\tself.save_models(self.model_name, mode=epoch+1)\n\n\t\ttime_elapsed = time.time() - since\n\t\tprint('\\nTraining complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n\t\t\n\t\tfn = open(self.train_log,'a')\n\t\tfn.write('\\nTraining complete in {:.0f}m {:.0f}s\\n'.format(time_elapsed // 60, time_elapsed % 60))\n\t\tfn.close()\n" ]
[ [ "torch.nonzero", "torch.cat", "torch.sqrt", "torch.norm", "torch.bmm", "torch.nn.L1Loss", "torch.logical_not", "torch.eye", "torch.set_grad_enabled", "torch.sum" ] ]
DongChengdongHangZhou/csv_read_write
[ "e83dc84cc3ee38e1da73367903fec01a0a632b01" ]
[ "csvWriter.py" ]
[ "import csv\r\nimport torch\r\nimport numpy as np\r\nimport tifffile as tiff\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom azimuth_integral import GetPSD1D\r\n\r\ndef write_csv():\r\n f = open('fake_fingerprint.csv','w',newline='')\r\n f_mean = open('mean_fake_fingerprint.csv','w',newline='')\r\n writer1 = csv.writer(f)\r\n writer2 = csv.writer(f_mean)\r\n sum = np.zeros((128,))\r\n for i in range(7400):\r\n try:\r\n print(i)\r\n dir = 'trainB/'+str(i)+'_B.tiff'\r\n spec = tiff.imread(dir)\r\n spec = 0.5*spec + 0.5\r\n spec = np.exp(spec*16)-1\r\n res = GetPSD1D(spec)\r\n res = res/res[0]\r\n sum = sum + res\r\n writer1.writerow(res)\r\n except:\r\n print('trainB/'+str(i)+'_B.tiff')\r\n\r\n mean = sum/7400\r\n writer2.writerow(mean)\r\n\r\ndef read_csv():\r\n with open('mean_results.csv','r')as f:\r\n f_csv = csv.reader(f)\r\n for row1 in f_csv:\r\n row1 = [float(i) for i in row1]\r\n print(row1)\r\n\r\n with open('mean_fake_fingerprint.csv','r')as f:\r\n f_csv = csv.reader(f)\r\n for row2 in f_csv:\r\n row2 = [float(i) for i in row2]\r\n print(row2)\r\n\r\n with open('mean_targetData.csv','r')as f:\r\n f_csv = csv.reader(f)\r\n for row3 in f_csv:\r\n row3 = [float(i) for i in row3]\r\n print(row3)\r\n plt.plot(row1,'red')\r\n plt.plot(row2,'blue')\r\n plt.plot(row3,'green')\r\n plt.show()\r\n\r\ndef draw_spec_all():\r\n data_results = pd.read_csv('./results.csv',header=None)\r\n data_results = np.array(data_results)\r\n data_fake_fingerprint = pd.read_csv('./fake_fingerprint.csv',header=None)\r\n data_fake_fingerprint = np.array(data_fake_fingerprint)\r\n data_targetData = pd.read_csv('./targetData.csv',header=None)\r\n data_targetData = np.array(data_targetData)\r\n for i in range(200):\r\n row1,row2,row3 = data_results[i], data_fake_fingerprint[i],data_targetData[i]\r\n row1 = [float(i) for i in row1]\r\n row2 = [float(i) for i in row2]\r\n row3 = [float(i) for i in row3]\r\n plt.plot(row1,'red')\r\n plt.plot(row2,'blue')\r\n plt.plot(row3,'green')\r\n plt.show() \r\n\r\ndef crossEntropyLossValue(tensor1,tensor2):\r\n\r\n '''\r\n you must rewrite your own crossEntropyLoss since\r\n the pytorch version of crossEntropyLoss is\r\n (p(x)*log(q(x))).sum()\r\n but the crossEntropyLoss applied in this paper is\r\n (p(x)*log(q(x))+(1-p(x))*log(1-q(x))).sum()\r\n '''\r\n loss = tensor1*torch.log(tensor2)+(1-tensor1)*torch.log(1-tensor2)\r\n return loss\r\n\r\nif __name__ == '__main__':\r\n draw_spec_all()\r\n\r\n\r\n\r\n" ]
[ [ "numpy.array", "numpy.zeros", "matplotlib.pyplot.plot", "numpy.exp", "torch.log", "matplotlib.pyplot.show", "pandas.read_csv" ] ]
albwrekt/housing_median_cost_prediction
[ "46e96fabf381978cd3074117a46696dfbddd621a" ]
[ "netflix_demo.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 23 21:26:42 2020\n\n@author: albwrekt\n\"\"\"\n\nimport os\nimport pandas as pd\nimport numpy as np\nfrom pandas.plotting import scatter_matrix\n\n\n# this dataset will be used to predict the rating of the movie\n\nDATA_PATH = \"../../archive/netflix_titles.csv\"\nINDEX = \"show_id\"\n\n# method for reading in the data\ndef load_data(datapath=DATA_PATH):\n return pd.read_csv(datapath)\n\n# split up the training set and the testing set\ndef split_test_train_set(testset, test_ratio):\n shuffled_indices = np.random.permutation(len(testset))\n test_set_size = int(len(testset) * test_ratio)\n test_indices = shuffled_indices[:test_set_size]\n train_indices = shuffled_indices[test_set_size:]\n return testset.iloc[train_indices], testset.iloc[test_indices]\n\ndef split_numbers_and_categories(dataset, index=INDEX):\n num_list = dataset.copy()\n cat_list = dataset.copy()\n for key in dataset.keys():\n if dataset[key].dtype == 'object':\n num_list.drop(key, axis=1, inplace=True)\n else:\n cat_list.drop(key, axis=1, inplace=True)\n max_data_count = len(dataset[index])\n return max_data_count, num_list, cat_list\n\ndef process_numeric_dataset(num_set):\n num_corr = num_set.corr()\n for key in num_set.keys():\n print(num_corr[key].sort_values(ascending=False))\n print(\"\\n\\n\")\n scatter_matrix(num_set, figsize=(12, 8))\n \ndef process_category_datasets(cat_set):\n print(\"category somethign here\")\n\n# investigate the dataset and display the information to the user\ndef investigate_dataset(data):\n print(data.head())\n print(data.info())\n print(data.describe())\n print(data.keys())\n for key in data.keys():\n value_counts = data[key].value_counts().to_frame()\n print(\"Overall Key:\", key)\n for value in value_counts:\n print(type(value))\n print(value)\n \n \ndataset = load_data()\ntrain_set, test_set = split_test_train_set(dataset, 0.3)\nmax_data_count, num_list, cat_list = split_numbers_and_categories(train_set)\nprocess_numeric_dataset(num_list)\n\n\n\n\n " ]
[ [ "pandas.read_csv", "pandas.plotting.scatter_matrix" ] ]
pranaysy/neurodsp
[ "4236a32335af561f0e10b591b1aecfd7719aec59" ]
[ "neurodsp/burst/dualthresh.py" ]
[ "\"\"\"The dual threshold algorithm for detecting oscillatory bursts in a neural signal.\"\"\"\n\nimport numpy as np\n\nfrom neurodsp.utils.core import get_avg_func\nfrom neurodsp.utils.checks import check_param_options\nfrom neurodsp.utils.decorators import multidim\nfrom neurodsp.timefrequency.hilbert import amp_by_time\n\n###################################################################################################\n###################################################################################################\n\n@multidim()\ndef detect_bursts_dual_threshold(sig, fs, dual_thresh, f_range=None,\n min_n_cycles=3, min_burst_duration=None,\n avg_type='median', magnitude_type='amplitude',\n **filter_kwargs):\n \"\"\"Detect bursts in a signal using the dual threshold algorithm.\n\n Parameters\n ----------\n sig : 1d array\n Time series.\n fs : float\n Sampling rate, in Hz.\n dual_thresh : tuple of (float, float)\n Low and high threshold values for burst detection.\n Units are normalized by the average signal magnitude.\n f_range : tuple of (float, float), optional\n Frequency range, to filter signal to, before running burst detection.\n If f_range is None, then no filtering is applied prior to running burst detection.\n min_n_cycles : float, optional, default: 3\n Minimum burst duration in to keep.\n Only used if `f_range` is defined, and is used as the number of cycles at f_range[0].\n min_burst_duration : float, optional, default: None\n Minimum length of a burst, in seconds. Must be defined if not filtering.\n Only used if `f_range` is not defined, or if `min_n_cycles` is set to None.\n avg_type : {'median', 'mean'}, optional\n Averaging method to use to normalize the magnitude that is used for thresholding.\n magnitude_type : {'amplitude', 'power'}, optional\n Metric of magnitude used for thresholding.\n **filter_kwargs\n Keyword parameters to pass to `filter_signal`.\n\n Returns\n -------\n is_burst : 1d array\n Boolean indication of where bursts are present in the input signal.\n True indicates that a burst was detected at that sample, otherwise False.\n\n Notes\n -----\n The dual-threshold burst detection algorithm was originally proposed in [1]_.\n\n References\n ----------\n .. [1] Feingold, J., Gibson, D. J., DePasquale, B., & Graybiel, A. M. (2015).\n Bursts of beta oscillation differentiate postperformance activity in\n the striatum and motor cortex of monkeys performing movement tasks.\n Proceedings of the National Academy of Sciences, 112(44), 13687–13692.\n DOI: https://doi.org/10.1073/pnas.1517629112\n\n Examples\n --------\n Detect bursts using the dual threshold algorithm:\n\n >>> from neurodsp.sim import sim_combined\n >>> sig = sim_combined(n_seconds=10, fs=500,\n ... components={'sim_synaptic_current': {},\n ... 'sim_bursty_oscillation' : {'freq': 10}},\n ... 
component_variances=[0.1, 0.9])\n >>> is_burst = detect_bursts_dual_threshold(sig, fs=500, dual_thresh=(1, 2), f_range=(8, 12))\n \"\"\"\n\n if len(dual_thresh) != 2:\n raise ValueError(\"Invalid number of elements in 'dual_thresh' parameter\")\n\n # Compute amplitude time series\n sig_magnitude = amp_by_time(sig, fs, f_range, remove_edges=False, **filter_kwargs)\n\n # Set magnitude as power or amplitude: square if power, leave as is if amplitude\n check_param_options(magnitude_type, 'magnitude_type', ['amplitude', 'power'])\n if magnitude_type == 'power':\n sig_magnitude = sig_magnitude**2\n\n # Calculate normalized magnitude\n sig_magnitude = sig_magnitude / get_avg_func(avg_type)(sig_magnitude)\n\n # Identify time periods of bursting using the 2 thresholds\n is_burst = _dual_threshold_split(sig_magnitude, dual_thresh[1], dual_thresh[0])\n\n # Remove bursts detected that are too short\n # Use a number of cycles defined on the frequency range, if available\n if f_range is not None and min_n_cycles is not None:\n min_burst_samples = int(np.ceil(min_n_cycles * fs / f_range[0]))\n # Otherwise, make sure minimum duration is set, and use that\n else:\n if min_burst_duration is None:\n raise ValueError(\"Minimum burst duration must be defined if not filtering \"\n \"and using a number of cycles threshold.\")\n min_burst_samples = int(np.ceil(min_burst_duration * fs))\n\n is_burst = _rmv_short_periods(is_burst, min_burst_samples)\n\n return is_burst.astype(bool)\n\n\ndef _dual_threshold_split(sig, thresh_hi, thresh_lo):\n \"\"\"Identify periods that are above thresh_lo and have at least one value above thresh_hi.\"\"\"\n\n # Find all values above thresh_hi\n # To avoid bug in later loop, do not allow first or last index to start off as 1\n sig[[0, -1]] = 0\n idx_over_hi = np.where(sig >= thresh_hi)[0]\n\n # Initialize values in identified period\n positive = np.zeros(len(sig))\n positive[idx_over_hi] = 1\n\n # Iteratively test if a value is above thresh_lo if it is not currently in an identified period\n sig_len = len(sig)\n\n for ind in idx_over_hi:\n\n j_down = ind - 1\n if positive[j_down] == 0:\n j_down_done = False\n while j_down_done is False:\n if sig[j_down] >= thresh_lo:\n positive[j_down] = 1\n j_down -= 1\n if j_down < 0:\n j_down_done = True\n else:\n j_down_done = True\n\n j_up = ind + 1\n if positive[j_up] == 0:\n j_up_done = False\n while j_up_done is False:\n if sig[j_up] >= thresh_lo:\n positive[j_up] = 1\n j_up += 1\n if j_up >= sig_len:\n j_up_done = True\n else:\n j_up_done = True\n\n return positive\n\n\ndef _rmv_short_periods(sig, n_samples):\n \"\"\"Remove periods that are equal to 1 for less than n_samples.\"\"\"\n\n if np.sum(sig) == 0:\n return sig\n\n osc_changes = np.diff(1 * sig)\n osc_starts = np.where(osc_changes == 1)[0]\n osc_ends = np.where(osc_changes == -1)[0]\n\n if len(osc_starts) == 0:\n osc_starts = [0]\n if len(osc_ends) == 0:\n osc_ends = [len(osc_changes)]\n\n if osc_ends[0] < osc_starts[0]:\n osc_starts = np.insert(osc_starts, 0, 0)\n if osc_ends[-1] < osc_starts[-1]:\n osc_ends = np.append(osc_ends, len(osc_changes))\n\n osc_length = osc_ends - osc_starts\n osc_starts_long = osc_starts[osc_length >= n_samples]\n osc_ends_long = osc_ends[osc_length >= n_samples]\n\n is_osc = np.zeros(len(sig))\n for ind in range(len(osc_starts_long)):\n is_osc[osc_starts_long[ind]:osc_ends_long[ind]] = 1\n\n return is_osc\n" ]
[ [ "numpy.ceil", "numpy.sum", "numpy.diff", "numpy.where", "numpy.insert" ] ]
DancunManyinsa/netbot
[ "625349e785103eb318dd3302cb5672bf64a410f1" ]
[ "scripts/plot.py" ]
[ "#!/usr/bin/env python3.6\n\n\nimport os\nimport sys\nfrom datetime import datetime\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\ndef csv_path():\n return os.path.join(os.path.dirname(__file__), os.path.pardir, \"data\", \"metrics.csv\")\n\ndef main():\n last = int(sys.argv[1]) if len(sys.argv) == 2 else 50\n\n metrics_csv = os.path.join(os.path.dirname(__file__), )\n network = pd.read_csv(csv_path())\n\n last = min(last, len(network))\n\n network = network.tail(last)\n network['time_raw'] = network.apply(lambda row: datetime.strptime(row.time, \"%Y-%m-%d %H:%M:%S\"), axis=1)\n network = network.drop(network[network.status == \"offline\"].index)\n\n fig, axes = plt.subplots(3, 1, sharex=True)\n fig.subplots_adjust(hspace=0.1)\n\n fig.suptitle(\"Ping, upload and download for the last {} observations\".format(last))\n\n for i, (metric, unit) in enumerate(zip(['ping', 'upload', 'download'], ['ms', 'Mbps', 'Mbps'])):\n metric_mean = np.mean(network[metric])\n\n axes[i].plot(network['time_raw'], network[metric], label=\"{} ({})\".format(metric, unit))\n axes[i].axhline(y=metric_mean, color='r', linestyle='-', label=\"mean: {:.2f}\".format(metric_mean))\n axes[i].legend(fancybox=True, framealpha=1, shadow=True, borderpad=1, frameon=True)\n axes[i].set(ylabel=metric)\n\n axes[i].set_xlabel(\"Time\")\n\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "matplotlib.pyplot.show", "numpy.mean", "matplotlib.pyplot.subplots" ] ]
alex-simm/c3
[ "9e36fb04ebdaca2ba59134d7d1775fd6a5b174f0" ]
[ "test/test_two_qubits.py" ]
[ "\"\"\"\nintegration testing module for C1 optimization through two-qubits example\n\"\"\"\n\nimport os\nimport tempfile\nimport copy\nimport pickle\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_array_almost_equal as almost_equal\n\n# Main C3 objects\nfrom c3.c3objs import Quantity as Qty\nfrom c3.parametermap import ParameterMap as Pmap\nfrom c3.experiment import Experiment as Exp\nfrom c3.model import Model as Mdl\nfrom c3.generator.generator import Generator as Gnr\n\n# Building blocks\nimport c3.generator.devices as devices\nimport c3.signal.pulse as pulse\nimport c3.signal.gates as gates\n\n# Libs and helpers\nimport c3.libraries.algorithms as algorithms\nimport c3.libraries.chip as chip\nimport c3.libraries.envelopes as envelopes\nimport c3.libraries.fidelities as fidelities\nimport c3.libraries.hamiltonians as hamiltonians\nimport c3.libraries.tasks as tasks\n\nfrom c3.optimizers.optimalcontrol import OptimalControl\n\nlogdir = os.path.join(tempfile.TemporaryDirectory().name, \"c3logs\")\n\nqubit_lvls = 3\nfreq_q1 = 5e9\nanhar_q1 = -210e6\nt1_q1 = 27e-6\nt2star_q1 = 39e-6\nqubit_temp = 50e-3\n\nq1 = chip.Qubit(\n name=\"Q1\",\n desc=\"Qubit 1\",\n freq=Qty(value=freq_q1, min_val=4.995e9, max_val=5.005e9, unit=\"Hz 2pi\"),\n anhar=Qty(value=anhar_q1, min_val=-380e6, max_val=-120e6, unit=\"Hz 2pi\"),\n hilbert_dim=qubit_lvls,\n t1=Qty(value=t1_q1, min_val=1e-6, max_val=90e-6, unit=\"s\"),\n t2star=Qty(value=t2star_q1, min_val=10e-6, max_val=90e-3, unit=\"s\"),\n temp=Qty(value=qubit_temp, min_val=0.0, max_val=0.12, unit=\"K\"),\n)\n\nfreq_q2 = 5.6e9\nanhar_q2 = -240e6\nt1_q2 = 23e-6\nt2star_q2 = 31e-6\nq2 = chip.Qubit(\n name=\"Q2\",\n desc=\"Qubit 2\",\n freq=Qty(value=freq_q2, min_val=5.595e9, max_val=5.605e9, unit=\"Hz 2pi\"),\n anhar=Qty(value=anhar_q2, min_val=-380e6, max_val=-120e6, unit=\"Hz 2pi\"),\n hilbert_dim=qubit_lvls,\n t1=Qty(value=t1_q2, min_val=1e-6, max_val=90e-6, unit=\"s\"),\n t2star=Qty(value=t2star_q2, min_val=10e-6, max_val=90e-6, unit=\"s\"),\n temp=Qty(value=qubit_temp, min_val=0.0, max_val=0.12, unit=\"K\"),\n)\n\ncoupling_strength = 20e6\nq1q2 = chip.Coupling(\n name=\"Q1-Q2\",\n desc=\"coupling\",\n comment=\"Coupling qubit 1 to qubit 2\",\n connected=[\"Q1\", \"Q2\"],\n strength=Qty(\n value=coupling_strength, min_val=-1 * 1e3, max_val=200e6, unit=\"Hz 2pi\"\n ),\n hamiltonian_func=hamiltonians.int_XX,\n)\n\n\ndrive = chip.Drive(\n name=\"d1\",\n desc=\"Drive 1\",\n comment=\"Drive line 1 on qubit 1\",\n connected=[\"Q1\"],\n hamiltonian_func=hamiltonians.x_drive,\n)\ndrive2 = chip.Drive(\n name=\"d2\",\n desc=\"Drive 2\",\n comment=\"Drive line 2 on qubit 2\",\n connected=[\"Q2\"],\n hamiltonian_func=hamiltonians.x_drive,\n)\n\nm00_q1 = 0.97 # Prop to read qubit 1 state 0 as 0\nm01_q1 = 0.04 # Prop to read qubit 1 state 0 as 1\nm00_q2 = 0.96 # Prop to read qubit 2 state 0 as 0\nm01_q2 = 0.05 # Prop to read qubit 2 state 0 as 1\none_zeros = np.array([0] * qubit_lvls)\nzero_ones = np.array([1] * qubit_lvls)\none_zeros[0] = 1\nzero_ones[0] = 0\nval1 = one_zeros * m00_q1 + zero_ones * m01_q1\nval2 = one_zeros * m00_q2 + zero_ones * m01_q2\nmin_val = one_zeros * 0.8 + zero_ones * 0.0\nmax_val = one_zeros * 1.0 + zero_ones * 0.2\nconfusion_row1 = Qty(value=val1, min_val=min_val, max_val=max_val, unit=\"\")\nconfusion_row2 = Qty(value=val2, min_val=min_val, max_val=max_val, unit=\"\")\nconf_matrix = tasks.ConfusionMatrix(Q1=confusion_row1, Q2=confusion_row2)\n\ninit_temp = 50e-3\ninit_ground = tasks.InitialiseGround(\n 
init_temp=Qty(value=init_temp, min_val=-0.001, max_val=0.22, unit=\"K\")\n)\n\nmodel = Mdl(\n [q1, q2], # Individual, self-contained components\n [drive, drive2, q1q2], # Interactions between components\n [conf_matrix, init_ground], # SPAM processing\n)\n\nmodel.set_lindbladian(False)\nmodel.set_dressed(True)\n\nsim_res = 100e9 # Resolution for numerical simulation\nawg_res = 2e9 # Realistic, limited resolution of an AWG\n\ngenerator = Gnr(\n devices={\n \"LO\": devices.LO(name=\"lo\", resolution=sim_res, outputs=1),\n \"AWG\": devices.AWG(name=\"awg\", resolution=awg_res, outputs=1),\n \"DigitalToAnalog\": devices.DigitalToAnalog(\n name=\"dac\", resolution=sim_res, inputs=1, outputs=1\n ),\n \"Response\": devices.Response(\n name=\"resp\",\n rise_time=Qty(value=0.3e-9, min_val=0.05e-9, max_val=0.6e-9, unit=\"s\"),\n resolution=sim_res,\n inputs=1,\n outputs=1,\n ),\n \"Mixer\": devices.Mixer(name=\"mixer\", inputs=2, outputs=1),\n \"VoltsToHertz\": devices.VoltsToHertz(\n name=\"v_to_hz\",\n V_to_Hz=Qty(value=1e9, min_val=0.9e9, max_val=1.1e9, unit=\"Hz/V\"),\n inputs=1,\n outputs=1,\n ),\n },\n chains={\n \"d1\": [\"LO\", \"AWG\", \"DigitalToAnalog\", \"Response\", \"Mixer\", \"VoltsToHertz\"],\n \"d2\": [\"LO\", \"AWG\", \"DigitalToAnalog\", \"Response\", \"Mixer\", \"VoltsToHertz\"],\n },\n)\n\nt_final = 7e-9 # Time for single qubit gates\nsideband = 50e6\ngauss_params_single = {\n \"amp\": Qty(value=0.5, min_val=0.4, max_val=0.6, unit=\"V\"),\n \"t_final\": Qty(\n value=t_final, min_val=0.5 * t_final, max_val=1.5 * t_final, unit=\"s\"\n ),\n \"sigma\": Qty(value=t_final / 4, min_val=t_final / 8, max_val=t_final / 2, unit=\"s\"),\n \"xy_angle\": Qty(value=0.0, min_val=-0.5 * np.pi, max_val=2.5 * np.pi, unit=\"rad\"),\n \"freq_offset\": Qty(\n value=-sideband - 3e6, min_val=-56 * 1e6, max_val=-52 * 1e6, unit=\"Hz 2pi\"\n ),\n \"delta\": Qty(value=-1, min_val=-5, max_val=3, unit=\"\"),\n}\n\ngauss_env_single = pulse.Envelope(\n name=\"gauss\",\n desc=\"Gaussian comp for single-qubit gates\",\n params=gauss_params_single,\n shape=envelopes.gaussian_nonorm,\n)\n\nnodrive_env = pulse.Envelope(\n name=\"no_drive\",\n params={\n \"t_final\": Qty(\n value=t_final, min_val=0.5 * t_final, max_val=1.5 * t_final, unit=\"s\"\n )\n },\n shape=envelopes.no_drive,\n)\n\nlo_freq_q1 = 5e9 + sideband\ncarrier_parameters = {\n \"freq\": Qty(value=lo_freq_q1, min_val=4.5e9, max_val=6e9, unit=\"Hz 2pi\"),\n \"framechange\": Qty(value=0.0, min_val=-np.pi, max_val=3 * np.pi, unit=\"rad\"),\n}\n\ncarr = pulse.Carrier(\n name=\"carrier\", desc=\"Frequency of the local oscillator\", params=carrier_parameters\n)\n\nlo_freq_q2 = 5.6e9 + sideband\ncarr_2 = copy.deepcopy(carr)\ncarr_2.params[\"freq\"].set_value(lo_freq_q2)\n\nrx90p_q1 = gates.Instruction(\n name=\"rx90p\",\n targets=[0],\n t_start=0.0,\n t_end=t_final,\n channels=[\"d1\", \"d2\"],\n params={\"use_t_before\": True},\n)\nrx90p_q2 = gates.Instruction(\n name=\"rx90p\",\n targets=[1],\n t_start=0.0,\n t_end=t_final,\n channels=[\"d1\", \"d2\"],\n params={\"use_t_before\": True},\n)\nQId_q1 = gates.Instruction(\n name=\"id\",\n targets=[0],\n t_start=0.0,\n t_end=t_final,\n channels=[\"d1\", \"d2\"],\n params={\"use_t_before\": True},\n)\nQId_q2 = gates.Instruction(\n name=\"id\",\n targets=[1],\n t_start=0.0,\n t_end=t_final,\n channels=[\"d1\", \"d2\"],\n params={\"use_t_before\": True},\n)\n\nrx90p_q1.add_component(gauss_env_single, \"d1\")\nrx90p_q1.add_component(carr, \"d1\")\nrx90p_q1.add_component(nodrive_env, 
\"d2\")\nrx90p_q1.add_component(copy.deepcopy(carr_2), \"d2\")\nrx90p_q1.comps[\"d2\"][\"carrier\"].params[\"framechange\"].set_value(\n (-sideband * t_final) * 2 * np.pi % (2 * np.pi)\n)\n\nrx90p_q2.add_component(copy.deepcopy(gauss_env_single), \"d2\")\nrx90p_q2.add_component(carr_2, \"d2\")\nrx90p_q2.add_component(nodrive_env, \"d1\")\nrx90p_q2.add_component(copy.deepcopy(carr), \"d1\")\nrx90p_q2.comps[\"d1\"][\"carrier\"].params[\"framechange\"].set_value(\n (-sideband * t_final) * 2 * np.pi % (2 * np.pi)\n)\n\n\nQId_q1.add_component(nodrive_env, \"d1\")\nQId_q1.add_component(copy.deepcopy(carr), \"d1\")\nQId_q1.add_component(nodrive_env, \"d2\")\nQId_q1.add_component(copy.deepcopy(carr_2), \"d2\")\nQId_q2.add_component(copy.deepcopy(nodrive_env), \"d2\")\nQId_q2.add_component(copy.deepcopy(carr_2), \"d2\")\nQId_q2.add_component(nodrive_env, \"d1\")\nQId_q2.add_component(copy.deepcopy(carr), \"d1\")\n\nY90p_q1 = copy.deepcopy(rx90p_q1)\nY90p_q1.name = \"ry90p\"\nX90m_q1 = copy.deepcopy(rx90p_q1)\nX90m_q1.name = \"rx90m\"\nY90m_q1 = copy.deepcopy(rx90p_q1)\nY90m_q1.name = \"ry90m\"\nY90p_q1.comps[\"d1\"][\"gauss\"].params[\"xy_angle\"].set_value(0.5 * np.pi)\nX90m_q1.comps[\"d1\"][\"gauss\"].params[\"xy_angle\"].set_value(np.pi)\nY90m_q1.comps[\"d1\"][\"gauss\"].params[\"xy_angle\"].set_value(1.5 * np.pi)\nsingle_q_gates = [QId_q1, rx90p_q1, Y90p_q1, X90m_q1, Y90m_q1]\n\n\nY90p_q2 = copy.deepcopy(rx90p_q2)\nY90p_q2.name = \"ry90p\"\nX90m_q2 = copy.deepcopy(rx90p_q2)\nX90m_q2.name = \"rx90m\"\nY90m_q2 = copy.deepcopy(rx90p_q2)\nY90m_q2.name = \"ry90m\"\nY90p_q2.comps[\"d2\"][\"gauss\"].params[\"xy_angle\"].set_value(0.5 * np.pi)\nX90m_q2.comps[\"d2\"][\"gauss\"].params[\"xy_angle\"].set_value(np.pi)\nY90m_q2.comps[\"d2\"][\"gauss\"].params[\"xy_angle\"].set_value(1.5 * np.pi)\nsingle_q_gates.extend([QId_q2, rx90p_q2, Y90p_q2, X90m_q2, Y90m_q2])\n\npmap = Pmap(single_q_gates, generator, model)\n\nexp = Exp(pmap)\n\ngenerator.devices[\"AWG\"].enable_drag_2()\n\nexp.set_opt_gates([\"rx90p[0]\"])\n\ngateset_opt_map = [\n [\n (\"rx90p[0]\", \"d1\", \"gauss\", \"amp\"),\n ],\n [\n (\"rx90p[0]\", \"d1\", \"gauss\", \"freq_offset\"),\n ],\n [\n (\"rx90p[0]\", \"d1\", \"gauss\", \"xy_angle\"),\n ],\n [\n (\"rx90p[0]\", \"d1\", \"gauss\", \"delta\"),\n ],\n]\n\npmap.set_opt_map(gateset_opt_map)\n\nopt = OptimalControl(\n dir_path=logdir,\n fid_func=fidelities.average_infid_set,\n fid_subspace=[\"Q1\", \"Q2\"],\n pmap=pmap,\n algorithm=algorithms.tf_sgd,\n options={\"maxiters\": 5},\n run_name=\"better_X90_tf_sgd\",\n)\n\nopt.set_exp(exp)\n\nwith open(\"test/two_qubit_data.pickle\", \"rb\") as filename:\n test_data = pickle.load(filename)\n\ngen_signal = generator.generate_signals(pmap.instructions[\"rx90p[0]\"])\nts = gen_signal[\"d1\"][\"ts\"]\nhdrift, hks = model.get_Hamiltonians()\npropagator = exp.propagation(gen_signal, \"rx90p[0]\")\n\n\ndef test_signals() -> None:\n np.testing.assert_allclose(ts, test_data[\"ts\"])\n np.testing.assert_allclose(\n actual=gen_signal[\"d1\"][\"values\"].numpy(),\n desired=test_data[\"signal\"][\"d1\"][\"values\"].numpy(),\n )\n\n\ndef test_hamiltonians() -> None:\n assert (hdrift.numpy() - test_data[\"hdrift\"].numpy() < 1).any()\n for key in hks:\n almost_equal(hks[key], test_data[\"hks\"][key])\n\n\ndef test_propagation() -> None:\n almost_equal(propagator, test_data[\"propagator\"])\n\n\[email protected]\[email protected]\[email protected]\[email protected]\ndef test_optim_tf_sgd() -> None:\n \"\"\"\n check if optimization result is below 1e-2\n \"\"\"\n 
opt.store_unitaries = True\n opt.optimize_controls()\n assert opt.current_best_goal < 0.01\n\n\[email protected]\[email protected]\[email protected]\[email protected]\ndef test_bad_tf_sgd() -> None:\n bad_tf_opt = OptimalControl(\n dir_path=logdir,\n fid_func=fidelities.average_infid_set,\n fid_subspace=[\"Q1\", \"Q2\"],\n pmap=pmap,\n algorithm=algorithms.tf_sgd,\n options={\"maxfun\": 2},\n run_name=\"better_X90_bad_tf\",\n )\n bad_tf_opt.set_exp(exp)\n\n with pytest.raises(KeyError):\n bad_tf_opt.optimize_controls()\n\n\[email protected]\[email protected]\[email protected]\ndef test_optim_lbfgs() -> None:\n lbfgs_opt = OptimalControl(\n dir_path=logdir,\n fid_func=fidelities.average_infid_set,\n fid_subspace=[\"Q1\", \"Q2\"],\n pmap=pmap,\n algorithm=algorithms.lbfgs,\n options={\"maxfun\": 2},\n run_name=\"better_X90_lbfgs\",\n )\n lbfgs_opt.set_exp(exp)\n\n lbfgs_opt.optimize_controls()\n assert lbfgs_opt.current_best_goal < 0.01\n\n\[email protected]\[email protected]\[email protected]\ndef test_optim_lbfgs_grad_free() -> None:\n lbfgs_grad_free_opt = OptimalControl(\n dir_path=logdir,\n fid_func=fidelities.average_infid_set,\n fid_subspace=[\"Q1\", \"Q2\"],\n pmap=pmap,\n algorithm=algorithms.lbfgs_grad_free,\n options={\"maxfun\": 5},\n run_name=\"grad_free_lbfgs\",\n )\n lbfgs_grad_free_opt.set_exp(exp)\n\n lbfgs_grad_free_opt.optimize_controls()\n assert lbfgs_grad_free_opt.current_best_goal < 0.01\n" ]
[ [ "numpy.testing.assert_array_almost_equal", "numpy.testing.assert_allclose", "numpy.array" ] ]
stanton119/data-analysis
[ "b6fda815c6cc1798ba13a5d2680369b7e5dfcdf9" ]
[ "TimeSeries/not_finished/other/cnn.py" ]
[ "# %%\nimport numpy as np\nimport pandas as pd\nfrom time import process_time, time\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nimport utilities\nimport data\n\nplt.style.use(\"seaborn-whitegrid\")\n\n\n# %% Generate data\n# df = utilities.gen_ar_data()\n# df = utilities.get_stock_data()\n# df = utilities.get_weather_data()\ndf = data.get_energy_data()\n\ndf_train, df_test = utilities.split_ts(df)\ndf_train.plot()\ndf_test.plot()\n\n\nforecast_gap = 170\ntrain_len = 60\nforecast_len = 24\n\n# split into training samples\nx_train, y_train = utilities.split_sequence(\n df=df_train,\n y_col=\"y\",\n train_len=train_len,\n forecast_gap=forecast_gap,\n forecast_len=forecast_len,\n)\nx_test, y_test = utilities.split_sequence(\n df=df_test,\n y_col=\"y\",\n train_len=train_len,\n forecast_gap=forecast_gap,\n forecast_len=forecast_len,\n)\n\n# tf approach\nif 0:\n window_df = tf.keras.preprocessing.timeseries_dataset_from_array(\n df_train[\"y\"].iloc[:-forecast_gap].to_numpy(),\n df_train[\"y\"].iloc[forecast_gap:].to_numpy(),\n sequence_length=train_len,\n )\n\n for batch in window_df:\n inputs, targets = batch\n\n\n# x_train.shape = (n, train_len, n_features), y_train.shape = (n, forecast_len, forecast_len)\n# _, train_len, n_features = x_train.shape\n# forecast_len = y_train.shape[1]\n\n# scale inputs\nscaler = StandardScaler()\n\nx_train = scaler.fit_transform(x_train.reshape(-1, x_train.shape[-1])).reshape(\n x_train.shape\n)\nx_test = scaler.transform(x_test.reshape(-1, x_test.shape[-1])).reshape(\n x_test.shape\n)\n\n\n# %%\n\n# define model\nmodel = tf.keras.Sequential()\nmodel.add(\n tf.keras.layers.Conv1D(\n filters=64,\n kernel_size=30,\n activation=\"relu\",\n input_shape=(train_len, n_features),\n )\n)\nmodel.add(tf.keras.layers.MaxPooling1D(pool_size=2))\nmodel.add(tf.keras.layers.Flatten())\nmodel.add(tf.keras.layers.Dense(50, activation=\"relu\"))\nmodel.add(tf.keras.layers.Dense(forecast_len))\n\nmodel.compile(\n loss=\"mse\",\n optimizer=tf.optimizers.Adam(),\n # optimizer=tf.optimizers.SGD(learning_rate=0.01),\n # metrics=[\"mae\"],\n)\n\nmodel.summary()\n\nx_train.shape\nx_test.shape\n\n# %%\n# early stopping\nearly_stopping = tf.keras.callbacks.EarlyStopping(\n patience=20, monitor=\"val_loss\", restore_best_weights=True\n)\n\n# fit model\nhistory = model.fit(\n x_train,\n y_train,\n validation_split=0.25,\n batch_size=64,\n epochs=1000,\n verbose=True,\n callbacks=[early_stopping],\n)\nplt.plot(history.history[\"loss\"])\nplt.plot(history.history[\"val_loss\"])\n\n# %% Predict\ny_train_hat = model.predict(x_train, verbose=1)\ny_test_hat = model.predict(x_test, verbose=1)\n\n\n# %% Plot results\ndef construct_results_df(df, y, y_hat):\n df_results = pd.DataFrame(index=df.index)\n df_results[\"y\"] = df[\"y\"]\n df_results[\"tf_y\"] = np.nan\n df_results[\"tf_y\"].iloc[train_len + forecast_gap-1 : -forecast_len-1] = y[:, 0].flatten()\n\n for col in range(y_hat.shape[1]):\n df_results[f\"y_hat_{col}\"] = np.nan\n df_results[f\"y_hat_{col}\"].iloc[\n train_len + forecast_gap-1 + col : -forecast_len-1 + col\n ] = y_hat[:, col]\n\n # df_results[f'mse_{col}'] = (df_results[f'y_hat_{col}'] - df_results['y'])**2\n return df_results\n\n\ny_train.shape\ny_train[:, 0].shape\ndf_train.iloc[train_len : -forecast_len - 1].shape\ndf_results_train = construct_results_df(df_train, y_train, y_train_hat)\ndf_results_test = construct_results_df(df_test, y_test, 
y_test_hat)\n\ndf_results_train.iloc[:,:5].plot()\ndf_results_test.iloc[:,:5].plot()\n\n# %% Metrics\ndef construct_metrics_df(df_results):\n df_metrics = pd.DataFrame(index=df.index)\n\n for col in df_results.columns:\n if \"hat\" in col:\n df_metrics[f\"mse_{col}\"] = (df_results[col] - df_results[\"y\"]) ** 2\n return df_metrics\n\n\ndf_metrics_train = construct_metrics_df(df_results_train)\ndf_metrics_test = construct_metrics_df(df_results_test)\n\nmse_cols = [col for col in df_results_train.columns if \"mse\" in col]\nhat_cols = [col for col in df_results_train.columns if \"hat\" in col]\n\ndf_metrics_train.cumsum().plot()\ndf_metrics_test.cumsum().plot()\n\ndf_results_train.plot()\ndf_results_test.plot()\n" ]
[ [ "tensorflow.keras.layers.Conv1D", "sklearn.preprocessing.StandardScaler", "tensorflow.keras.layers.Flatten", "pandas.DataFrame", "tensorflow.optimizers.Adam", "matplotlib.pyplot.plot", "tensorflow.keras.layers.MaxPooling1D", "tensorflow.keras.Sequential", "tensorflow.keras.layers.Dense", "matplotlib.pyplot.style.use", "tensorflow.keras.callbacks.EarlyStopping" ] ]
akutta/hercules
[ "7fa89e8ac079ec8863675474009a1549d964dae6" ]
[ "swivel.py" ]
[ "#!/usr/bin/env python3\n#\n# Copyright 2016 Google Inc. All Rights Reserved.\n# Copyright 2017 Sourced Technologies S. L.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Submatrix-wise Vector Embedding Learner.\n\nImplementation of SwiVel algorithm described at:\nhttp://arxiv.org/abs/1602.02215\n\nThis program expects an input directory that contains the following files.\n\n row_vocab.txt, col_vocab.txt\n\n The row an column vocabulary files. Each file should contain one token per\n line; these will be used to generate a tab-separate file containing the\n trained embeddings.\n\n row_sums.txt, col_sum.txt\n\n The matrix row and column marginal sums. Each file should contain one\n decimal floating point number per line which corresponds to the marginal\n count of the matrix for that row or column.\n\n shards.recs\n\n A file containing the sub-matrix shards, stored as TFRecords. Each shard is\n expected to be a serialzed tf.Example protocol buffer with the following\n properties:\n\n global_row: the global row indicies contained in the shard\n global_col: the global column indicies contained in the shard\n sparse_local_row, sparse_local_col, sparse_value: three parallel arrays\n that are a sparse representation of the submatrix counts.\n\nIt will generate embeddings, training from the input directory for\nthe specified number of epochs. When complete, it will output the trained\nvectors to a tab-separated file that contains one line per embedding. Row and\ncolumn embeddings are stored in separate files.\n\n\"\"\"\n\nimport glob\nimport math\nimport os\nimport time\nimport threading\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.python.client import device_lib\n\nflags = tf.app.flags\n\nflags.DEFINE_string(\"input_base_path\", None,\n \"Directory containing input shards, vocabularies, \"\n \"and marginals.\")\nflags.DEFINE_string(\"output_base_path\", None,\n \"Path where to write the trained embeddings.\")\nflags.DEFINE_integer(\"embedding_size\", 300, \"Size of the embeddings\")\nflags.DEFINE_boolean(\"trainable_bias\", False, \"Biases are trainable\")\nflags.DEFINE_integer(\"submatrix_rows\", 4096,\n \"Rows in each training submatrix. This must match \"\n \"the training data.\")\nflags.DEFINE_integer(\"submatrix_cols\", 4096,\n \"Rows in each training submatrix. 
This must match \"\n \"the training data.\")\nflags.DEFINE_float(\"loss_multiplier\", 1.0 / 4096,\n \"constant multiplier on loss.\")\nflags.DEFINE_float(\"confidence_exponent\", 0.5,\n \"Exponent for l2 confidence function\")\nflags.DEFINE_float(\"confidence_scale\", 0.25,\n \"Scale for l2 confidence function\")\nflags.DEFINE_float(\"confidence_base\", 0.1, \"Base for l2 confidence function\")\nflags.DEFINE_float(\"learning_rate\", 1.0, \"Initial learning rate\")\nflags.DEFINE_string(\"optimizer\", \"Adagrad\",\n \"SGD optimizer (tf.train.*Optimizer)\")\nflags.DEFINE_integer(\"num_concurrent_steps\", 2,\n \"Number of threads to train with\")\nflags.DEFINE_integer(\"num_readers\", 4,\n \"Number of threads to read the input data and feed it\")\nflags.DEFINE_float(\"num_epochs\", 40, \"Number epochs to train for\")\nflags.DEFINE_float(\"per_process_gpu_memory_fraction\", 0,\n \"Fraction of GPU memory to use, 0 means allow_growth\")\nflags.DEFINE_integer(\"num_gpus\", 0,\n \"Number of GPUs to use, 0 means all available\")\nflags.DEFINE_string(\"logs\", \"\",\n \"Path for TensorBoard logs (empty value disables them)\")\n\nFLAGS = flags.FLAGS\n\n\ndef log(message, *args, **kwargs):\n tf.logging.info(message, *args, **kwargs)\n\n\ndef get_available_gpus():\n return [d.name for d in device_lib.list_local_devices()\n if d.device_type == \"GPU\"]\n\n\ndef embeddings_with_init(vocab_size, embedding_dim, name):\n \"\"\"Creates and initializes the embedding tensors.\"\"\"\n return tf.get_variable(name=name,\n shape=[vocab_size, embedding_dim],\n initializer=tf.random_normal_initializer(\n stddev=math.sqrt(1.0 / embedding_dim)))\n\n\ndef count_matrix_input(filenames, submatrix_rows, submatrix_cols):\n \"\"\"Reads submatrix shards from disk.\"\"\"\n filename_queue = tf.train.string_input_producer(filenames)\n reader = tf.WholeFileReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example,\n features={\n \"global_row\": tf.FixedLenFeature([submatrix_rows], dtype=tf.int64),\n \"global_col\": tf.FixedLenFeature([submatrix_cols], dtype=tf.int64),\n \"sparse_local_row\": tf.VarLenFeature(dtype=tf.int64),\n \"sparse_local_col\": tf.VarLenFeature(dtype=tf.int64),\n \"sparse_value\": tf.VarLenFeature(dtype=tf.float32)\n })\n\n global_row = features[\"global_row\"]\n global_col = features[\"global_col\"]\n\n sparse_local_row = features[\"sparse_local_row\"].values\n sparse_local_col = features[\"sparse_local_col\"].values\n sparse_count = features[\"sparse_value\"].values\n\n sparse_indices = tf.concat(axis=1, values=[tf.expand_dims(sparse_local_row, 1),\n tf.expand_dims(sparse_local_col, 1)])\n count = tf.sparse_to_dense(sparse_indices, [submatrix_rows, submatrix_cols],\n sparse_count, validate_indices=False)\n\n queued_global_row, queued_global_col, queued_count = tf.train.batch(\n [global_row, global_col, count],\n batch_size=1,\n num_threads=FLAGS.num_readers,\n capacity=32)\n\n queued_global_row = tf.reshape(queued_global_row, [submatrix_rows])\n queued_global_col = tf.reshape(queued_global_col, [submatrix_cols])\n queued_count = tf.reshape(queued_count, [submatrix_rows, submatrix_cols])\n\n return queued_global_row, queued_global_col, queued_count\n\n\ndef read_marginals_file(filename):\n \"\"\"Reads text file with one number per line to an array.\"\"\"\n with open(filename) as lines:\n return [float(line) for line in lines]\n\n\ndef write_embedding_tensor_to_disk(vocab_path, output_path, sess, embedding):\n \"\"\"Writes tensor to output_path 
as tsv\"\"\"\n # Fetch the embedding values from the model\n embeddings = sess.run(embedding)\n\n with open(output_path, \"w\") as out_f:\n with open(vocab_path) as vocab_f:\n for index, word in enumerate(vocab_f):\n word = word.strip()\n embedding = embeddings[index]\n out_f.write(word + \"\\t\" + \"\\t\".join(\n [str(x) for x in embedding]) + \"\\n\")\n\n\ndef write_embeddings_to_disk(config, model, sess):\n \"\"\"Writes row and column embeddings disk\"\"\"\n # Row Embedding\n row_vocab_path = config.input_base_path + \"/row_vocab.txt\"\n row_embedding_output_path = config.output_base_path + \"/row_embedding.tsv\"\n log(\"Writing row embeddings to: %s\", row_embedding_output_path)\n write_embedding_tensor_to_disk(row_vocab_path, row_embedding_output_path,\n sess, model.row_embedding)\n\n # Column Embedding\n col_vocab_path = config.input_base_path + \"/col_vocab.txt\"\n col_embedding_output_path = config.output_base_path + \"/col_embedding.tsv\"\n log(\"Writing column embeddings to: %s\", col_embedding_output_path)\n write_embedding_tensor_to_disk(col_vocab_path, col_embedding_output_path,\n sess, model.col_embedding)\n\n\nclass SwivelModel:\n \"\"\"Small class to gather needed pieces from a Graph being built.\"\"\"\n\n def __init__(self, config):\n \"\"\"Construct graph for dmc.\"\"\"\n self._config = config\n\n # Create paths to input data files\n log(\"Reading model from: %s\", config.input_base_path)\n count_matrix_files = glob.glob(os.path.join(config.input_base_path, \"shard-*.pb\"))\n row_sums_path = os.path.join(config.input_base_path, \"row_sums.txt\")\n col_sums_path = os.path.join(config.input_base_path, \"col_sums.txt\")\n\n # Read marginals\n row_sums = read_marginals_file(row_sums_path)\n col_sums = read_marginals_file(col_sums_path)\n\n self.n_rows = len(row_sums)\n self.n_cols = len(col_sums)\n log(\"Matrix dim: (%d,%d) SubMatrix dim: (%d,%d)\",\n self.n_rows, self.n_cols, config.submatrix_rows,\n config.submatrix_cols)\n if self.n_cols < config.submatrix_cols:\n raise ValueError(\n \"submatrix_cols={0} can not be bigger than columns number={1} \"\n \"(specify submatrix_cols={1})\".format(config.submatrix_cols, self.n_cols))\n if self.n_rows < config.submatrix_rows:\n raise ValueError(\n \"submatrix_rows={0} can not be bigger than rows number={1} \"\n \"(specify submatrix_rows={1})\".format(config.submatrix_rows, self.n_cols))\n self.n_submatrices = (self.n_rows * self.n_cols /\n (config.submatrix_rows * config.submatrix_cols))\n log(\"n_submatrices: %d\", self.n_submatrices)\n\n with tf.device(\"/cpu:0\"):\n # ===== CREATE VARIABLES ======\n # Get input\n global_row, global_col, count = count_matrix_input(\n count_matrix_files, config.submatrix_rows,\n config.submatrix_cols)\n\n # Embeddings\n self.row_embedding = embeddings_with_init(\n embedding_dim=config.embedding_size,\n vocab_size=self.n_rows,\n name=\"row_embedding\")\n self.col_embedding = embeddings_with_init(\n embedding_dim=config.embedding_size,\n vocab_size=self.n_cols,\n name=\"col_embedding\")\n tf.summary.histogram(\"row_emb\", self.row_embedding)\n tf.summary.histogram(\"col_emb\", self.col_embedding)\n\n matrix_log_sum = math.log(np.sum(row_sums) + 1)\n row_bias_init = [math.log(x + 1) for x in row_sums]\n col_bias_init = [math.log(x + 1) for x in col_sums]\n self.row_bias = tf.Variable(\n row_bias_init, trainable=config.trainable_bias)\n self.col_bias = tf.Variable(\n col_bias_init, trainable=config.trainable_bias)\n tf.summary.histogram(\"row_bias\", self.row_bias)\n tf.summary.histogram(\"col_bias\", 
self.col_bias)\n\n # Add optimizer\n l2_losses = []\n sigmoid_losses = []\n self.global_step = tf.Variable(0, name=\"global_step\")\n learning_rate = tf.Variable(config.learning_rate,\n name=\"learning_rate\")\n opt = getattr(tf.train, FLAGS.optimizer + \"Optimizer\")(\n learning_rate)\n tf.summary.scalar(\"learning_rate\", learning_rate)\n\n all_grads = []\n\n devices = [\"/gpu:%d\" % i for i in range(FLAGS.num_gpus)] \\\n if FLAGS.num_gpus > 0 else get_available_gpus()\n self.devices_number = len(devices)\n if not self.devices_number:\n devices = [\"/cpu:0\"]\n self.devices_number = 1\n for dev in devices:\n with tf.device(dev):\n with tf.name_scope(dev[1:].replace(\":\", \"_\")):\n # ===== CREATE GRAPH =====\n # Fetch embeddings.\n selected_row_embedding = tf.nn.embedding_lookup(\n self.row_embedding, global_row)\n selected_col_embedding = tf.nn.embedding_lookup(\n self.col_embedding, global_col)\n\n # Fetch biases.\n selected_row_bias = tf.nn.embedding_lookup(\n [self.row_bias], global_row)\n selected_col_bias = tf.nn.embedding_lookup(\n [self.col_bias], global_col)\n\n # Multiply the row and column embeddings to generate\n # predictions.\n predictions = tf.matmul(\n selected_row_embedding, selected_col_embedding,\n transpose_b=True)\n\n # These binary masks separate zero from non-zero values.\n count_is_nonzero = tf.to_float(tf.cast(count, tf.bool))\n count_is_zero = 1 - count_is_nonzero\n\n objectives = count_is_nonzero * tf.log(count + 1e-30)\n objectives -= tf.reshape(\n selected_row_bias, [config.submatrix_rows, 1])\n objectives -= selected_col_bias\n objectives += matrix_log_sum\n\n err = predictions - objectives\n\n # The confidence function scales the L2 loss based on\n # the raw co-occurrence count.\n l2_confidence = (\n config.confidence_base +\n config.confidence_scale * tf.pow(\n count, config.confidence_exponent))\n\n l2_loss = config.loss_multiplier * tf.reduce_sum(\n 0.5 * l2_confidence * err * err * count_is_nonzero)\n l2_losses.append(tf.expand_dims(l2_loss, 0))\n\n sigmoid_loss = config.loss_multiplier * tf.reduce_sum(\n tf.nn.softplus(err) * count_is_zero)\n sigmoid_losses.append(tf.expand_dims(sigmoid_loss, 0))\n\n loss = l2_loss + sigmoid_loss\n grads = opt.compute_gradients(loss)\n all_grads.append(grads)\n\n with tf.device(\"/cpu:0\"):\n # ===== MERGE LOSSES =====\n l2_loss = tf.reduce_mean(tf.concat(axis=0, values=l2_losses), 0,\n name=\"l2_loss\")\n sigmoid_loss = tf.reduce_mean(\n tf.concat(axis=0, values=sigmoid_losses), 0,\n name=\"sigmoid_loss\")\n overall_loss = l2_loss + sigmoid_loss\n average = tf.train.ExponentialMovingAverage(0.999)\n loss_average_op = average.apply(\n (overall_loss, l2_loss, sigmoid_loss))\n self.loss = average.average(overall_loss)\n tf.summary.scalar(\"overall_loss\", self.loss)\n tf.summary.scalar(\"l2_loss\", average.average(l2_loss))\n tf.summary.scalar(\"sigmoid_loss\", average.average(sigmoid_loss))\n\n # Apply the gradients to adjust the shared variables.\n apply_gradient_ops = []\n for grads in all_grads:\n apply_gradient_ops.append(opt.apply_gradients(\n grads, global_step=self.global_step))\n\n self.train_op = tf.group(loss_average_op, *apply_gradient_ops)\n self.saver = tf.train.Saver(sharded=True)\n\n def initialize_summary(self, sess):\n log(\"creating TensorBoard stuff...\")\n self.summary = tf.summary.merge_all()\n self.writer = tf.summary.FileWriter(FLAGS.logs, sess.graph)\n projector_config = \\\n tf.contrib.tensorboard.plugins.projector.ProjectorConfig()\n embedding_config = projector_config.embeddings.add()\n 
length = min(10000, self.n_rows, self.n_cols)\n self.embedding10k = tf.Variable(\n tf.zeros((length, self._config.embedding_size)),\n name=\"top10k_embedding\")\n embedding_config.tensor_name = self.embedding10k.name\n embedding_config.metadata_path = os.path.join(\n self._config.input_base_path, \"row_vocab.txt\")\n tf.contrib.tensorboard.plugins.projector.visualize_embeddings(\n self.writer, projector_config)\n self.saver = tf.train.Saver((self.embedding10k,), max_to_keep=1)\n\n def write_summary(self, sess):\n log(\"writing the summary...\")\n length = min(10000, self.n_rows, self.n_cols)\n assignment = self.embedding10k.assign(\n (self.row_embedding[:length] + self.col_embedding[:length]) / 2)\n summary, _, global_step = sess.run(\n (self.summary, assignment, self.global_step))\n self.writer.add_summary(summary, global_step)\n self.saver.save(\n sess, os.path.join(FLAGS.logs, \"embeddings10k.checkpoint\"),\n global_step)\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n start_time = time.time()\n\n # Create the output path. If this fails, it really ought to fail now. :)\n if not os.path.isdir(FLAGS.output_base_path):\n os.makedirs(FLAGS.output_base_path)\n\n # Create and run model\n with tf.Graph().as_default():\n log(\"creating the model...\")\n model = SwivelModel(FLAGS)\n\n # Create a session for running Ops on the Graph.\n gpu_opts = {}\n if FLAGS.per_process_gpu_memory_fraction > 0:\n gpu_opts[\"per_process_gpu_memory_fraction\"] = \\\n FLAGS.per_process_gpu_memory_fraction\n else:\n gpu_opts[\"allow_growth\"] = True\n gpu_options = tf.GPUOptions(**gpu_opts)\n sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n if FLAGS.logs:\n model.initialize_summary(sess)\n\n # Run the Op to initialize the variables.\n log(\"initializing the variables...\")\n sess.run(tf.global_variables_initializer())\n\n # Start feeding input\n log(\"starting the input threads...\")\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # Calculate how many steps each thread should run\n n_total_steps = int(FLAGS.num_epochs * model.n_rows * model.n_cols) / (\n FLAGS.submatrix_rows * FLAGS.submatrix_cols)\n n_steps_per_thread = n_total_steps / (\n FLAGS.num_concurrent_steps * model.devices_number)\n n_submatrices_to_train = model.n_submatrices * FLAGS.num_epochs\n t0 = [time.time()]\n n_steps_between_status_updates = 100\n n_steps_between_summary_updates = 10000\n status_i = [0, 0]\n status_lock = threading.Lock()\n msg = (\"%%%dd/%%d submatrices trained (%%.1f%%%%), \"\n \"%%5.1f submatrices/sec | loss %%f\") % \\\n len(str(n_submatrices_to_train))\n\n def TrainingFn():\n for _ in range(int(n_steps_per_thread)):\n _, global_step, loss = sess.run((\n model.train_op, model.global_step, model.loss))\n\n show_status = False\n update_summary = False\n with status_lock:\n new_i = global_step // n_steps_between_status_updates\n if new_i > status_i[0]:\n status_i[0] = new_i\n show_status = True\n new_i = global_step // n_steps_between_summary_updates\n if new_i > status_i[1]:\n status_i[1] = new_i\n update_summary = True\n if show_status:\n elapsed = float(time.time() - t0[0])\n log(msg, global_step, n_submatrices_to_train,\n 100.0 * global_step / n_submatrices_to_train,\n n_steps_between_status_updates / elapsed, loss)\n t0[0] = time.time()\n if update_summary and FLAGS.logs:\n model.write_summary(sess)\n\n # Start training threads\n train_threads = []\n for _ in range(FLAGS.num_concurrent_steps):\n t = threading.Thread(target=TrainingFn)\n 
train_threads.append(t)\n t.start()\n\n # Wait for threads to finish.\n for t in train_threads:\n t.join()\n\n coord.request_stop()\n coord.join(threads)\n\n # Write out vectors\n write_embeddings_to_disk(FLAGS, model, sess)\n\n # Shutdown\n sess.close()\n log(\"Elapsed: %s\", time.time() - start_time)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.train.start_queue_runners", "tensorflow.group", "tensorflow.matmul", "tensorflow.reshape", "tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig", "tensorflow.nn.embedding_lookup", "tensorflow.global_variables_initializer", "tensorflow.cast", "tensorflow.concat", "tensorflow.summary.histogram", "tensorflow.FixedLenFeature", "tensorflow.train.Saver", "tensorflow.logging.info", "tensorflow.Variable", "tensorflow.ConfigProto", "tensorflow.nn.softplus", "tensorflow.app.run", "tensorflow.logging.set_verbosity", "tensorflow.zeros", "tensorflow.train.batch", "tensorflow.train.Coordinator", "tensorflow.summary.scalar", "tensorflow.expand_dims", "tensorflow.train.string_input_producer", "tensorflow.python.client.device_lib.list_local_devices", "tensorflow.log", "tensorflow.reduce_sum", "tensorflow.summary.merge_all", "tensorflow.sparse_to_dense", "tensorflow.WholeFileReader", "tensorflow.Graph", "numpy.sum", "tensorflow.VarLenFeature", "tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings", "tensorflow.train.ExponentialMovingAverage", "tensorflow.device", "tensorflow.summary.FileWriter", "tensorflow.pow", "tensorflow.GPUOptions" ] ]
SumeetSinha/Signal_Analysis
[ "f8d3a6f81969b7512e2db1980f5cf8b782bc3eb3" ]
[ "Seismic_Motion.py" ]
[ "__author__ = \"Sumeet K. Sinha\"\r\n__credits__ = [\"\"]\r\n__license__ = \"GPL\"\r\n__version__ = \"2.0\"\r\n__maintainer__ = \"Sumeet K. Sinha\"\r\n__email__ = \"[email protected]\"\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.fftpack\r\nfrom scipy import integrate\r\nimport math\r\n\r\n\r\ndef FFT(x,dt,maxf,plot=True):\r\n\r\n\t\"\"\"\r\n\tFunction to calculate FFT of a signal\r\n\r\n\t...\r\n\r\n\tAttributes\r\n\t----------\r\n\tx : float\r\n\t\tseries data with equal spacing dt\r\n\tdt : float\r\n\t\tperiod of sampling data \r\n\tmaxf : int\r\n\t\tmaximum frequency up to which FFT is desired\r\n\tplot : bool\r\n\t\twhether a plot should be showed or not (default True)\r\n\r\n\tReturns\r\n\t-------\r\n\tFreq : list\r\n\t\tfrequency content in the signal\r\n\tAmp : list\r\n\t\tAmplitude for the corresponding frequencies\r\n \"\"\"\r\n\r\n\t# Number of sample points\r\n\tN = x.size;\r\n\t# Total Time \r\n\tT = N*dt;\r\n\t# sample spacing is dt\r\n\t# sampling frequency\r\n\tFs = 1/dt;\r\n\txfft = scipy.fftpack.fft(x);\r\n\txfreq = np.linspace(0.0, Fs/2, N/2);\r\n\r\n\txfftHalf = 2.0/N * np.abs(xfft[:N//2]);\r\n\txfftHalf[0] = xfftHalf[0]/2;\r\n\r\n\tif(plot):\r\n\t\tfig, ax = plt.subplots()\r\n\t\tax.plot(xfreq, xfftHalf,'-k')\r\n\t\tif(maxf is not None):\r\n\t\t\tplt.xlim(0, maxf)\r\n\t\tplt.ylabel('Fourier transform |FFT(x)| [unit(x)]')\r\n\t\tplt.xlabel('Frequency [1/unit(dt)]')\r\n\t\tplt.show()\r\n\r\n\tFreq = xfreq;\r\n\tAmp = xfftHalf\r\n\r\n\treturn Freq, Amp\r\n\r\n\r\ndef ResSpec(Acc,dt,damp,maxT,plot=True):\r\n\r\n\t\"\"\"\r\n\tFunction to calculate Response Spectrum of an earthquake motion\r\n\r\n\tAttributes\r\n\t----------\r\n\tAcc : float\r\n\t\tacceleration series \r\n\tdt : float\r\n\t\tperiod of sampling data \r\n\tdamp : float\r\n\t\tviscous damping in % \r\n\tmaxT : float\r\n\t\tmaximum time period of evaluation \r\n\tplot : bool\r\n\t\twhether a plot should be showed or not (default True)\r\n\r\n\tReturns\r\n\t-------\r\n\tT : list\r\n\t\ttime period\r\n\tSa : list\r\n\t\tmaximum acceleration \r\n \"\"\"\r\n\r\n\tu = 0*Acc;\r\n\tv = 0*Acc;\r\n\tac = 0*Acc;\r\n\r\n\tLengthAg = len(Acc);\r\n\r\n\tNumSteps = int(maxT/dt+1);\r\n\tT = np.linspace(0, maxT, num=NumSteps); # Time Period\r\n\tSd = 0*T;\t \t\t\t # Spectral Acceleration\r\n\tSv = 0*T; # Spectral Displacement\r\n\tSa = 0*T; # Spectral Acceleration\r\n\r\n\r\n\tfor j in range(1,NumSteps):\r\n\t\tomega = 2.0*math.pi/T[j];\r\n\t\tm = 1.0; # mass\r\n\t\tk = omega*omega*m; # stiffness\r\n\t\tc = 2.0*m*omega*damp/100.0 # viscous damping\r\n\t\tK = k+3.0*c/dt+6.0*m/(dt**2);\r\n\t\ta = 6.0*m/dt+3.0*c;\r\n\t\tb = 3.0*m+dt*c/2.0;\r\n\r\n\t\t# initial conditions \r\n\t\tac = 0*Acc;\r\n\t\tu = 0*Acc;\r\n\t\tv = 0*Acc;\r\n\r\n\t\tfor i in range(0,LengthAg-1):\r\n\t\t\tdf=-(Acc[i+1]-Acc[i])+a*v[i]+b*ac[i]; # delta Force\r\n\t\t\tdu=df/K;\r\n\t\t\tdv=3.0*du/dt-3.0*v[i]-dt*ac[i]/2.0;\r\n\t\t\tdac=6.0*(du-dt*v[i])/(dt)**2.0-3.0*ac[i];\r\n\t\t\tu[i+1]=u[i]+du;\r\n\t\t\tv[i+1]=v[i]+dv;\r\n\t\t\tac[i+1]=ac[i]+dac; \r\n\r\n\t\tSd[j]=np.amax( np.absolute(u));\r\n\t\tSv[j]=np.amax( np.absolute(v));\r\n\t\tSa[j]=np.amax( np.absolute(ac));\r\n\r\n\tSa[0]=np.amax( np.absolute(Acc));\r\n\r\n\tif(plot):\r\n\t\tfig, ax = plt.subplots()\r\n\t\tax.plot(T, Sa,'-k')\r\n\t\tplt.ylabel('Pseudo Response Acceleration (PSa) [unit(Acc)]')\r\n\t\tplt.xlabel('Time Period (T) [unit(dt)]')\r\n\t\tplt.show()\r\n\r\n\treturn T, Sa\r\n\r\n\r\ndef Arias_Intensity(Acc,Time,plot=True):\r\n\r\n\t\"\"\"\r\n\tFunction to 
calculate Arias intensity I\r\n\t\tI(t) = pi/(2g) (integral(0,t) Acc**2 dt)\r\n\t\tIa = max(I)\r\n\r\n\tAttributes\r\n\t----------\r\n\tAcc : float\r\n\t\tacceleration series \r\n\tTime : float\r\n\t\ttime data or series \r\n\tplot : bool\r\n\t\twhether a plot should be showed or not (default True)\r\n\r\n\tReturns\r\n\t-------\r\n\tIa : float\r\n\t\tmaximum Arias intensity \r\n\tI : list\r\n\t\tcumulative Arias intensity with time as % of Ia\r\n \"\"\"\r\n\tg = 9.81;\r\n\tpi = math.pi;\r\n\tAcc = np.power(Acc,2);\r\n\r\n\tI = pi/2/g*integrate.cumtrapz(y=Acc,x=Time,initial=0);\r\n\tIa = max(I);\r\n\r\n\tI = I/Ia*100;\r\n\r\n\tif(plot==True):\r\n\t\tfig, ax = plt.subplots()\r\n\t\tax.plot(Time,I,'-k')\r\n\t\tplt.ylabel('Arias Intensity % ($I_a$ = '+str(round(Ia,4))+' m/s)')\r\n\t\tplt.xlabel('Time (T) [s]')\r\n\t\tplt.show()\r\n\r\n\r\n\treturn Ia,I;" ]
[ [ "matplotlib.pyplot.xlim", "numpy.absolute", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.subplots", "scipy.integrate.cumtrapz", "numpy.power", "matplotlib.pyplot.ylabel", "numpy.abs", "matplotlib.pyplot.show", "numpy.linspace" ] ]
lipovsek/avalanche
[ "1f06502b12140b39f48adf5a5f3b5de8ec2a930b" ]
[ "tests/test_avalanche_dataset.py" ]
[ "import unittest\n\nfrom os.path import expanduser\n\nimport avalanche\nfrom avalanche.benchmarks.datasets import default_dataset_location\nfrom avalanche.models import SimpleMLP\nfrom torch.optim import SGD\nfrom torch.nn import CrossEntropyLoss\nfrom avalanche.training.supervised import Naive\nfrom avalanche.benchmarks.generators import dataset_benchmark\nimport PIL\nimport torch\nfrom PIL import ImageChops\nfrom PIL.Image import Image\nfrom torch import Tensor\nfrom torch.utils.data import TensorDataset, Subset, ConcatDataset\nfrom torchvision.datasets import MNIST\nfrom torchvision.transforms import (\n ToTensor,\n RandomCrop,\n ToPILImage,\n Compose,\n Lambda,\n CenterCrop,\n)\nfrom typing import List\n\nfrom avalanche.benchmarks.scenarios.generic_benchmark_creation import (\n create_generic_benchmark_from_tensor_lists,\n)\nfrom avalanche.benchmarks.utils import (\n AvalancheDataset,\n AvalancheSubset,\n AvalancheConcatDataset,\n AvalancheDatasetType,\n AvalancheTensorDataset,\n concat_datasets_sequentially,\n)\nfrom avalanche.benchmarks.utils.dataset_utils import ConstantSequence\nfrom avalanche.training.utils import load_all_dataset\nimport random\n\nimport numpy as np\n\n\ndef pil_images_equal(img_a, img_b):\n diff = ImageChops.difference(img_a, img_b)\n\n return not diff.getbbox()\n\n\ndef zero_if_label_2(img_tensor: Tensor, class_label):\n if int(class_label) == 2:\n torch.full(img_tensor.shape, 0.0, out=img_tensor)\n\n return img_tensor, class_label\n\n\nclass AvalancheDatasetTests(unittest.TestCase):\n def test_mnist_no_transforms(self):\n dataset = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n x, y = dataset[0]\n self.assertIsInstance(x, Image)\n self.assertEqual([x.width, x.height], [28, 28])\n self.assertIsInstance(y, int)\n\n def test_mnist_native_transforms(self):\n dataset = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\",\n download=True,\n transform=ToTensor(),\n )\n x, y = dataset[0]\n self.assertIsInstance(x, Tensor)\n self.assertEqual(x.shape, (1, 28, 28))\n self.assertIsInstance(y, int)\n\n def test_avalanche_dataset_transform(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n x, y = dataset_mnist[0]\n dataset = AvalancheDataset(dataset_mnist, transform=ToTensor())\n x2, y2, t2 = dataset[0]\n\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(y2, int)\n self.assertIsInstance(t2, int)\n self.assertEqual(0, t2)\n self.assertTrue(torch.equal(ToTensor()(x), x2))\n self.assertEqual(y, y2)\n\n def test_avalanche_dataset_multi_param_transform(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n\n ref_instance2_idx = None\n\n for instance_idx, (_, instance_y) in enumerate(dataset_mnist):\n if instance_y == 2:\n ref_instance2_idx = instance_idx\n break\n\n self.assertIsNotNone(ref_instance2_idx)\n\n ref_instance_idx = None\n for instance_idx, (_, instance_y) in enumerate(dataset_mnist):\n if instance_y != 2:\n ref_instance_idx = instance_idx\n break\n\n self.assertIsNotNone(ref_instance_idx)\n\n with self.assertWarns(\n avalanche.benchmarks.utils.ComposeMaxParamsWarning\n ):\n dataset_transform = avalanche.benchmarks.utils.Compose(\n [ToTensor(), zero_if_label_2]\n )\n\n self.assertEqual(1, dataset_transform.min_params)\n self.assertEqual(2, dataset_transform.max_params)\n\n x, y = dataset_mnist[ref_instance_idx]\n dataset = AvalancheDataset(dataset_mnist, transform=dataset_transform)\n x2, y2, t2 = 
dataset[ref_instance_idx]\n\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(y2, int)\n self.assertIsInstance(t2, int)\n self.assertEqual(0, t2)\n self.assertTrue(torch.equal(ToTensor()(x), x2))\n self.assertEqual(y, y2)\n\n # Check that the multi-param transform was correctly called\n x3, y3, _ = dataset[ref_instance2_idx]\n\n self.assertEqual(2, y3)\n self.assertIsInstance(x3, Tensor)\n self.assertEqual(0.0, torch.min(x3))\n self.assertEqual(0.0, torch.max(x3))\n\n def test_avalanche_dataset_slice(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n x0, y0 = dataset_mnist[0]\n x1, y1 = dataset_mnist[1]\n dataset = AvalancheDataset(dataset_mnist, transform=ToTensor())\n x2, y2, t2 = dataset[:2]\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(y2, Tensor)\n self.assertIsInstance(t2, Tensor)\n self.assertTrue(torch.equal(ToTensor()(x0), x2[0]))\n self.assertTrue(torch.equal(ToTensor()(x1), x2[1]))\n self.assertEqual(y0, y2[0].item())\n self.assertEqual(y1, y2[1].item())\n self.assertEqual(0, t2[0].item())\n self.assertEqual(0, t2[1].item())\n\n def test_avalanche_dataset_indexing(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n x0, y0 = dataset_mnist[0]\n x1, y1 = dataset_mnist[5]\n dataset = AvalancheDataset(dataset_mnist, transform=ToTensor())\n x2, y2, t2 = dataset[0, 5]\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(y2, Tensor)\n self.assertIsInstance(t2, Tensor)\n self.assertTrue(torch.equal(ToTensor()(x0), x2[0]))\n self.assertTrue(torch.equal(ToTensor()(x1), x2[1]))\n self.assertEqual(y0, y2[0].item())\n self.assertEqual(y1, y2[1].item())\n self.assertEqual(0, t2[0].item())\n self.assertEqual(0, t2[1].item())\n\n def test_avalanche_dataset_composition(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\",\n download=True,\n transform=RandomCrop(16),\n )\n x, y = dataset_mnist[0]\n self.assertIsInstance(x, Image)\n self.assertEqual([x.width, x.height], [16, 16])\n self.assertIsInstance(y, int)\n\n dataset = AvalancheDataset(\n dataset_mnist,\n transform=ToTensor(),\n target_transform=lambda target: -1,\n )\n\n x2, y2, t2 = dataset[0]\n self.assertIsInstance(x2, Tensor)\n self.assertEqual(x2.shape, (1, 16, 16))\n self.assertIsInstance(y2, int)\n self.assertEqual(y2, -1)\n self.assertIsInstance(t2, int)\n self.assertEqual(0, t2)\n\n def test_avalanche_dataset_add(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\",\n download=True,\n transform=CenterCrop(16),\n )\n\n dataset1 = AvalancheDataset(\n dataset_mnist,\n transform=ToTensor(),\n target_transform=lambda target: -1,\n )\n\n dataset2 = AvalancheDataset(\n dataset_mnist,\n target_transform=lambda target: -2,\n task_labels=ConstantSequence(2, len(dataset_mnist)),\n )\n\n dataset3 = dataset1 + dataset2\n\n self.assertEqual(len(dataset_mnist) * 2, len(dataset3))\n\n x1, y1, t1 = dataset1[0]\n x2, y2, t2 = dataset2[0]\n\n x3, y3, t3 = dataset3[0]\n x3_2, y3_2, t3_2 = dataset3[len(dataset_mnist)]\n\n self.assertIsInstance(x1, Tensor)\n self.assertEqual(x1.shape, (1, 16, 16))\n self.assertEqual(-1, y1)\n self.assertEqual(0, t1)\n\n self.assertIsInstance(x2, PIL.Image.Image)\n self.assertEqual(x2.size, (16, 16))\n self.assertEqual(-2, y2)\n self.assertEqual(2, t2)\n\n self.assertEqual((y1, t1), (y3, t3))\n self.assertEqual(16 * 16, torch.sum(torch.eq(x1, x3)).item())\n\n self.assertEqual((y2, t2), (y3_2, t3_2))\n 
self.assertTrue(pil_images_equal(x2, x3_2))\n\n def test_avalanche_dataset_radd(self):\n dataset_mnist = MNIST(\n expanduser(\"~\") + \"/.avalanche/data/mnist/\",\n download=True,\n transform=CenterCrop(16),\n )\n\n dataset1 = AvalancheDataset(\n dataset_mnist,\n transform=ToTensor(),\n target_transform=lambda target: -1,\n )\n\n dataset2 = dataset_mnist + dataset1\n self.assertIsInstance(dataset2, AvalancheDataset)\n self.assertEqual(len(dataset_mnist) * 2, len(dataset2))\n\n dataset3 = dataset_mnist + dataset1 + dataset_mnist\n self.assertIsInstance(dataset3, AvalancheDataset)\n self.assertEqual(len(dataset_mnist) * 3, len(dataset3))\n\n dataset4 = dataset_mnist + dataset_mnist + dataset1\n self.assertIsInstance(dataset4, AvalancheDataset)\n self.assertEqual(len(dataset_mnist) * 3, len(dataset4))\n\n def test_dataset_add_monkey_patch_vanilla_behaviour(self):\n dataset_mnist = MNIST(\n expanduser(\"~\") + \"/.avalanche/data/mnist/\",\n download=True,\n transform=CenterCrop(16),\n )\n\n dataset_mnist2 = MNIST(\n expanduser(\"~\") + \"/.avalanche/data/mnist/\",\n download=True,\n transform=CenterCrop(16),\n )\n\n dataset = dataset_mnist + dataset_mnist2\n\n self.assertIsInstance(dataset, ConcatDataset)\n\n self.assertEqual(len(dataset_mnist) * 2, len(dataset))\n\n def test_avalanche_dataset_uniform_task_labels(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n x, y = dataset_mnist[0]\n dataset = AvalancheDataset(\n dataset_mnist,\n transform=ToTensor(),\n task_labels=[1] * len(dataset_mnist),\n )\n x2, y2, t2 = dataset[0]\n\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(y2, int)\n self.assertIsInstance(t2, int)\n self.assertEqual(1, t2)\n self.assertTrue(torch.equal(ToTensor()(x), x2))\n self.assertEqual(y, y2)\n\n self.assertListEqual(\n [1] * len(dataset_mnist), list(dataset.targets_task_labels)\n )\n\n subset_task1 = dataset.task_set[1]\n self.assertIsInstance(subset_task1, AvalancheDataset)\n self.assertEqual(len(dataset), len(subset_task1))\n\n with self.assertRaises(KeyError):\n subset_task0 = dataset.task_set[0]\n\n def test_avalanche_dataset_tensor_task_labels(self):\n x = torch.rand(32, 10)\n y = torch.rand(32, 10)\n t = torch.ones(32) # Single task\n dataset = AvalancheTensorDataset(x, y, targets=1, task_labels=t)\n\n x2, y2, t2 = dataset[:]\n\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(y2, Tensor)\n self.assertIsInstance(t2, Tensor)\n self.assertTrue(torch.equal(x, x2))\n self.assertTrue(torch.equal(y, y2))\n self.assertTrue(torch.equal(t.to(int), t2))\n\n self.assertListEqual([1] * 32, list(dataset.targets_task_labels))\n\n # Regression test for #654\n self.assertEqual(1, len(dataset.task_set))\n\n subset_task1 = dataset.task_set[1]\n self.assertIsInstance(subset_task1, AvalancheDataset)\n self.assertEqual(len(dataset), len(subset_task1))\n\n with self.assertRaises(KeyError):\n subset_task0 = dataset.task_set[0]\n\n with self.assertRaises(KeyError):\n subset_task0 = dataset.task_set[2]\n\n # Check single instance types\n x2, y2, t2 = dataset[0]\n\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(y2, Tensor)\n self.assertIsInstance(t2, int)\n\n def test_avalanche_dataset_uniform_task_labels_simple_def(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n dataset = AvalancheDataset(\n dataset_mnist, transform=ToTensor(), task_labels=1\n )\n _, _, t2 = dataset[0]\n\n self.assertIsInstance(t2, int)\n self.assertEqual(1, t2)\n\n 
self.assertListEqual(\n [1] * len(dataset_mnist), list(dataset.targets_task_labels)\n )\n\n subset_task1 = dataset.task_set[1]\n self.assertIsInstance(subset_task1, AvalancheDataset)\n self.assertEqual(len(dataset), len(subset_task1))\n\n with self.assertRaises(KeyError):\n subset_task0 = dataset.task_set[0]\n\n def test_avalanche_dataset_mixed_task_labels(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n x, y = dataset_mnist[0]\n\n random_task_labels = [\n random.randint(0, 10) for _ in range(len(dataset_mnist))\n ]\n dataset = AvalancheDataset(\n dataset_mnist, transform=ToTensor(), task_labels=random_task_labels\n )\n x2, y2, t2 = dataset[0]\n\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(y2, int)\n self.assertIsInstance(t2, int)\n self.assertEqual(random_task_labels[0], t2)\n self.assertTrue(torch.equal(ToTensor()(x), x2))\n self.assertEqual(y, y2)\n\n self.assertListEqual(\n random_task_labels, list(dataset.targets_task_labels)\n )\n\n u_labels, counts = np.unique(random_task_labels, return_counts=True)\n for i, task_label in enumerate(u_labels.tolist()):\n subset_task = dataset.task_set[task_label]\n self.assertIsInstance(subset_task, AvalancheDataset)\n self.assertEqual(int(counts[i]), len(subset_task))\n\n unique_task_labels = list(subset_task.targets_task_labels)\n self.assertListEqual(\n [task_label] * int(counts[i]), unique_task_labels\n )\n\n with self.assertRaises(KeyError):\n subset_task11 = dataset.task_set[11]\n\n def test_avalanche_tensor_dataset_task_labels_train(self):\n tr_ds = [\n AvalancheTensorDataset(\n torch.randn(10, 4),\n torch.randint(0, 3, (10,)),\n dataset_type=AvalancheDatasetType.CLASSIFICATION,\n task_labels=torch.randint(0, 5, (10,)).tolist(),\n )\n for i in range(3)\n ]\n ts_ds = [\n AvalancheTensorDataset(\n torch.randn(10, 4),\n torch.randint(0, 3, (10,)),\n dataset_type=AvalancheDatasetType.CLASSIFICATION,\n task_labels=torch.randint(0, 5, (10,)).tolist(),\n )\n for i in range(3)\n ]\n benchmark = dataset_benchmark(train_datasets=tr_ds, test_datasets=ts_ds)\n model = SimpleMLP(input_size=4, num_classes=3)\n cl_strategy = Naive(\n model,\n SGD(model.parameters(), lr=0.001, momentum=0.9),\n CrossEntropyLoss(),\n train_mb_size=5,\n train_epochs=1,\n eval_mb_size=5,\n device=\"cpu\",\n evaluator=None,\n )\n exp = []\n for i, experience in enumerate(benchmark.train_stream):\n exp.append(i)\n cl_strategy.train(experience)\n self.assertEqual(len(exp), 3)\n\n def test_avalanche_dataset_task_labels_inheritance(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n random_task_labels = [\n random.randint(0, 10) for _ in range(len(dataset_mnist))\n ]\n dataset_orig = AvalancheDataset(\n dataset_mnist, transform=ToTensor(), task_labels=random_task_labels\n )\n\n dataset_child = AvalancheDataset(dataset_orig)\n x2, y2, t2 = dataset_orig[0]\n x3, y3, t3 = dataset_child[0]\n\n self.assertIsInstance(t2, int)\n self.assertEqual(random_task_labels[0], t2)\n\n self.assertIsInstance(t3, int)\n self.assertEqual(random_task_labels[0], t3)\n\n self.assertListEqual(\n random_task_labels, list(dataset_orig.targets_task_labels)\n )\n\n self.assertListEqual(\n random_task_labels, list(dataset_child.targets_task_labels)\n )\n\n def test_avalanche_dataset_tensor_dataset_input(self):\n train_x = torch.rand(500, 3, 28, 28)\n train_y = torch.zeros(500)\n test_x = torch.rand(200, 3, 28, 28)\n test_y = torch.ones(200)\n\n train = TensorDataset(train_x, train_y)\n 
test = TensorDataset(test_x, test_y)\n train_dataset = AvalancheDataset(train)\n test_dataset = AvalancheDataset(test)\n\n self.assertEqual(500, len(train_dataset))\n self.assertEqual(200, len(test_dataset))\n\n x, y, t = train_dataset[0]\n self.assertIsInstance(x, Tensor)\n self.assertEqual(0, y)\n self.assertEqual(0, t)\n\n x2, y2, t2 = test_dataset[0]\n self.assertIsInstance(x2, Tensor)\n self.assertEqual(1, y2)\n self.assertEqual(0, t2)\n\n def test_avalanche_dataset_multiple_outputs_and_float_y(self):\n train_x = torch.rand(500, 3, 28, 28)\n train_y = torch.zeros(500)\n train_z = torch.ones(500)\n test_x = torch.rand(200, 3, 28, 28)\n test_y = torch.ones(200)\n test_z = torch.full((200,), 5)\n\n train = TensorDataset(train_x, train_y, train_z)\n test = TensorDataset(test_x, test_y, test_z)\n train_dataset = AvalancheDataset(train)\n test_dataset = AvalancheDataset(test)\n\n self.assertEqual(500, len(train_dataset))\n self.assertEqual(200, len(test_dataset))\n\n x, y, z, t = train_dataset[0]\n self.assertIsInstance(x, Tensor)\n self.assertEqual(0, y)\n self.assertEqual(1, z)\n self.assertEqual(0, t)\n\n x2, y2, z2, t2 = test_dataset[0]\n self.assertIsInstance(x2, Tensor)\n self.assertEqual(1, y2)\n self.assertEqual(5, z2)\n self.assertEqual(0, t2)\n\n def test_avalanche_dataset_from_pytorch_subset(self):\n tensor_x = torch.rand(500, 3, 28, 28)\n tensor_y = torch.randint(0, 100, (500,))\n\n whole_dataset = TensorDataset(tensor_x, tensor_y)\n\n train = Subset(whole_dataset, indices=list(range(400)))\n test = Subset(whole_dataset, indices=list(range(400, 500)))\n\n train_dataset = AvalancheDataset(train)\n test_dataset = AvalancheDataset(test)\n\n self.assertEqual(400, len(train_dataset))\n self.assertEqual(100, len(test_dataset))\n\n x, y, t = train_dataset[0]\n self.assertIsInstance(x, Tensor)\n self.assertTrue(torch.equal(tensor_x[0], x))\n self.assertTrue(torch.equal(tensor_y[0], y))\n self.assertEqual(0, t)\n\n self.assertTrue(\n torch.equal(torch.as_tensor(train_dataset.targets), tensor_y[:400])\n )\n\n x2, y2, t2 = test_dataset[0]\n self.assertIsInstance(x2, Tensor)\n self.assertTrue(torch.equal(tensor_x[400], x2))\n self.assertTrue(torch.equal(tensor_y[400], y2))\n self.assertEqual(0, t2)\n\n self.assertTrue(\n torch.equal(torch.as_tensor(test_dataset.targets), tensor_y[400:])\n )\n\n def test_avalanche_dataset_from_pytorch_concat_dataset(self):\n tensor_x = torch.rand(500, 3, 28, 28)\n tensor_x2 = torch.rand(300, 3, 28, 28)\n tensor_y = torch.randint(0, 100, (500,))\n tensor_y2 = torch.randint(0, 100, (300,))\n\n dataset1 = TensorDataset(tensor_x, tensor_y)\n dataset2 = TensorDataset(tensor_x2, tensor_y2)\n\n concat_dataset = ConcatDataset((dataset1, dataset2))\n\n av_dataset = AvalancheDataset(concat_dataset)\n\n self.assertEqual(500, len(dataset1))\n self.assertEqual(300, len(dataset2))\n\n x, y, t = av_dataset[0]\n x2, y2, t2 = av_dataset[500]\n self.assertIsInstance(x, Tensor)\n self.assertTrue(torch.equal(tensor_x[0], x))\n self.assertTrue(torch.equal(tensor_y[0], y))\n self.assertEqual(0, t)\n\n self.assertIsInstance(x2, Tensor)\n self.assertTrue(torch.equal(tensor_x2[0], x2))\n self.assertTrue(torch.equal(tensor_y2[0], y2))\n self.assertEqual(0, t2)\n\n self.assertTrue(\n torch.equal(\n torch.as_tensor(av_dataset.targets),\n torch.cat((tensor_y, tensor_y2)),\n )\n )\n\n def test_avalanche_dataset_from_chained_pytorch_concat_dataset(self):\n tensor_x = torch.rand(500, 3, 28, 28)\n tensor_x2 = torch.rand(300, 3, 28, 28)\n tensor_x3 = torch.rand(200, 3, 28, 28)\n tensor_y = 
torch.randint(0, 100, (500,))\n tensor_y2 = torch.randint(0, 100, (300,))\n tensor_y3 = torch.randint(0, 100, (200,))\n\n dataset1 = TensorDataset(tensor_x, tensor_y)\n dataset2 = TensorDataset(tensor_x2, tensor_y2)\n dataset3 = TensorDataset(tensor_x3, tensor_y3)\n\n concat_dataset = ConcatDataset((dataset1, dataset2))\n concat_dataset2 = ConcatDataset((concat_dataset, dataset3))\n\n av_dataset = AvalancheDataset(concat_dataset2)\n\n self.assertEqual(500, len(dataset1))\n self.assertEqual(300, len(dataset2))\n\n x, y, t = av_dataset[0]\n x2, y2, t2 = av_dataset[500]\n x3, y3, t3 = av_dataset[800]\n self.assertIsInstance(x, Tensor)\n self.assertTrue(torch.equal(tensor_x[0], x))\n self.assertTrue(torch.equal(tensor_y[0], y))\n self.assertEqual(0, t)\n\n self.assertIsInstance(x2, Tensor)\n self.assertTrue(torch.equal(tensor_x2[0], x2))\n self.assertTrue(torch.equal(tensor_y2[0], y2))\n self.assertEqual(0, t2)\n\n self.assertIsInstance(x3, Tensor)\n self.assertTrue(torch.equal(tensor_x3[0], x3))\n self.assertTrue(torch.equal(tensor_y3[0], y3))\n self.assertEqual(0, t3)\n\n self.assertTrue(\n torch.equal(\n torch.as_tensor(av_dataset.targets),\n torch.cat((tensor_y, tensor_y2, tensor_y3)),\n )\n )\n\n def test_avalanche_dataset_from_chained_pytorch_subsets(self):\n tensor_x = torch.rand(500, 3, 28, 28)\n tensor_y = torch.randint(0, 100, (500,))\n\n whole_dataset = TensorDataset(tensor_x, tensor_y)\n\n subset1 = Subset(whole_dataset, indices=list(range(400, 500)))\n subset2 = Subset(subset1, indices=[5, 7, 0])\n\n dataset = AvalancheDataset(subset2)\n\n self.assertEqual(3, len(dataset))\n\n x, y, t = dataset[0]\n self.assertIsInstance(x, Tensor)\n self.assertTrue(torch.equal(tensor_x[405], x))\n self.assertTrue(torch.equal(tensor_y[405], y))\n self.assertEqual(0, t)\n\n self.assertTrue(\n torch.equal(\n torch.as_tensor(dataset.targets),\n torch.as_tensor([tensor_y[405], tensor_y[407], tensor_y[400]]),\n )\n )\n\n def test_avalanche_dataset_from_chained_pytorch_concat_subset_dataset(self):\n tensor_x = torch.rand(200, 3, 28, 28)\n tensor_x2 = torch.rand(100, 3, 28, 28)\n tensor_y = torch.randint(0, 100, (200,))\n tensor_y2 = torch.randint(0, 100, (100,))\n\n dataset1 = TensorDataset(tensor_x, tensor_y)\n dataset2 = TensorDataset(tensor_x2, tensor_y2)\n\n indices = [random.randint(0, 299) for _ in range(1000)]\n\n concat_dataset = ConcatDataset((dataset1, dataset2))\n subset = Subset(concat_dataset, indices)\n\n av_dataset = AvalancheDataset(subset)\n\n self.assertEqual(200, len(dataset1))\n self.assertEqual(100, len(dataset2))\n self.assertEqual(1000, len(av_dataset))\n\n for idx in range(1000):\n orig_idx = indices[idx]\n if orig_idx < 200:\n expected_x, expected_y = dataset1[orig_idx]\n else:\n expected_x, expected_y = dataset2[orig_idx - 200]\n\n x, y, t = av_dataset[idx]\n self.assertIsInstance(x, Tensor)\n self.assertTrue(torch.equal(expected_x, x))\n self.assertTrue(torch.equal(expected_y, y))\n self.assertEqual(0, t)\n self.assertEqual(int(expected_y), int(av_dataset.targets[idx]))\n\n def test_avalanche_dataset_from_chained_pytorch_datasets(self):\n tensor_x = torch.rand(200, 3, 28, 28)\n tensor_x2 = torch.rand(100, 3, 28, 28)\n tensor_y = torch.randint(0, 100, (200,))\n tensor_y2 = torch.randint(0, 100, (100,))\n\n dataset1 = TensorDataset(tensor_x, tensor_y)\n dataset1_sub = Subset(dataset1, range(199, -1, -1))\n dataset2 = TensorDataset(tensor_x2, tensor_y2)\n\n indices = [random.randint(0, 299) for _ in range(1000)]\n\n concat_dataset = ConcatDataset((dataset1_sub, dataset2))\n 
subset = Subset(concat_dataset, indices)\n\n av_dataset = AvalancheDataset(subset)\n\n self.assertEqual(200, len(dataset1_sub))\n self.assertEqual(100, len(dataset2))\n self.assertEqual(1000, len(av_dataset))\n\n for idx in range(1000):\n orig_idx = indices[idx]\n if orig_idx < 200:\n orig_idx = range(199, -1, -1)[orig_idx]\n expected_x, expected_y = dataset1[orig_idx]\n else:\n expected_x, expected_y = dataset2[orig_idx - 200]\n\n x, y, t = av_dataset[idx]\n self.assertIsInstance(x, Tensor)\n self.assertTrue(torch.equal(expected_x, x))\n self.assertTrue(torch.equal(expected_y, y))\n self.assertEqual(0, t)\n self.assertEqual(int(expected_y), int(av_dataset.targets[idx]))\n\n def test_avalanche_dataset_from_chained_pytorch_datasets_task_labels(self):\n tensor_x = torch.rand(200, 3, 28, 28)\n tensor_x2 = torch.rand(100, 3, 28, 28)\n tensor_y = torch.randint(0, 100, (200,))\n tensor_y2 = torch.randint(0, 100, (100,))\n tensor_t = torch.randint(0, 100, (200,))\n tensor_t2 = torch.randint(0, 100, (100,))\n\n dataset1 = AvalancheTensorDataset(\n tensor_x, tensor_y, task_labels=tensor_t\n )\n dataset1_sub = Subset(dataset1, range(199, -1, -1))\n dataset2 = AvalancheDataset(\n TensorDataset(tensor_x2, tensor_y2), task_labels=tensor_t2\n )\n\n indices = [random.randint(0, 299) for _ in range(1000)]\n\n concat_dataset = ConcatDataset((dataset1_sub, dataset2))\n subset = Subset(concat_dataset, indices)\n\n av_dataset = AvalancheDataset(subset)\n\n self.assertEqual(200, len(dataset1_sub))\n self.assertEqual(100, len(dataset2))\n self.assertEqual(1000, len(av_dataset))\n\n for idx in range(1000):\n orig_idx = indices[idx]\n if orig_idx < 200:\n orig_idx = range(199, -1, -1)[orig_idx]\n expected_x = tensor_x[orig_idx]\n expected_y = tensor_y[orig_idx]\n expected_t = tensor_t[orig_idx]\n else:\n orig_idx -= 200\n expected_x = tensor_x2[orig_idx]\n expected_y = tensor_y2[orig_idx]\n expected_t = tensor_t2[orig_idx]\n\n x, y, t = av_dataset[idx]\n self.assertIsInstance(x, Tensor)\n self.assertTrue(torch.equal(expected_x, x))\n self.assertTrue(torch.equal(expected_y, y))\n self.assertIsInstance(t, int)\n self.assertEqual(int(expected_t), int(t))\n self.assertEqual(int(expected_y), int(av_dataset.targets[idx]))\n\n def test_avalanche_dataset_collate_fn(self):\n tensor_x = torch.rand(500, 3, 28, 28)\n tensor_y = torch.randint(0, 100, (500,))\n tensor_z = torch.randint(0, 100, (500,))\n\n def my_collate_fn(patterns):\n x_values = torch.stack([pat[0] for pat in patterns], 0)\n y_values = torch.tensor([pat[1] for pat in patterns]) + 1\n z_values = torch.tensor([-1 for _ in patterns])\n t_values = torch.tensor([pat[3] for pat in patterns])\n return x_values, y_values, z_values, t_values\n\n whole_dataset = TensorDataset(tensor_x, tensor_y, tensor_z)\n dataset = AvalancheDataset(whole_dataset, collate_fn=my_collate_fn)\n\n x, y, z, t = dataset[0]\n self.assertIsInstance(x, Tensor)\n self.assertTrue(torch.equal(tensor_x[0], x))\n self.assertTrue(torch.equal(tensor_y[0], y))\n self.assertEqual(0, t)\n\n x2, y2, z2, t2 = dataset[0:5]\n self.assertIsInstance(x2, Tensor)\n self.assertTrue(torch.equal(tensor_x[0:5], x2))\n self.assertTrue(torch.equal(tensor_y[0:5] + 1, y2))\n self.assertTrue(torch.equal(torch.full((5,), -1, dtype=torch.long), z2))\n self.assertTrue(torch.equal(torch.zeros(5, dtype=torch.long), t2))\n\n inherited = AvalancheDataset(dataset)\n\n x3, y3, z3, t3 = inherited[0:5]\n self.assertIsInstance(x3, Tensor)\n self.assertTrue(torch.equal(tensor_x[0:5], x3))\n self.assertTrue(torch.equal(tensor_y[0:5] 
+ 1, y3))\n self.assertTrue(torch.equal(torch.full((5,), -1, dtype=torch.long), z3))\n self.assertTrue(torch.equal(torch.zeros(5, dtype=torch.long), t3))\n\n with self.assertRaises(ValueError):\n # Can't define a custom collate when dataset_type != UNDEFINED\n bad_definition = AvalancheDataset(\n dataset,\n dataset_type=AvalancheDatasetType.CLASSIFICATION,\n collate_fn=my_collate_fn,\n )\n\n def test_avalanche_dataset_collate_fn_inheritance(self):\n tensor_x = torch.rand(200, 3, 28, 28)\n tensor_y = torch.randint(0, 100, (200,))\n tensor_z = torch.randint(0, 100, (200,))\n\n def my_collate_fn(patterns):\n x_values = torch.stack([pat[0] for pat in patterns], 0)\n y_values = torch.tensor([pat[1] for pat in patterns]) + 1\n z_values = torch.tensor([-1 for _ in patterns])\n t_values = torch.tensor([pat[3] for pat in patterns])\n return x_values, y_values, z_values, t_values\n\n def my_collate_fn2(patterns):\n x_values = torch.stack([pat[0] for pat in patterns], 0)\n y_values = torch.tensor([pat[1] for pat in patterns]) + 2\n z_values = torch.tensor([-2 for _ in patterns])\n t_values = torch.tensor([pat[3] for pat in patterns])\n return x_values, y_values, z_values, t_values\n\n whole_dataset = TensorDataset(tensor_x, tensor_y, tensor_z)\n dataset = AvalancheDataset(whole_dataset, collate_fn=my_collate_fn)\n inherited = AvalancheDataset(dataset, collate_fn=my_collate_fn2) # Ok\n\n x, y, z, t = inherited[0:5]\n self.assertIsInstance(x, Tensor)\n self.assertTrue(torch.equal(tensor_x[0:5], x))\n self.assertTrue(torch.equal(tensor_y[0:5] + 2, y))\n self.assertTrue(torch.equal(torch.full((5,), -2, dtype=torch.long), z))\n self.assertTrue(torch.equal(torch.zeros(5, dtype=torch.long), t))\n\n classification_dataset = AvalancheDataset(\n whole_dataset, dataset_type=AvalancheDatasetType.CLASSIFICATION\n )\n\n with self.assertRaises(ValueError):\n bad_inherited = AvalancheDataset(\n classification_dataset, collate_fn=my_collate_fn\n )\n ok_inherited_classification = AvalancheDataset(classification_dataset)\n self.assertEqual(\n AvalancheDatasetType.CLASSIFICATION,\n ok_inherited_classification.dataset_type,\n )\n\n def test_avalanche_concat_dataset_collate_fn_inheritance(self):\n tensor_x = torch.rand(200, 3, 28, 28)\n tensor_y = torch.randint(0, 100, (200,))\n tensor_z = torch.randint(0, 100, (200,))\n\n tensor_x2 = torch.rand(200, 3, 28, 28)\n tensor_y2 = torch.randint(0, 100, (200,))\n tensor_z2 = torch.randint(0, 100, (200,))\n\n def my_collate_fn(patterns):\n x_values = torch.stack([pat[0] for pat in patterns], 0)\n y_values = torch.tensor([pat[1] for pat in patterns]) + 1\n z_values = torch.tensor([-1 for _ in patterns])\n t_values = torch.tensor([pat[3] for pat in patterns])\n return x_values, y_values, z_values, t_values\n\n def my_collate_fn2(patterns):\n x_values = torch.stack([pat[0] for pat in patterns], 0)\n y_values = torch.tensor([pat[1] for pat in patterns]) + 2\n z_values = torch.tensor([-2 for _ in patterns])\n t_values = torch.tensor([pat[3] for pat in patterns])\n return x_values, y_values, z_values, t_values\n\n dataset1 = TensorDataset(tensor_x, tensor_y, tensor_z)\n dataset2 = AvalancheTensorDataset(\n tensor_x2, tensor_y2, tensor_z2, collate_fn=my_collate_fn\n )\n concat = AvalancheConcatDataset(\n [dataset1, dataset2], collate_fn=my_collate_fn2\n ) # Ok\n\n x, y, z, t = dataset2[0:5]\n self.assertIsInstance(x, Tensor)\n self.assertTrue(torch.equal(tensor_x2[0:5], x))\n self.assertTrue(torch.equal(tensor_y2[0:5] + 1, y))\n self.assertTrue(torch.equal(torch.full((5,), -1, 
dtype=torch.long), z))\n self.assertTrue(torch.equal(torch.zeros(5, dtype=torch.long), t))\n\n x2, y2, z2, t2 = concat[0:5]\n self.assertIsInstance(x2, Tensor)\n self.assertTrue(torch.equal(tensor_x[0:5], x2))\n self.assertTrue(torch.equal(tensor_y[0:5] + 2, y2))\n self.assertTrue(torch.equal(torch.full((5,), -2, dtype=torch.long), z2))\n self.assertTrue(torch.equal(torch.zeros(5, dtype=torch.long), t2))\n\n dataset1_classification = AvalancheTensorDataset(\n tensor_x,\n tensor_y,\n tensor_z,\n dataset_type=AvalancheDatasetType.CLASSIFICATION,\n )\n\n dataset2_segmentation = AvalancheDataset(\n dataset2, dataset_type=AvalancheDatasetType.SEGMENTATION\n )\n\n with self.assertRaises(ValueError):\n bad_concat_types = dataset1_classification + dataset2_segmentation\n\n with self.assertRaises(ValueError):\n bad_concat_collate = AvalancheConcatDataset(\n [dataset1, dataset2_segmentation], collate_fn=my_collate_fn\n )\n\n ok_concat_classification = dataset1_classification + dataset2\n self.assertEqual(\n AvalancheDatasetType.CLASSIFICATION,\n ok_concat_classification.dataset_type,\n )\n\n ok_concat_classification2 = dataset2 + dataset1_classification\n self.assertEqual(\n AvalancheDatasetType.CLASSIFICATION,\n ok_concat_classification2.dataset_type,\n )\n\n def test_avalanche_concat_dataset_recursion(self):\n def gen_random_tensors(n):\n return (\n torch.rand(n, 3, 28, 28),\n torch.randint(0, 100, (n,)),\n torch.randint(0, 100, (n,)),\n )\n\n tensor_x, tensor_y, tensor_z = gen_random_tensors(200)\n\n tensor_x2, tensor_y2, tensor_z2 = gen_random_tensors(200)\n\n tensor_x3, tensor_y3, tensor_z3 = gen_random_tensors(200)\n\n tensor_x4, tensor_y4, tensor_z4 = gen_random_tensors(200)\n\n tensor_x5, tensor_y5, tensor_z5 = gen_random_tensors(200)\n\n tensor_x6, tensor_y6, tensor_z6 = gen_random_tensors(200)\n\n tensor_x7, tensor_y7, tensor_z7 = gen_random_tensors(200)\n\n dataset1 = TensorDataset(tensor_x, tensor_y, tensor_z)\n dataset2 = AvalancheTensorDataset(\n tensor_x2, tensor_y2, tensor_z2, task_labels=1\n )\n dataset3 = AvalancheTensorDataset(\n tensor_x3, tensor_y3, tensor_z3, task_labels=2\n )\n\n dataset4 = AvalancheTensorDataset(\n tensor_x4, tensor_y4, tensor_z4, task_labels=3\n )\n dataset5 = AvalancheTensorDataset(\n tensor_x5, tensor_y5, tensor_z5, task_labels=4\n )\n dataset6 = AvalancheTensorDataset(tensor_x6, tensor_y6, tensor_z6)\n dataset7 = AvalancheTensorDataset(tensor_x7, tensor_y7, tensor_z7)\n\n # This will test recursion on both PyTorch ConcatDataset and\n # AvalancheConcatDataset\n concat = ConcatDataset([dataset1, dataset2])\n\n # Beware of the explicit task_labels=5 that *must* override the\n # task labels set in dataset4 and dataset5\n\n def transform_target_to_constant(ignored_target_value):\n return 101\n\n def transform_target_to_constant2(ignored_target_value):\n return 102\n\n concat2 = AvalancheConcatDataset(\n [dataset4, dataset5],\n task_labels=5,\n target_transform=transform_target_to_constant,\n )\n\n concat3 = AvalancheConcatDataset(\n [dataset6, dataset7], target_transform=transform_target_to_constant2\n ).freeze_transforms()\n concat_uut = AvalancheConcatDataset(\n [concat, dataset3, concat2, concat3]\n )\n\n self.assertEqual(400, len(concat))\n self.assertEqual(400, len(concat2))\n self.assertEqual(400, len(concat3))\n self.assertEqual(1400, len(concat_uut))\n\n x, y, z, t = concat_uut[0]\n x2, y2, z2, t2 = concat_uut[200]\n x3, y3, z3, t3 = concat_uut[400]\n x4, y4, z4, t4 = concat_uut[600]\n x5, y5, z5, t5 = concat_uut[800]\n x6, y6, z6, t6 = 
concat_uut[1000]\n x7, y7, z7, t7 = concat_uut[1200]\n\n self.assertTrue(torch.equal(x, tensor_x[0]))\n self.assertTrue(torch.equal(y, tensor_y[0]))\n self.assertTrue(torch.equal(z, tensor_z[0]))\n self.assertEqual(0, t)\n\n self.assertTrue(torch.equal(x2, tensor_x2[0]))\n self.assertTrue(torch.equal(y2, tensor_y2[0]))\n self.assertTrue(torch.equal(z2, tensor_z2[0]))\n self.assertEqual(1, t2)\n\n self.assertTrue(torch.equal(x3, tensor_x3[0]))\n self.assertTrue(torch.equal(y3, tensor_y3[0]))\n self.assertTrue(torch.equal(z3, tensor_z3[0]))\n self.assertEqual(2, t3)\n\n self.assertTrue(torch.equal(x4, tensor_x4[0]))\n self.assertEqual(101, y4)\n self.assertTrue(torch.equal(z4, tensor_z4[0]))\n self.assertEqual(5, t4)\n\n self.assertTrue(torch.equal(x5, tensor_x5[0]))\n self.assertEqual(101, y5)\n self.assertTrue(torch.equal(z5, tensor_z5[0]))\n self.assertEqual(5, t5)\n\n self.assertTrue(torch.equal(x6, tensor_x6[0]))\n self.assertEqual(102, y6)\n self.assertTrue(torch.equal(z6, tensor_z6[0]))\n self.assertEqual(0, t6)\n\n self.assertTrue(torch.equal(x7, tensor_x7[0]))\n self.assertEqual(102, y7)\n self.assertTrue(torch.equal(z7, tensor_z7[0]))\n self.assertEqual(0, t7)\n\n def test_avalanche_pytorch_subset_recursion(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n x, y = dataset_mnist[3000]\n x2, y2 = dataset_mnist[1010]\n\n subset = Subset(dataset_mnist, indices=[3000, 8, 4, 1010, 12])\n\n dataset = AvalancheSubset(subset, indices=[0, 3])\n\n self.assertEqual(5, len(subset))\n self.assertEqual(2, len(dataset))\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n self.assertTrue(pil_images_equal(x, x3))\n self.assertEqual(y, y3)\n self.assertEqual(0, t3)\n self.assertTrue(pil_images_equal(x2, x4))\n self.assertEqual(y2, y4)\n self.assertEqual(0, t4)\n self.assertFalse(pil_images_equal(x, x4))\n self.assertFalse(pil_images_equal(x2, x3))\n\n def transform_target_to_constant(ignored_target_value):\n return 101\n\n subset = Subset(dataset_mnist, indices=[3000, 8, 4, 1010, 12])\n\n dataset = AvalancheSubset(\n subset,\n indices=[0, 3],\n target_transform=transform_target_to_constant,\n task_labels=5,\n )\n\n self.assertEqual(5, len(subset))\n self.assertEqual(2, len(dataset))\n\n x5, y5, t5 = dataset[0]\n x6, y6, t6 = dataset[1]\n self.assertTrue(pil_images_equal(x, x5))\n self.assertEqual(101, y5)\n self.assertEqual(5, t5)\n self.assertTrue(pil_images_equal(x2, x6))\n self.assertEqual(101, y6)\n self.assertEqual(5, t6)\n self.assertFalse(pil_images_equal(x, x6))\n self.assertFalse(pil_images_equal(x2, x5))\n\n def test_avalanche_pytorch_subset_recursion_no_indices(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n x, y = dataset_mnist[3000]\n x2, y2 = dataset_mnist[8]\n\n subset = Subset(dataset_mnist, indices=[3000, 8, 4, 1010, 12])\n\n dataset = AvalancheSubset(subset)\n\n self.assertEqual(5, len(subset))\n self.assertEqual(5, len(dataset))\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n self.assertTrue(pil_images_equal(x, x3))\n self.assertEqual(y, y3)\n self.assertTrue(pil_images_equal(x2, x4))\n self.assertEqual(y2, y4)\n self.assertFalse(pil_images_equal(x, x4))\n self.assertFalse(pil_images_equal(x2, x3))\n\n def test_avalanche_avalanche_subset_recursion_no_indices_transform(self):\n dataset_mnist = MNIST(\n root=expanduser(\"~\") + \"/.avalanche/data/mnist/\", download=True\n )\n x, y = dataset_mnist[3000]\n x2, y2 = dataset_mnist[8]\n\n def 
transform_target_to_constant(ignored_target_value):\n return 101\n\n def transform_target_plus_one(target_value):\n return target_value + 1\n\n subset = AvalancheSubset(\n dataset_mnist,\n indices=[3000, 8, 4, 1010, 12],\n transform=ToTensor(),\n target_transform=transform_target_to_constant,\n )\n\n dataset = AvalancheSubset(\n subset, target_transform=transform_target_plus_one\n )\n\n self.assertEqual(5, len(subset))\n self.assertEqual(5, len(dataset))\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n self.assertIsInstance(x3, Tensor)\n self.assertIsInstance(x4, Tensor)\n self.assertTrue(torch.equal(ToTensor()(x), x3))\n self.assertEqual(102, y3)\n self.assertTrue(torch.equal(ToTensor()(x2), x4))\n self.assertEqual(102, y4)\n self.assertFalse(torch.equal(ToTensor()(x), x4))\n self.assertFalse(torch.equal(ToTensor()(x2), x3))\n\n def test_avalanche_avalanche_subset_recursion_transform(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = dataset_mnist[3000]\n x2, y2 = dataset_mnist[1010]\n\n def transform_target_to_constant(ignored_target_value):\n return 101\n\n def transform_target_plus_one(target_value):\n return target_value + 2\n\n subset = AvalancheSubset(\n dataset_mnist,\n indices=[3000, 8, 4, 1010, 12],\n target_transform=transform_target_to_constant,\n )\n\n dataset = AvalancheSubset(\n subset,\n indices=[0, 3, 1],\n target_transform=transform_target_plus_one,\n )\n\n self.assertEqual(5, len(subset))\n self.assertEqual(3, len(dataset))\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n\n self.assertTrue(pil_images_equal(x, x3))\n self.assertEqual(103, y3)\n self.assertTrue(pil_images_equal(x2, x4))\n self.assertEqual(103, y4)\n self.assertFalse(pil_images_equal(x, x4))\n self.assertFalse(pil_images_equal(x2, x3))\n\n def test_avalanche_avalanche_subset_recursion_frozen_transform(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = dataset_mnist[3000]\n x2, y2 = dataset_mnist[1010]\n\n def transform_target_to_constant(ignored_target_value):\n return 101\n\n def transform_target_plus_two(target_value):\n return target_value + 2\n\n subset = AvalancheSubset(\n dataset_mnist,\n indices=[3000, 8, 4, 1010, 12],\n target_transform=transform_target_to_constant,\n )\n subset = subset.freeze_transforms()\n\n dataset = AvalancheSubset(\n subset,\n indices=[0, 3, 1],\n target_transform=transform_target_plus_two,\n )\n\n self.assertEqual(5, len(subset))\n self.assertEqual(3, len(dataset))\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n\n self.assertTrue(pil_images_equal(x, x3))\n self.assertEqual(103, y3)\n self.assertTrue(pil_images_equal(x2, x4))\n self.assertEqual(103, y4)\n self.assertFalse(pil_images_equal(x, x4))\n self.assertFalse(pil_images_equal(x2, x3))\n\n dataset = AvalancheSubset(\n subset,\n indices=[0, 3, 1],\n target_transform=transform_target_plus_two,\n )\n dataset = dataset.replace_transforms(None, None)\n\n x5, y5, t5 = dataset[0]\n x6, y6, t6 = dataset[1]\n\n self.assertTrue(pil_images_equal(x, x5))\n self.assertEqual(101, y5)\n self.assertTrue(pil_images_equal(x2, x6))\n self.assertEqual(101, y6)\n self.assertFalse(pil_images_equal(x, x6))\n self.assertFalse(pil_images_equal(x2, x5))\n\n def test_avalanche_avalanche_subset_recursion_modified_transforms(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = dataset_mnist[3000]\n x2, y2 = dataset_mnist[1010]\n\n def 
transform_target_to_constant(ignored_target_value):\n return 101\n\n def transform_target_to_constant2(ignored_target_value):\n return 102\n\n def transform_target_plus_two(target_value):\n return target_value + 2\n\n subset = AvalancheSubset(\n dataset_mnist,\n indices=[3000, 8, 4, 1010, 12],\n target_transform=transform_target_to_constant,\n )\n subset.target_transform = transform_target_to_constant2\n\n dataset = AvalancheSubset(\n subset,\n indices=[0, 3, 1],\n target_transform=transform_target_plus_two,\n )\n\n self.assertEqual(5, len(subset))\n self.assertEqual(3, len(dataset))\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n\n self.assertTrue(pil_images_equal(x, x3))\n self.assertEqual(104, y3)\n self.assertTrue(pil_images_equal(x2, x4))\n self.assertEqual(104, y4)\n self.assertFalse(pil_images_equal(x, x4))\n self.assertFalse(pil_images_equal(x2, x3))\n\n def test_avalanche_avalanche_subset_recursion_sub_class_mapping(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = dataset_mnist[3000]\n x2, y2 = dataset_mnist[1010]\n\n class_mapping = list(range(10))\n random.shuffle(class_mapping)\n\n subset = AvalancheSubset(\n dataset_mnist,\n indices=[3000, 8, 4, 1010, 12],\n class_mapping=class_mapping,\n )\n\n dataset = AvalancheSubset(subset, indices=[0, 3, 1])\n\n self.assertEqual(5, len(subset))\n self.assertEqual(3, len(dataset))\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n\n self.assertTrue(pil_images_equal(x, x3))\n expected_y3 = class_mapping[y]\n self.assertEqual(expected_y3, y3)\n self.assertTrue(pil_images_equal(x2, x4))\n expected_y4 = class_mapping[y2]\n self.assertEqual(expected_y4, y4)\n self.assertFalse(pil_images_equal(x, x4))\n self.assertFalse(pil_images_equal(x2, x3))\n\n def test_avalanche_avalanche_subset_recursion_up_class_mapping(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = dataset_mnist[3000]\n x2, y2 = dataset_mnist[1010]\n\n class_mapping = list(range(10))\n random.shuffle(class_mapping)\n\n subset = AvalancheSubset(dataset_mnist, indices=[3000, 8, 4, 1010, 12])\n\n dataset = AvalancheSubset(\n subset, indices=[0, 3, 1], class_mapping=class_mapping\n )\n\n self.assertEqual(5, len(subset))\n self.assertEqual(3, len(dataset))\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n\n self.assertTrue(pil_images_equal(x, x3))\n expected_y3 = class_mapping[y]\n self.assertEqual(expected_y3, y3)\n self.assertTrue(pil_images_equal(x2, x4))\n expected_y4 = class_mapping[y2]\n self.assertEqual(expected_y4, y4)\n self.assertFalse(pil_images_equal(x, x4))\n self.assertFalse(pil_images_equal(x2, x3))\n\n def test_avalanche_avalanche_subset_recursion_mix_class_mapping(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = dataset_mnist[3000]\n x2, y2 = dataset_mnist[1010]\n\n class_mapping = list(range(10))\n class_mapping2 = list(range(10))\n random.shuffle(class_mapping)\n random.shuffle(class_mapping2)\n\n subset = AvalancheSubset(\n dataset_mnist,\n indices=[3000, 8, 4, 1010, 12],\n class_mapping=class_mapping,\n )\n\n dataset = AvalancheSubset(\n subset, indices=[0, 3, 1], class_mapping=class_mapping2\n )\n\n self.assertEqual(5, len(subset))\n self.assertEqual(3, len(dataset))\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n\n self.assertTrue(pil_images_equal(x, x3))\n expected_y3 = class_mapping2[class_mapping[y]]\n self.assertEqual(expected_y3, y3)\n self.assertTrue(pil_images_equal(x2, 
x4))\n expected_y4 = class_mapping2[class_mapping[y2]]\n self.assertEqual(expected_y4, y4)\n self.assertFalse(pil_images_equal(x, x4))\n self.assertFalse(pil_images_equal(x2, x3))\n\n def test_avalanche_avalanche_subset_concat_stack_overflow(self):\n d_sz = 25\n tensor_x = torch.rand(d_sz, 3, 28, 28)\n tensor_y = torch.randint(0, 10, (d_sz,))\n tensor_t = torch.randint(0, 10, (d_sz,))\n dataset = AvalancheTensorDataset(\n tensor_x, tensor_y, task_labels=tensor_t\n )\n dataset_hierarchy_depth = 500\n\n rolling_indices: List[List[int]] = []\n expect_indices: List[List[int]] = []\n\n for _ in range(dataset_hierarchy_depth):\n idx_permuted = list(range(d_sz))\n random.shuffle(idx_permuted)\n rolling_indices.append(idx_permuted)\n\n forward_indices = range(d_sz)\n expect_indices.append(list(forward_indices))\n for idx in range(dataset_hierarchy_depth):\n forward_indices = [forward_indices[x] for x in rolling_indices[idx]]\n expect_indices.append(forward_indices)\n\n expect_indices = list(reversed(expect_indices))\n\n leaf = dataset\n\n for idx in range(dataset_hierarchy_depth):\n intermediate_idx_test = (dataset_hierarchy_depth - 1) - idx\n subset = AvalancheSubset(leaf, indices=rolling_indices[idx])\n leaf = AvalancheConcatDataset((subset, leaf))\n\n # Regression test for #616 (second bug)\n # https://github.com/ContinualAI/avalanche/issues/616#issuecomment-848852287\n all_targets = []\n for c_dataset in leaf._dataset_list:\n all_targets += c_dataset.targets\n\n all_targets = torch.tensor(all_targets)\n\n for idx_internal in range(idx + 1):\n leaf_range = range(\n idx_internal * d_sz, (idx_internal + 1) * d_sz\n )\n permuted = expect_indices[idx_internal + intermediate_idx_test]\n self.assertTrue(\n torch.equal(tensor_y[permuted], all_targets[leaf_range])\n )\n\n self.assertTrue(torch.equal(tensor_y, all_targets[-d_sz:]))\n\n self.assertEqual(d_sz * dataset_hierarchy_depth + d_sz, len(leaf))\n\n for idx in range(dataset_hierarchy_depth):\n leaf_range = range(idx * d_sz, (idx + 1) * d_sz)\n permuted = expect_indices[idx]\n self.assertTrue(\n torch.equal(tensor_x[permuted], leaf[leaf_range][0])\n )\n self.assertTrue(\n torch.equal(tensor_y[permuted], leaf[leaf_range][1])\n )\n self.assertTrue(\n torch.equal(\n tensor_y[permuted], torch.tensor(leaf.targets)[leaf_range]\n )\n )\n self.assertTrue(\n torch.equal(tensor_t[permuted], leaf[leaf_range][2])\n )\n\n self.assertTrue(\n torch.equal(tensor_x, leaf[d_sz * dataset_hierarchy_depth :][0])\n )\n self.assertTrue(\n torch.equal(tensor_y, leaf[d_sz * dataset_hierarchy_depth :][1])\n )\n self.assertTrue(\n torch.equal(\n tensor_y,\n torch.tensor(leaf.targets)[d_sz * dataset_hierarchy_depth :],\n )\n )\n self.assertTrue(\n torch.equal(tensor_t, leaf[d_sz * dataset_hierarchy_depth :][2])\n )\n\n def test_avalanche_concat_datasets_sequentially(self):\n # create list of training datasets\n train = [\n AvalancheDataset(\n TensorDataset(torch.randn(20, 10), torch.randint(0, 2, (20,)))\n ),\n AvalancheDataset(\n TensorDataset(torch.randn(20, 10), torch.randint(2, 4, (20,)))\n ),\n AvalancheDataset(\n TensorDataset(torch.randn(20, 10), torch.randint(4, 6, (20,)))\n ),\n AvalancheDataset(\n TensorDataset(torch.randn(20, 10), torch.randint(0, 2, (20,)))\n ),\n ]\n\n # create list of test datasets\n test = [\n AvalancheDataset(\n TensorDataset(torch.randn(20, 10), torch.randint(0, 2, (20,)))\n ),\n AvalancheDataset(\n TensorDataset(torch.randn(20, 10), torch.randint(2, 4, (20,)))\n ),\n AvalancheDataset(\n TensorDataset(torch.randn(20, 10), torch.randint(4, 6, 
(20,)))\n ),\n AvalancheDataset(\n TensorDataset(torch.randn(20, 10), torch.randint(0, 2, (20,)))\n ),\n ]\n\n # concatenate datasets\n final_train, _, classes = concat_datasets_sequentially(train, test)\n\n # merge all classes into a single list\n classes_all = []\n for class_list in classes:\n classes_all.extend(class_list)\n\n # get the target set of classes\n target_classes = list(set(map(int, final_train.targets)))\n\n # test for correctness\n self.assertEqual(classes_all, target_classes)\n\n\nclass TransformationSubsetTests(unittest.TestCase):\n def test_avalanche_subset_transform(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = dataset_mnist[0]\n dataset = AvalancheSubset(dataset_mnist, transform=ToTensor())\n x2, y2, t2 = dataset[0]\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(y2, int)\n self.assertIsInstance(t2, int)\n self.assertTrue(torch.equal(ToTensor()(x), x2))\n self.assertEqual(y, y2)\n self.assertEqual(0, t2)\n\n def test_avalanche_subset_composition(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"),\n download=True,\n transform=RandomCrop(16),\n )\n x, y = dataset_mnist[0]\n self.assertIsInstance(x, Image)\n self.assertEqual([x.width, x.height], [16, 16])\n self.assertIsInstance(y, int)\n\n dataset = AvalancheSubset(\n dataset_mnist,\n transform=ToTensor(),\n target_transform=lambda target: -1,\n )\n\n x2, y2, t2 = dataset[0]\n self.assertIsInstance(x2, Tensor)\n self.assertEqual(x2.shape, (1, 16, 16))\n self.assertIsInstance(y2, int)\n self.assertIsInstance(t2, int)\n self.assertEqual(y2, -1)\n self.assertEqual(0, t2)\n\n def test_avalanche_subset_indices(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = dataset_mnist[1000]\n x2, y2 = dataset_mnist[1007]\n\n dataset = AvalancheSubset(dataset_mnist, indices=[1000, 1007])\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n self.assertTrue(pil_images_equal(x, x3))\n self.assertEqual(y, y3)\n self.assertTrue(pil_images_equal(x2, x4))\n self.assertEqual(y2, y4)\n self.assertFalse(pil_images_equal(x, x4))\n self.assertFalse(pil_images_equal(x2, x3))\n\n def test_avalanche_subset_mapping(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n _, y = dataset_mnist[1000]\n\n mapping = list(range(10))\n other_classes = list(mapping)\n other_classes.remove(y)\n\n swap_y = random.choice(other_classes)\n\n mapping[y] = swap_y\n mapping[swap_y] = y\n\n dataset = AvalancheSubset(dataset_mnist, class_mapping=mapping)\n\n _, y2, _ = dataset[1000]\n self.assertEqual(y2, swap_y)\n\n def test_avalanche_subset_uniform_task_labels(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = dataset_mnist[1000]\n x2, y2 = dataset_mnist[1007]\n\n # First, test by passing len(task_labels) == len(dataset_mnist)\n dataset = AvalancheSubset(\n dataset_mnist,\n indices=[1000, 1007],\n task_labels=[1] * len(dataset_mnist),\n )\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n self.assertEqual(y, y3)\n self.assertEqual(1, t3)\n self.assertEqual(y2, y4)\n self.assertEqual(1, t4)\n\n # Secondly, test by passing len(task_labels) == len(indices)\n dataset = AvalancheSubset(\n dataset_mnist, indices=[1000, 1007], task_labels=[1, 1]\n )\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n self.assertEqual(y, y3)\n self.assertEqual(1, t3)\n self.assertEqual(y2, y4)\n self.assertEqual(1, t4)\n\n def 
test_avalanche_subset_mixed_task_labels(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = dataset_mnist[1000]\n x2, y2 = dataset_mnist[1007]\n\n full_task_labels = [1] * len(dataset_mnist)\n full_task_labels[1000] = 2\n # First, test by passing len(task_labels) == len(dataset_mnist)\n dataset = AvalancheSubset(\n dataset_mnist, indices=[1000, 1007], task_labels=full_task_labels\n )\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n self.assertEqual(y, y3)\n self.assertEqual(2, t3)\n self.assertEqual(y2, y4)\n self.assertEqual(1, t4)\n\n # Secondly, test by passing len(task_labels) == len(indices)\n dataset = AvalancheSubset(\n dataset_mnist, indices=[1000, 1007], task_labels=[3, 5]\n )\n\n x3, y3, t3 = dataset[0]\n x4, y4, t4 = dataset[1]\n self.assertEqual(y, y3)\n self.assertEqual(3, t3)\n self.assertEqual(y2, y4)\n self.assertEqual(5, t4)\n\n def test_avalanche_subset_task_labels_inheritance(self):\n dataset_mnist = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n random_task_labels = [\n random.randint(0, 10) for _ in range(len(dataset_mnist))\n ]\n dataset_orig = AvalancheDataset(\n dataset_mnist, transform=ToTensor(), task_labels=random_task_labels\n )\n\n dataset_child = AvalancheSubset(dataset_orig, indices=[1000, 1007])\n _, _, t2 = dataset_orig[1000]\n _, _, t5 = dataset_orig[1007]\n _, _, t3 = dataset_child[0]\n _, _, t6 = dataset_child[1]\n\n self.assertEqual(random_task_labels[1000], t2)\n self.assertEqual(random_task_labels[1007], t5)\n self.assertEqual(random_task_labels[1000], t3)\n self.assertEqual(random_task_labels[1007], t6)\n\n self.assertListEqual(\n random_task_labels, list(dataset_orig.targets_task_labels)\n )\n\n self.assertListEqual(\n [random_task_labels[1000], random_task_labels[1007]],\n list(dataset_child.targets_task_labels),\n )\n\n def test_avalanche_subset_collate_fn_inheritance(self):\n tensor_x = torch.rand(200, 3, 28, 28)\n tensor_y = torch.randint(0, 100, (200,))\n tensor_z = torch.randint(0, 100, (200,))\n\n def my_collate_fn(patterns):\n x_values = torch.stack([pat[0] for pat in patterns], 0)\n y_values = torch.tensor([pat[1] for pat in patterns]) + 1\n z_values = torch.tensor([-1 for _ in patterns])\n t_values = torch.tensor([pat[3] for pat in patterns])\n return x_values, y_values, z_values, t_values\n\n def my_collate_fn2(patterns):\n x_values = torch.stack([pat[0] for pat in patterns], 0)\n y_values = torch.tensor([pat[1] for pat in patterns]) + 2\n z_values = torch.tensor([-2 for _ in patterns])\n t_values = torch.tensor([pat[3] for pat in patterns])\n return x_values, y_values, z_values, t_values\n\n whole_dataset = TensorDataset(tensor_x, tensor_y, tensor_z)\n dataset = AvalancheDataset(whole_dataset, collate_fn=my_collate_fn)\n inherited = AvalancheSubset(\n dataset, indices=list(range(5, 150)), collate_fn=my_collate_fn2\n ) # Ok\n\n x, y, z, t = inherited[0:5]\n self.assertIsInstance(x, Tensor)\n self.assertTrue(torch.equal(tensor_x[5:10], x))\n self.assertTrue(torch.equal(tensor_y[5:10] + 2, y))\n self.assertTrue(torch.equal(torch.full((5,), -2, dtype=torch.long), z))\n self.assertTrue(torch.equal(torch.zeros(5, dtype=torch.long), t))\n\n classification_dataset = AvalancheDataset(\n whole_dataset, dataset_type=AvalancheDatasetType.CLASSIFICATION\n )\n\n with self.assertRaises(ValueError):\n bad_inherited = AvalancheSubset(\n classification_dataset,\n indices=list(range(5, 150)),\n collate_fn=my_collate_fn,\n )\n ok_inherited_classification = AvalancheSubset(\n 
classification_dataset, indices=list(range(5, 150))\n )\n self.assertEqual(\n AvalancheDatasetType.CLASSIFICATION,\n ok_inherited_classification.dataset_type,\n )\n\n\nclass TransformationTensorDatasetTests(unittest.TestCase):\n def test_tensor_dataset_helper_tensor_y(self):\n\n train_exps = [\n [torch.rand(50, 32, 32), torch.randint(0, 100, (50,))]\n for _ in range(5)\n ]\n test_exps = [\n [torch.rand(23, 32, 32), torch.randint(0, 100, (23,))]\n for _ in range(5)\n ]\n\n cl_benchmark = create_generic_benchmark_from_tensor_lists(\n train_tensors=train_exps,\n test_tensors=test_exps,\n task_labels=[0] * 5,\n )\n\n self.assertEqual(5, len(cl_benchmark.train_stream))\n self.assertEqual(5, len(cl_benchmark.test_stream))\n self.assertEqual(5, cl_benchmark.n_experiences)\n\n for exp_id in range(cl_benchmark.n_experiences):\n benchmark_train_x, benchmark_train_y, _ = load_all_dataset(\n cl_benchmark.train_stream[exp_id].dataset\n )\n benchmark_test_x, benchmark_test_y, _ = load_all_dataset(\n cl_benchmark.test_stream[exp_id].dataset\n )\n\n self.assertTrue(\n torch.all(torch.eq(train_exps[exp_id][0], benchmark_train_x))\n )\n self.assertTrue(\n torch.all(torch.eq(train_exps[exp_id][1], benchmark_train_y))\n )\n self.assertSequenceEqual(\n train_exps[exp_id][1].tolist(),\n cl_benchmark.train_stream[exp_id].dataset.targets,\n )\n self.assertEqual(0, cl_benchmark.train_stream[exp_id].task_label)\n\n self.assertTrue(\n torch.all(torch.eq(test_exps[exp_id][0], benchmark_test_x))\n )\n self.assertTrue(\n torch.all(torch.eq(test_exps[exp_id][1], benchmark_test_y))\n )\n self.assertSequenceEqual(\n test_exps[exp_id][1].tolist(),\n cl_benchmark.test_stream[exp_id].dataset.targets,\n )\n self.assertEqual(0, cl_benchmark.test_stream[exp_id].task_label)\n\n def test_tensor_dataset_helper_list_y(self):\n train_exps = [\n (torch.rand(50, 32, 32), torch.randint(0, 100, (50,)).tolist())\n for _ in range(5)\n ]\n test_exps = [\n (torch.rand(23, 32, 32), torch.randint(0, 100, (23,)).tolist())\n for _ in range(5)\n ]\n\n cl_benchmark = create_generic_benchmark_from_tensor_lists(\n train_tensors=train_exps,\n test_tensors=test_exps,\n task_labels=[0] * 5,\n )\n\n self.assertEqual(5, len(cl_benchmark.train_stream))\n self.assertEqual(5, len(cl_benchmark.test_stream))\n self.assertEqual(5, cl_benchmark.n_experiences)\n\n for exp_id in range(cl_benchmark.n_experiences):\n benchmark_train_x, benchmark_train_y, _ = load_all_dataset(\n cl_benchmark.train_stream[exp_id].dataset\n )\n benchmark_test_x, benchmark_test_y, _ = load_all_dataset(\n cl_benchmark.test_stream[exp_id].dataset\n )\n\n self.assertTrue(\n torch.all(torch.eq(train_exps[exp_id][0], benchmark_train_x))\n )\n self.assertSequenceEqual(\n train_exps[exp_id][1], benchmark_train_y.tolist()\n )\n self.assertSequenceEqual(\n train_exps[exp_id][1],\n cl_benchmark.train_stream[exp_id].dataset.targets,\n )\n self.assertEqual(0, cl_benchmark.train_stream[exp_id].task_label)\n\n self.assertTrue(\n torch.all(torch.eq(test_exps[exp_id][0], benchmark_test_x))\n )\n self.assertSequenceEqual(\n test_exps[exp_id][1], benchmark_test_y.tolist()\n )\n self.assertSequenceEqual(\n test_exps[exp_id][1],\n cl_benchmark.test_stream[exp_id].dataset.targets,\n )\n self.assertEqual(0, cl_benchmark.test_stream[exp_id].task_label)\n\n\nclass AvalancheDatasetTransformOpsTests(unittest.TestCase):\n def test_avalanche_inherit_groups(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n\n def plus_one_target(target):\n return target + 1\n\n 
transform_groups = dict(\n train=(ToTensor(), None), eval=(None, plus_one_target)\n )\n x, y = original_dataset[0]\n dataset = AvalancheDataset(\n original_dataset, transform_groups=transform_groups\n )\n\n x2, y2, _ = dataset[0]\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(y2, int)\n self.assertTrue(torch.equal(ToTensor()(x), x2))\n self.assertEqual(y, y2)\n\n dataset_eval = dataset.eval()\n\n x3, y3, _ = dataset_eval[0]\n self.assertIsInstance(x3, PIL.Image.Image)\n self.assertIsInstance(y3, int)\n self.assertEqual(y + 1, y3)\n\n # Regression test for #565\n dataset_inherit = AvalancheDataset(dataset_eval)\n\n x4, y4, _ = dataset_inherit[0]\n self.assertIsInstance(x4, PIL.Image.Image)\n self.assertIsInstance(y4, int)\n self.assertEqual(y + 1, y4)\n\n # Regression test for #566\n dataset_sub_train = AvalancheSubset(dataset)\n dataset_sub_eval = dataset_sub_train.eval()\n dataset_sub = AvalancheSubset(dataset_sub_eval, indices=[0])\n\n x5, y5, _ = dataset_sub[0]\n self.assertIsInstance(x5, PIL.Image.Image)\n self.assertIsInstance(y5, int)\n self.assertEqual(y + 1, y5)\n # End regression tests\n\n concat_dataset = AvalancheConcatDataset([dataset_sub_eval, dataset_sub])\n\n x6, y6, _ = concat_dataset[0]\n self.assertIsInstance(x6, PIL.Image.Image)\n self.assertIsInstance(y6, int)\n self.assertEqual(y + 1, y6)\n\n concat_dataset_no_inherit_initial = AvalancheConcatDataset(\n [dataset_sub_eval, dataset]\n )\n\n x7, y7, _ = concat_dataset_no_inherit_initial[0]\n self.assertIsInstance(x7, Tensor)\n self.assertIsInstance(y7, int)\n self.assertEqual(y, y7)\n\n def test_freeze_transforms(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = original_dataset[0]\n dataset = AvalancheDataset(original_dataset, transform=ToTensor())\n dataset_frozen = dataset.freeze_transforms()\n dataset_frozen.transform = None\n\n x2, y2, _ = dataset_frozen[0]\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(y2, int)\n self.assertTrue(torch.equal(ToTensor()(x), x2))\n self.assertEqual(y, y2)\n\n dataset.transform = None\n x2, y2, _ = dataset[0]\n self.assertIsInstance(x2, Image)\n\n x2, y2, _ = dataset_frozen[0]\n self.assertIsInstance(x2, Tensor)\n\n def test_freeze_transforms_chain(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"),\n download=True,\n transform=ToTensor(),\n )\n x, *_ = original_dataset[0]\n self.assertIsInstance(x, Tensor)\n\n dataset_transform = AvalancheDataset(\n original_dataset, transform=ToPILImage()\n )\n x, *_ = dataset_transform[0]\n self.assertIsInstance(x, Image)\n\n dataset_frozen = dataset_transform.freeze_transforms()\n\n x2, *_ = dataset_frozen[0]\n self.assertIsInstance(x2, Image)\n\n dataset_transform.transform = None\n\n x2, *_ = dataset_transform[0]\n self.assertIsInstance(x2, Tensor)\n\n dataset_frozen.transform = ToTensor()\n\n x2, *_ = dataset_frozen[0]\n self.assertIsInstance(x2, Tensor)\n\n dataset_frozen2 = dataset_frozen.freeze_transforms()\n\n x2, *_ = dataset_frozen2[0]\n self.assertIsInstance(x2, Tensor)\n\n dataset_frozen.transform = None\n\n x2, *_ = dataset_frozen2[0]\n self.assertIsInstance(x2, Tensor)\n x2, *_ = dataset_frozen[0]\n self.assertIsInstance(x2, Image)\n\n def test_add_transforms(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, _ = original_dataset[0]\n dataset = AvalancheDataset(original_dataset, transform=ToTensor())\n dataset_added = dataset.add_transforms(ToPILImage())\n x2, *_ = dataset[0]\n 
x3, *_ = dataset_added[0]\n self.assertIsInstance(x, Image)\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(x3, Image)\n\n def test_add_transforms_chain(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, _ = original_dataset[0]\n dataset = AvalancheDataset(original_dataset, transform=ToTensor())\n dataset_added = AvalancheDataset(dataset, transform=ToPILImage())\n x2, *_ = dataset[0]\n x3, *_ = dataset_added[0]\n self.assertIsInstance(x, Image)\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(x3, Image)\n\n def test_transforms_freeze_add_mix(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, _ = original_dataset[0]\n dataset = AvalancheDataset(original_dataset, transform=ToTensor())\n dataset_frozen = dataset.freeze_transforms()\n dataset_added = dataset_frozen.add_transforms(ToPILImage())\n\n self.assertEqual(None, dataset_frozen.transform)\n\n x2, *_ = dataset[0]\n x3, *_ = dataset_added[0]\n self.assertIsInstance(x, Image)\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(x3, Image)\n\n dataset_frozen = dataset_added.freeze_transforms()\n dataset_added.transform = None\n\n x4, *_ = dataset_frozen[0]\n x5, *_ = dataset_added[0]\n self.assertIsInstance(x4, Image)\n self.assertIsInstance(x5, Tensor)\n\n def test_replace_transforms(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, y = original_dataset[0]\n dataset = AvalancheDataset(original_dataset, transform=ToTensor())\n x2, *_ = dataset[0]\n dataset_reset = dataset.replace_transforms(None, None)\n x3, *_ = dataset_reset[0]\n\n self.assertIsInstance(x, Image)\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(x3, Image)\n\n dataset_reset.transform = ToTensor()\n\n x4, *_ = dataset_reset[0]\n self.assertIsInstance(x4, Tensor)\n\n dataset_reset.replace_transforms(None, None)\n\n x5, *_ = dataset_reset[0]\n self.assertIsInstance(x5, Tensor)\n\n dataset_other = AvalancheDataset(dataset_reset)\n dataset_other = dataset_other.replace_transforms(None, lambda l: l + 1)\n\n _, y6, _ = dataset_other[0]\n self.assertEqual(y + 1, y6)\n\n def test_transforms_replace_freeze_mix(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n x, _ = original_dataset[0]\n dataset = AvalancheDataset(original_dataset, transform=ToTensor())\n x2, *_ = dataset[0]\n dataset_reset = dataset.replace_transforms(None, None)\n x3, *_ = dataset_reset[0]\n\n self.assertIsInstance(x, Image)\n self.assertIsInstance(x2, Tensor)\n self.assertIsInstance(x3, Image)\n\n dataset_frozen = dataset.freeze_transforms()\n\n x4, *_ = dataset_frozen[0]\n self.assertIsInstance(x4, Tensor)\n\n dataset_frozen_reset = dataset_frozen.replace_transforms(None, None)\n\n x5, *_ = dataset_frozen_reset[0]\n self.assertIsInstance(x5, Tensor)\n\n def test_transforms_groups_base_usage(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n dataset = AvalancheDataset(\n original_dataset,\n transform_groups=dict(\n train=(ToTensor(), None),\n eval=(None, Lambda(lambda t: float(t))),\n ),\n )\n\n x, y, _ = dataset[0]\n self.assertIsInstance(x, Tensor)\n self.assertIsInstance(y, int)\n\n dataset_test = dataset.eval()\n\n x2, y2, _ = dataset_test[0]\n x3, y3, _ = dataset[0]\n self.assertIsInstance(x2, Image)\n self.assertIsInstance(y2, float)\n self.assertIsInstance(x3, Tensor)\n self.assertIsInstance(y3, int)\n\n dataset_train = dataset.train()\n dataset.transform = None\n\n x4, y4, _ = dataset_train[0]\n x5, y5, _ = dataset[0]\n self.assertIsInstance(x4, Tensor)\n self.assertIsInstance(y4, int)\n self.assertIsInstance(x5, Image)\n self.assertIsInstance(y5, int)\n\n def test_transforms_groups_constructor_error(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n with self.assertRaises(Exception):\n # The train tuple has only one element\n dataset = AvalancheDataset(\n original_dataset,\n transform_groups=dict(\n train=(ToTensor(), None), eval=(Lambda(lambda t: float(t)))\n ),\n )\n\n with self.assertRaises(Exception):\n # The eval entry is a list, not a tuple\n dataset = AvalancheDataset(\n original_dataset,\n transform_groups=dict(\n train=(ToTensor(), None),\n eval=[None, Lambda(lambda t: float(t))],\n ),\n )\n\n with self.assertRaises(Exception):\n # Train is None\n dataset = AvalancheDataset(\n original_dataset,\n transform_groups=dict(\n train=None, eval=(None, Lambda(lambda t: float(t)))\n ),\n )\n\n with self.assertRaises(Exception):\n # transform_groups is not a dictionary\n dataset = AvalancheDataset(\n original_dataset, transform_groups=\"Hello world!\"\n )\n\n def test_transforms_groups_alternative_default_group(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n dataset = AvalancheDataset(\n original_dataset,\n transform_groups=dict(train=(ToTensor(), None), eval=(None, None)),\n initial_transform_group=\"eval\",\n )\n\n x, *_ = dataset[0]\n self.assertIsInstance(x, Image)\n\n dataset_test = dataset.eval()\n\n x2, *_ = dataset_test[0]\n x3, *_ = dataset[0]\n self.assertIsInstance(x2, Image)\n self.assertIsInstance(x3, Image)\n\n def test_transforms_groups_partial_constructor(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n dataset = AvalancheDataset(\n original_dataset, transform_groups=dict(train=(ToTensor(), None))\n )\n\n x, *_ = dataset[0]\n self.assertIsInstance(x, Tensor)\n\n dataset = dataset.eval()\n x2, *_ = dataset[0]\n self.assertIsInstance(x2, Tensor)\n\n def test_transforms_groups_multiple_groups(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n dataset = AvalancheDataset(\n original_dataset,\n transform_groups=dict(\n train=(ToTensor(), None),\n eval=(None, None),\n other=(\n Compose(\n [ToTensor(), Lambda(lambda tensor: tensor.numpy())]\n ),\n None,\n ),\n ),\n )\n\n x, *_ = dataset[0]\n self.assertIsInstance(x, Tensor)\n\n dataset = dataset.eval()\n x2, *_ = dataset[0]\n self.assertIsInstance(x2, Image)\n\n dataset = dataset.with_transforms(\"other\")\n x3, *_ = dataset[0]\n self.assertIsInstance(x3, np.ndarray)\n\n def test_transforms_add_group(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n dataset = AvalancheDataset(original_dataset)\n\n with self.assertRaises(Exception):\n # Can't add existing groups\n dataset = dataset.add_transforms_group(\"train\", ToTensor(), None)\n\n with self.assertRaises(Exception):\n # Can't add group with bad names (must be str)\n dataset = dataset.add_transforms_group(123, ToTensor(), None)\n\n # Adding a brand new group with a valid name must succeed\n dataset = dataset.add_transforms_group(\"other\", ToTensor(), None)\n dataset_other = dataset.with_transforms(\"other\")\n\n x, *_ = dataset[0]\n x2, *_ = dataset_other[0]\n self.assertIsInstance(x, Image)\n self.assertIsInstance(x2, Tensor)\n\n dataset_other2 = AvalancheDataset(dataset_other)\n\n # Checks that the other group is used on dataset_other2\n x3, *_ = dataset_other2[0]\n self.assertIsInstance(x3, Tensor)\n\n with self.assertRaises(Exception):\n # Can't add group if it already exists\n dataset_other2 = dataset_other2.add_transforms_group(\n \"other\", ToTensor(), None\n )\n\n # Check that the above failed method didn't change the 'other' group\n x4, *_ = dataset_other2[0]\n self.assertIsInstance(x4, Tensor)\n\n def test_transformation_concat_dataset(self):\n original_dataset = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n original_dataset2 = MNIST(\n root=default_dataset_location(\"mnist\"), download=True\n )\n\n dataset = AvalancheConcatDataset([original_dataset, original_dataset2])\n\n self.assertEqual(\n len(original_dataset) + len(original_dataset2), len(dataset)\n )\n\n def test_transformation_concat_dataset_groups(self):\n original_dataset = AvalancheDataset(\n MNIST(root=default_dataset_location(\"mnist\"), download=True),\n transform_groups=dict(eval=(None, None), train=(ToTensor(), None)),\n )\n original_dataset2 = AvalancheDataset(\n MNIST(root=default_dataset_location(\"mnist\"), download=True),\n transform_groups=dict(train=(None, None), eval=(ToTensor(), None)),\n )\n\n dataset = AvalancheConcatDataset([original_dataset, original_dataset2])\n\n self.assertEqual(\n len(original_dataset) + len(original_dataset2), len(dataset)\n )\n\n x, *_ = dataset[0]\n x2, *_ = dataset[len(original_dataset)]\n self.assertIsInstance(x, Tensor)\n self.assertIsInstance(x2, Image)\n\n dataset = dataset.eval()\n\n x3, *_ = dataset[0]\n x4, *_ = dataset[len(original_dataset)]\n self.assertIsInstance(x3, Image)\n self.assertIsInstance(x4, Tensor)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.utils.data.ConcatDataset", "torch.cat", "torch.stack", "torch.ones", "torch.nn.CrossEntropyLoss", "torch.randint", "torch.tensor", "torch.as_tensor", "torch.equal", "torch.zeros", "torch.min", "torch.max", "torch.full", "torch.utils.data.TensorDataset", "torch.rand", "torch.eq", "torch.utils.data.Subset", "torch.randn", "numpy.unique" ] ]
luiscameroo/soccer-matlab
[ "e6b0a0f722bda30b4b1c6298998508653be318e8" ]
[ "soccer-rl/pybullet/gym/pybullet_envs/ARS/shared_noise.py" ]
[ "\"\"\"\nCode in this file is copied and adapted from\nhttps://github.com/ray-project/ray/tree/master/python/ray/rllib/es\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\n\n\ndef create_shared_noise():\n \"\"\"\n Create a large array of noise to be shared by all workers. Used\n for avoiding the communication of the random perturbations delta.\n \"\"\"\n\n seed = 12345\n count = 250000000\n noise = np.random.RandomState(seed).randn(count).astype(np.float64)\n return noise\n\n\nclass SharedNoiseTable(object):\n def __init__(self, noise, seed = 11):\n\n self.rg = np.random.RandomState(seed)\n self.noise = noise\n assert self.noise.dtype == np.float64\n\n def get(self, i, dim):\n return self.noise[i:i + dim]\n\n def sample_index(self, dim):\n return self.rg.randint(0, len(self.noise) - dim + 1)\n\n def get_delta(self, dim):\n idx = self.sample_index(dim)\n return idx, self.get(idx, dim)\n\n" ]
[ [ "numpy.random.RandomState" ] ]
danielhanchen/hiperlearn
[ "7e2d7735bcb40854462decd5e5c8d70afd90aede" ]
[ "hyperlearn/decomposition/NMF.py" ]
[ "\nfrom ..numba import _min, _max, maximum, minimum, norm, njit, prange, squaresum\nfrom numpy import zeros, float32, float64\nfrom ..utils import _float, reflect, _XTX, _XXT\nfrom ..big_data.randomized import randomizedSVD\nfrom ..solvers import solveCholesky\n\n\ndef intialize_NMF(X, n_components = 2, eps = 1e-6, init = 'nndsvd', HT = True):\n\tU, S, VT = randomizedSVD(X, n_components = n_components)\n\n\tdtype = U.dtype\n\tW, H = zeros(U.shape, dtype), zeros(VT.shape, dtype)\n\tSa = S[0]**0.5\n\n\tW[:, 0] = Sa * abs(U[:, 0])\n\tH[0, :] = Sa * abs(VT[0, :])\n\n\tfor j in range(1, n_components):\n\t\ta, b = U[:,j], VT[j,:]\n\n\t\ta_p, b_p = maximum(a, 0), maximum(b, 0)\n\t\ta_n, b_n = abs(minimum(a, 0)), abs(minimum(b, 0))\n\n\t\ta_p_norm, b_p_norm = norm(a_p), norm(b_p)\n\t\ta_n_norm, b_n_norm = norm(a_n), norm(b_n)\n\n\t\tm_p, m_n = a_p_norm * b_p_norm, a_n_norm * b_n_norm\n\n\t\t# Update\n\t\tif m_p > m_n:\n\t\t\ta_p /= a_p_norm\n\t\t\tb_p /= b_p_norm\n\t\t\tu,v,sigma = a_p, b_p, m_p\n\t\telse:\n\t\t\ta_n /= a_n_norm\n\t\t\tb_n /= b_n_norm\n\t\t\tu,v,sigma = a_n, b_n, m_n\n\n\t\tlbd = (S[j] * sigma)**0.5\n\t\tW[:,j], H[j,:] = lbd*u, lbd*v\n\n\tW, H = maximum(W, 0), maximum(H, 0)\n\tif HT:\n\t\treturn W, H.T.copy(), X\n\treturn W, H, X\n\n\n\ndef update_CD_base(W, HHT, XHT, n, k, runs = 1):\n\tviolation = 0\n\tXHT *= -1\n\t\n\tfor t in prange(k):\n\t\t# Hessian\n\t\tH_part = HHT[t]\n\t\thess = H_part[t]\n\n\t\tif hess == 0:\n\t\t\tfor run in range(runs):\n\t\t\t\tfor i in prange(n):\n\t\t\t\t\tW_i = W[i]\n\t\t\t\t\tW_it = W_i[t]\n\t\t\t\t\t# gradient = GW[t, i] where GW = np.dot(W, HHt) - XHt\n\t\t\t\t\tgrad = XHT[i, t]\n\n\t\t\t\t\tfor r in prange(k): grad += H_part[r] * W_i[r]\n\t\t\t\t\t\n\t\t\t\t\t# projected gradient\n\t\t\t\t\tpg = _min(0., grad) if W_it == 0 else grad\n\t\t\t\t\tviolation += abs(pg)\n\n\t\telse:\n\t\t\tfor run in range(runs):\n\t\t\t\tfor i in prange(n):\n\t\t\t\t\tW_i = W[i]\n\t\t\t\t\tW_it = W_i[t]\n\t\t\t\t\t# gradient = GW[t, i] where GW = np.dot(W, HHt) - XHt\n\t\t\t\t\tgrad = XHT[i, t]\n\n\t\t\t\t\tfor r in prange(k): grad += H_part[r] * W_i[r]\n\t\t\t\t\t\n\t\t\t\t\t# projected gradient\n\t\t\t\t\tpg = _min(0., grad) if W_it == 0 else grad\n\t\t\t\t\tviolation += abs(pg)\n\t\t\t\t\t\n\t\t\t\t\tif grad != 0:\n\t\t\t\t\t\tW[i, t] = _max(W_it - grad / hess, 0.)\n\treturn violation\nupdate_CD = njit(update_CD_base, fastmath = True, nogil = True, cache = True)\nupdate_CD_parallel = njit(update_CD_base, fastmath = True, nogil = True, parallel = True)\n\n\n\ndef nmf_cd(X, n_components = 2, tol = 1e-4, max_iter = 200, init = 'nndsvd', speed = 1, n_jobs = 1):\n\tW, HT, X = intialize_NMF(X, n_components)\n\n\tXT = X.T\n\tn,k = W.shape\n\tp,k = HT.shape\n\n\tupdate_CD_i = update_CD_parallel if n_jobs != 1 else update_CD\n\n\tif speed != 1: \n\t\tmax_iter = _min(int(200/speed*1.5), 5)\n\n\tfor n_iter in range(max_iter):\n\t\t# Update W\n\t\t#HHT = reflect()\n\t\tviolation = update_CD_i(W, HT.T@HT, X@HT, n, k, speed)\n\t\t# Update H\n\t\tviolation += update_CD_i(HT, W.T@W, XT@W, p, k, speed)\n\t\t#loss.append(squareSum(X - [email protected]).sum())\n\t\t\n\t\tif n_iter == 0:\n\t\t\tviolation_init = violation\n\n\t\tif violation_init == 0:\n\t\t\tbreak\n\n\t\tif violation / violation_init <= tol:\n\t\t\tbreak\n\treturn W, HT.T\n\n\n\ndef nmf_als(X, n_components = 2, max_iter = 100, init = 'nndsvd', alpha = None):\n\tW, H, X = intialize_NMF(X, n_components, HT = False)\n\tXT = X.T\n\tn = X.shape[0]\n\tpast_error = 1e100\n\n\tfor i in range(max_iter):\n\t\tH = 
maximum(solveCholesky(W, X, alpha = alpha), 0)\n\t\tW = maximum(solveCholesky(H.T, XT, alpha = alpha), 0).T\n\t\tif i % 10 == 0:\n\t\t\terror = squaresum(X - W@H)/n\n\t\t\tif error/past_error > 0.9:\n\t\t\t\tbreak\n\t\t\tpast_error = error\n\treturn W, H\n\n\n\n_X = zeros((2,2), float32)\n_XX = nmf_cd(_X, 1, max_iter = 1)\n_X = nmf_cd(_X, 1, max_iter = 1, n_jobs = -1)\n_X = zeros((2,2), float64)\n_XX = nmf_cd(_X, 1, max_iter = 1)\n_X = nmf_cd(_X, 1, max_iter = 1, n_jobs = -1)\n_X = None\n_XX = None\n" ]
[ [ "numpy.zeros" ] ]
ozercevikaslan/MyMLWorkSpace
[ "973d8fa04bed7503c3e3061ac02a4d6022a92e61" ]
[ "HouseSalePricesRegression.py" ]
[ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndataset = pd.read_csv('kc_house_data.csv')\n\ndataset.drop('zipcode', axis=1, inplace=True)\ndataset.drop('lat', axis=1, inplace=True)\ndataset.drop('long', axis=1, inplace=True)\n\nX = dataset.iloc[:, 3:18].values\n\ny = dataset.iloc[:, 2].values\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=0)\n\nfrom sklearn.ensemble import RandomForestRegressor\n\nRFRegressor = RandomForestRegressor(n_estimators=500, random_state=0)\nRFRegressor.fit(X_train, y_train)\n\nhaha = X_test[2, :]\n\nresult = RFRegressor.predict([haha])\n\nX_grid = np.arange(max(X_test), min(X_test), 0.01)\nX_grid = X_grid.reshape((len(X_grid), 15))\nplt.scatter(X_train[:, 0], y_train, color='red')\nplt.plot(X_grid, RFRegressor.predict(X_grid), color='blue')\nplt.title('Decision Tree')\nplt.xlabel('X Variables')\nplt.ylabel('Price')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.xlabel", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "sklearn.ensemble.RandomForestRegressor", "matplotlib.pyplot.ylabel", "sklearn.model_selection.train_test_split", "matplotlib.pyplot.scatter", "pandas.read_csv" ] ]
OlavH96/Master
[ "f98476063e579b7b2a80b81a2c0ca4005f5fce80" ]
[ "src/sign_detection/image_generation/create_records.py" ]
[ "import configparser\nimport os\nfrom pathlib import Path\n\nimport pandas as pd\nimport tensorflow as tf\n\nfrom object_detection.utils import dataset_util\n\nroot_dir = Path.cwd()\n\nconfig = configparser.ConfigParser()\nconfig.read(root_dir / 'config.ini')\n\nsize_x = int(config['Model']['size_generated_image_x'])\nsize_y = int(config['Model']['size_generated_image_y'])\nconfig_name = config['Model']['config_name']\nconfig_path = config['Model']['config_path']\ntraining_steps = config['Model']['training_steps']\nnum_generated_images = config['Model']['num_generated_images']\ntemplate_path = config['Model']['template_path']\n\npath_to_image_generation_data = config['Model']['path_to_image_generation_data']\n\nflags = tf.app.flags\nflags.DEFINE_string('output_path', '', 'Path to output TFRecord')\nFLAGS = flags.FLAGS\n\n\ndef create_tf_example(path, filename, labels):\n # TODO(user): Populate the following variables from your example.\n height = size_y # Image height\n width = size_x # Image width\n bfilename = filename.encode() # Filename of the image. Empty if image is not from file\n with tf.gfile.GFile(os.path.join(path, filename), 'rb') as f:\n encoded_image_data = f.read() # Encoded image bytes\n image_format = b'jpeg' # b'jpeg' or b'png'\n\n xmins = labels.xmin.tolist() # List of normalized left x coordinates in bounding box (1 per box)\n xmaxs = labels.xmax.tolist() # List of normalized right x coordinates in bounding box\n # (1 per box)\n ymins = labels.ymin.tolist() # List of normalized top y coordinates in bounding box (1 per box)\n ymaxs = labels.ymax.tolist() # List of normalized bottom y coordinates in bounding box\n # (1 per box)\n classes_text = list(map(str.encode, labels.classlabel)) # List of string class name of bounding box (1 per box)\n classes = labels.classid.tolist() # List of integer class id of bounding box (1 per box)\n\n tf_example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': dataset_util.int64_feature(height),\n 'image/width': dataset_util.int64_feature(width),\n 'image/filename': dataset_util.bytes_feature(bfilename),\n 'image/source_id': dataset_util.bytes_feature(bfilename),\n 'image/encoded': dataset_util.bytes_feature(encoded_image_data),\n 'image/format': dataset_util.bytes_feature(image_format),\n 'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),\n 'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),\n 'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),\n 'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),\n 'image/object/class/text': dataset_util.bytes_list_feature(classes_text),\n 'image/object/class/label': dataset_util.int64_list_feature(classes),\n }))\n return tf_example\n\n\ndef main(_):\n\n LABEL_FILE = f'{path_to_image_generation_data}/generated/train_labels.csv'\n IMAGES_PATH = f'{path_to_image_generation_data}/generated/images'\n writer = tf.python_io.TFRecordWriter(FLAGS.output_path)\n\n labels = pd.read_csv(LABEL_FILE)\n\n for filename in os.listdir(IMAGES_PATH):\n tf_example = create_tf_example(IMAGES_PATH, filename, labels[labels.filename == filename])\n writer.write(tf_example.SerializeToString())\n\n writer.close()\n\n\nif __name__ == '__main__':\n tf.app.run()\n" ]
[ [ "tensorflow.app.run", "pandas.read_csv", "tensorflow.python_io.TFRecordWriter" ] ]
opti-mix/glow
[ "4ba074df5da9822986a23a6679ab592c22660f6d" ]
[ "torch_glow/tests/nodes/addmm_test.py" ]
[ "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport torch\nfrom tests import utils\n\n\nclass SimpleAddMmModule(torch.nn.Module):\n def __init__(self, alpha=1, beta=1):\n super(SimpleAddMmModule, self).__init__()\n self.alpha = alpha\n self.beta = beta\n\n def forward(self, a, b, c):\n return (a + a).addmm(b, c)\n\n\nclass TestAddMM(utils.TorchGlowTestCase):\n def test_addmm_basic(self):\n \"\"\"Basic test of the PyTorch addmm Node on Glow.\"\"\"\n utils.compare_tracing_methods(\n SimpleAddMmModule(),\n torch.randn(6, 4),\n torch.randn(6, 10),\n torch.randn(10, 4),\n )\n\n def test_addmm_broadcast(self):\n \"\"\"Test of the PyTorch addmm with broadcasting add on Glow.\"\"\"\n utils.compare_tracing_methods(\n SimpleAddMmModule(), torch.randn(4), torch.randn(6, 10), torch.randn(10, 4)\n )\n\n def test_addmm_broadcast_with_alpha_and_beta(self):\n \"\"\"Test of the PyTorch addmm with broadcasting add on Glow.\"\"\"\n utils.compare_tracing_methods(\n SimpleAddMmModule(2.0, 3.0),\n torch.randn(4),\n torch.randn(6, 10),\n torch.randn(10, 4),\n )\n" ]
[ [ "torch.randn" ] ]
yuxuan1995liu/darkflowyolo_detection
[ "a7807e9b85833e3f877d46bb60e8fa7d0596a10b", "f4eb40b5ca3f49dfc929ff3ad2b4bb877e9663e2" ]
[ "venv/lib/python3.6/site-packages/tensorflow/python/ops/gen_stateless_random_ops.py", "venv/lib/python3.6/site-packages/tensorboard/plugins/projector/projector_plugin.py" ]
[ "\"\"\"Python wrappers around TensorFlow ops.\n\nThis file is MACHINE GENERATED! Do not edit.\nOriginal C++ source file: stateless_random_ops.cc\n\"\"\"\n\nimport collections as _collections\nimport six as _six\n\nfrom tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow\nfrom tensorflow.python.eager import context as _context\nfrom tensorflow.python.eager import core as _core\nfrom tensorflow.python.eager import execute as _execute\nfrom tensorflow.python.framework import dtypes as _dtypes\nfrom tensorflow.python.framework import errors as _errors\nfrom tensorflow.python.framework import tensor_shape as _tensor_shape\n\nfrom tensorflow.core.framework import op_def_pb2 as _op_def_pb2\n# Needed to trigger the call to _set_call_cpp_shape_fn.\nfrom tensorflow.python.framework import common_shapes as _common_shapes\nfrom tensorflow.python.framework import op_def_registry as _op_def_registry\nfrom tensorflow.python.framework import ops as _ops\nfrom tensorflow.python.framework import op_def_library as _op_def_library\nfrom tensorflow.python.util.deprecation import deprecated_endpoints\nfrom tensorflow.python.util import dispatch as _dispatch\nfrom tensorflow.python.util.tf_export import tf_export\n\n\ndef stateless_multinomial(logits, num_samples, seed, output_dtype=_dtypes.int64, name=None):\n r\"\"\"Draws samples from a multinomial distribution.\n\n Args:\n logits: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.\n 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]`\n represents the unnormalized log probabilities for all classes.\n num_samples: A `Tensor` of type `int32`.\n 0-D. Number of independent samples to draw for each row slice.\n seed: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 2 seeds (shape [2]).\n output_dtype: An optional `tf.DType` from: `tf.int32, tf.int64`. 
Defaults to `tf.int64`.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `output_dtype`.\n \"\"\"\n _ctx = _context._context\n if _ctx is not None and _ctx._eager_context.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"StatelessMultinomial\", name, _ctx._post_execution_callbacks, logits,\n num_samples, seed, \"output_dtype\", output_dtype)\n return _result\n except _core._FallbackException:\n try:\n return stateless_multinomial_eager_fallback(\n logits, num_samples, seed, output_dtype=output_dtype, name=name,\n ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if output_dtype is None:\n output_dtype = _dtypes.int64\n output_dtype = _execute.make_type(output_dtype, \"output_dtype\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StatelessMultinomial\", logits=logits, num_samples=num_samples,\n seed=seed, output_dtype=output_dtype,\n name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"T\", _op.get_attr(\"T\"), \"Tseed\", _op.get_attr(\"Tseed\"),\n \"output_dtype\", _op.get_attr(\"output_dtype\"))\n _execute.record_gradient(\n \"StatelessMultinomial\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\n\ndef stateless_multinomial_eager_fallback(logits, num_samples, seed, output_dtype=_dtypes.int64, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function stateless_multinomial\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if output_dtype is None:\n output_dtype = _dtypes.int64\n output_dtype = _execute.make_type(output_dtype, \"output_dtype\")\n _attr_T, (logits,) = _execute.args_to_matching_eager([logits], _ctx)\n _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], _ctx, _dtypes.int64)\n num_samples = _ops.convert_to_tensor(num_samples, _dtypes.int32)\n _inputs_flat = [logits, num_samples, seed]\n _attrs = (\"T\", _attr_T, \"Tseed\", _attr_Tseed, \"output_dtype\", output_dtype)\n _result = _execute.execute(b\"StatelessMultinomial\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"StatelessMultinomial\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef stateless_random_normal(shape, seed, dtype=_dtypes.float32, name=None):\n r\"\"\"Outputs deterministic pseudorandom values from a normal distribution.\n\n The generated values will have mean 0 and standard deviation 1.\n\n The outputs are a deterministic function of `shape` and `seed`.\n\n Args:\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n The shape of the output tensor.\n seed: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 2 seeds (shape [2]).\n dtype: An optional `tf.DType` from: `tf.half, tf.bfloat16, tf.float32, tf.float64`. 
Defaults to `tf.float32`.\n The type of the output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `dtype`.\n \"\"\"\n _ctx = _context._context\n if _ctx is not None and _ctx._eager_context.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"StatelessRandomNormal\", name, _ctx._post_execution_callbacks, shape,\n seed, \"dtype\", dtype)\n return _result\n except _core._FallbackException:\n try:\n return stateless_random_normal_eager_fallback(\n shape, seed, dtype=dtype, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if dtype is None:\n dtype = _dtypes.float32\n dtype = _execute.make_type(dtype, \"dtype\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StatelessRandomNormal\", shape=shape, seed=seed, dtype=dtype,\n name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"dtype\", _op.get_attr(\"dtype\"), \"T\", _op.get_attr(\"T\"), \"Tseed\",\n _op.get_attr(\"Tseed\"))\n _execute.record_gradient(\n \"StatelessRandomNormal\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\n\ndef stateless_random_normal_eager_fallback(shape, seed, dtype=_dtypes.float32, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function stateless_random_normal\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if dtype is None:\n dtype = _dtypes.float32\n dtype = _execute.make_type(dtype, \"dtype\")\n _attr_T, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32)\n _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], _ctx, _dtypes.int64)\n _inputs_flat = [shape, seed]\n _attrs = (\"dtype\", dtype, \"T\", _attr_T, \"Tseed\", _attr_Tseed)\n _result = _execute.execute(b\"StatelessRandomNormal\", 1, inputs=_inputs_flat,\n attrs=_attrs, ctx=_ctx, name=name)\n _execute.record_gradient(\n \"StatelessRandomNormal\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef stateless_random_uniform(shape, seed, dtype=_dtypes.float32, name=None):\n r\"\"\"Outputs deterministic pseudorandom random values from a uniform distribution.\n\n The generated values follow a uniform distribution in the range `[0, 1)`. The\n lower bound 0 is included in the range, while the upper bound 1 is excluded.\n\n The outputs are a deterministic function of `shape` and `seed`.\n\n Args:\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n The shape of the output tensor.\n seed: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 2 seeds (shape [2]).\n dtype: An optional `tf.DType` from: `tf.half, tf.bfloat16, tf.float32, tf.float64`. 
Defaults to `tf.float32`.\n The type of the output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `dtype`.\n \"\"\"\n _ctx = _context._context\n if _ctx is not None and _ctx._eager_context.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"StatelessRandomUniform\", name, _ctx._post_execution_callbacks, shape,\n seed, \"dtype\", dtype)\n return _result\n except _core._FallbackException:\n try:\n return stateless_random_uniform_eager_fallback(\n shape, seed, dtype=dtype, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if dtype is None:\n dtype = _dtypes.float32\n dtype = _execute.make_type(dtype, \"dtype\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StatelessRandomUniform\", shape=shape, seed=seed, dtype=dtype,\n name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"dtype\", _op.get_attr(\"dtype\"), \"T\", _op.get_attr(\"T\"), \"Tseed\",\n _op.get_attr(\"Tseed\"))\n _execute.record_gradient(\n \"StatelessRandomUniform\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\n\ndef stateless_random_uniform_eager_fallback(shape, seed, dtype=_dtypes.float32, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function stateless_random_uniform\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if dtype is None:\n dtype = _dtypes.float32\n dtype = _execute.make_type(dtype, \"dtype\")\n _attr_T, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32)\n _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], _ctx, _dtypes.int64)\n _inputs_flat = [shape, seed]\n _attrs = (\"dtype\", dtype, \"T\", _attr_T, \"Tseed\", _attr_Tseed)\n _result = _execute.execute(b\"StatelessRandomUniform\", 1,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"StatelessRandomUniform\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef stateless_random_uniform_int(shape, seed, minval, maxval, name=None):\n r\"\"\"Outputs deterministic pseudorandom random integers from a uniform distribution.\n\n The generated values follow a uniform distribution in the range `[minval, maxval)`.\n\n The outputs are a deterministic function of `shape`, `seed`, `minval`, and `maxval`.\n\n Args:\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n The shape of the output tensor.\n seed: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 2 seeds (shape [2]).\n minval: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n Minimum value (inclusive, scalar).\n maxval: A `Tensor`. Must have the same type as `minval`.\n Maximum value (exclusive, scalar).\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor`. 
Has the same type as `minval`.\n \"\"\"\n _ctx = _context._context\n if _ctx is not None and _ctx._eager_context.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"StatelessRandomUniformInt\", name, _ctx._post_execution_callbacks,\n shape, seed, minval, maxval)\n return _result\n except _core._FallbackException:\n try:\n return stateless_random_uniform_int_eager_fallback(\n shape, seed, minval, maxval, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StatelessRandomUniformInt\", shape=shape, seed=seed, minval=minval,\n maxval=maxval, name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"dtype\", _op.get_attr(\"dtype\"), \"T\", _op.get_attr(\"T\"), \"Tseed\",\n _op.get_attr(\"Tseed\"))\n _execute.record_gradient(\n \"StatelessRandomUniformInt\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\n\ndef stateless_random_uniform_int_eager_fallback(shape, seed, minval, maxval, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function stateless_random_uniform_int\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n _attr_dtype, _inputs_dtype = _execute.args_to_matching_eager([minval, maxval], _ctx)\n (minval, maxval) = _inputs_dtype\n _attr_T, (shape,) = _execute.args_to_matching_eager([shape], _ctx)\n _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], _ctx, _dtypes.int64)\n _inputs_flat = [shape, seed, minval, maxval]\n _attrs = (\"dtype\", _attr_dtype, \"T\", _attr_T, \"Tseed\", _attr_Tseed)\n _result = _execute.execute(b\"StatelessRandomUniformInt\", 1,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"StatelessRandomUniformInt\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\ndef stateless_truncated_normal(shape, seed, dtype=_dtypes.float32, name=None):\n r\"\"\"Outputs deterministic pseudorandom values from a truncated normal distribution.\n\n The generated values follow a normal distribution with mean 0 and standard\n deviation 1, except that values whose magnitude is more than 2 standard\n deviations from the mean are dropped and re-picked.\n\n The outputs are a deterministic function of `shape` and `seed`.\n\n Args:\n shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n The shape of the output tensor.\n seed: A `Tensor`. Must be one of the following types: `int32`, `int64`.\n 2 seeds (shape [2]).\n dtype: An optional `tf.DType` from: `tf.half, tf.bfloat16, tf.float32, tf.float64`. 
Defaults to `tf.float32`.\n The type of the output.\n name: A name for the operation (optional).\n\n Returns:\n A `Tensor` of type `dtype`.\n \"\"\"\n _ctx = _context._context\n if _ctx is not None and _ctx._eager_context.is_eager:\n try:\n _result = _pywrap_tensorflow.TFE_Py_FastPathExecute(\n _ctx._context_handle, _ctx._eager_context.device_name,\n \"StatelessTruncatedNormal\", name, _ctx._post_execution_callbacks,\n shape, seed, \"dtype\", dtype)\n return _result\n except _core._FallbackException:\n try:\n return stateless_truncated_normal_eager_fallback(\n shape, seed, dtype=dtype, name=name, ctx=_ctx)\n except _core._SymbolicException:\n pass # Add nodes to the TensorFlow graph.\n except _core._NotOkStatusException as e:\n if name is not None:\n message = e.message + \" name: \" + name\n else:\n message = e.message\n _six.raise_from(_core._status_to_exception(e.code, message), None)\n # Add nodes to the TensorFlow graph.\n if dtype is None:\n dtype = _dtypes.float32\n dtype = _execute.make_type(dtype, \"dtype\")\n _, _, _op = _op_def_lib._apply_op_helper(\n \"StatelessTruncatedNormal\", shape=shape, seed=seed, dtype=dtype,\n name=name)\n _result = _op.outputs[:]\n _inputs_flat = _op.inputs\n _attrs = (\"dtype\", _op.get_attr(\"dtype\"), \"T\", _op.get_attr(\"T\"), \"Tseed\",\n _op.get_attr(\"Tseed\"))\n _execute.record_gradient(\n \"StatelessTruncatedNormal\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\n\n\ndef stateless_truncated_normal_eager_fallback(shape, seed, dtype=_dtypes.float32, name=None, ctx=None):\n r\"\"\"This is the slowpath function for Eager mode.\n This is for function stateless_truncated_normal\n \"\"\"\n _ctx = ctx if ctx else _context.context()\n if dtype is None:\n dtype = _dtypes.float32\n dtype = _execute.make_type(dtype, \"dtype\")\n _attr_T, (shape,) = _execute.args_to_matching_eager([shape], _ctx, _dtypes.int32)\n _attr_Tseed, (seed,) = _execute.args_to_matching_eager([seed], _ctx, _dtypes.int64)\n _inputs_flat = [shape, seed]\n _attrs = (\"dtype\", dtype, \"T\", _attr_T, \"Tseed\", _attr_Tseed)\n _result = _execute.execute(b\"StatelessTruncatedNormal\", 1,\n inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,\n name=name)\n _execute.record_gradient(\n \"StatelessTruncatedNormal\", _inputs_flat, _attrs, _result, name)\n _result, = _result\n return _result\n\ndef _InitOpDefLibrary(op_list_proto_bytes):\n op_list = _op_def_pb2.OpList()\n op_list.ParseFromString(op_list_proto_bytes)\n _op_def_registry.register_op_list(op_list)\n op_def_lib = _op_def_library.OpDefLibrary()\n op_def_lib.add_op_list(op_list)\n return op_def_lib\n# op {\n# name: \"StatelessMultinomial\"\n# input_arg {\n# name: \"logits\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"num_samples\"\n# type: DT_INT32\n# }\n# input_arg {\n# name: \"seed\"\n# type_attr: \"Tseed\"\n# }\n# output_arg {\n# name: \"output\"\n# type_attr: \"output_dtype\"\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_FLOAT\n# type: DT_DOUBLE\n# type: DT_INT32\n# type: DT_UINT8\n# type: DT_INT16\n# type: DT_INT8\n# type: DT_INT64\n# type: DT_BFLOAT16\n# type: DT_UINT16\n# type: DT_HALF\n# type: DT_UINT32\n# type: DT_UINT64\n# }\n# }\n# }\n# attr {\n# name: \"Tseed\"\n# type: \"type\"\n# default_value {\n# type: DT_INT64\n# }\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# attr {\n# name: \"output_dtype\"\n# type: \"type\"\n# default_value {\n# type: DT_INT64\n# }\n# allowed_values {\n# list {\n# type: DT_INT32\n# 
type: DT_INT64\n# }\n# }\n# }\n# }\n# op {\n# name: \"StatelessRandomNormal\"\n# input_arg {\n# name: \"shape\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"seed\"\n# type_attr: \"Tseed\"\n# }\n# output_arg {\n# name: \"output\"\n# type_attr: \"dtype\"\n# }\n# attr {\n# name: \"dtype\"\n# type: \"type\"\n# default_value {\n# type: DT_FLOAT\n# }\n# allowed_values {\n# list {\n# type: DT_HALF\n# type: DT_BFLOAT16\n# type: DT_FLOAT\n# type: DT_DOUBLE\n# }\n# }\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# default_value {\n# type: DT_INT32\n# }\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# attr {\n# name: \"Tseed\"\n# type: \"type\"\n# default_value {\n# type: DT_INT64\n# }\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# }\n# op {\n# name: \"StatelessRandomUniform\"\n# input_arg {\n# name: \"shape\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"seed\"\n# type_attr: \"Tseed\"\n# }\n# output_arg {\n# name: \"output\"\n# type_attr: \"dtype\"\n# }\n# attr {\n# name: \"dtype\"\n# type: \"type\"\n# default_value {\n# type: DT_FLOAT\n# }\n# allowed_values {\n# list {\n# type: DT_HALF\n# type: DT_BFLOAT16\n# type: DT_FLOAT\n# type: DT_DOUBLE\n# }\n# }\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# default_value {\n# type: DT_INT32\n# }\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# attr {\n# name: \"Tseed\"\n# type: \"type\"\n# default_value {\n# type: DT_INT64\n# }\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# }\n# op {\n# name: \"StatelessRandomUniformInt\"\n# input_arg {\n# name: \"shape\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"seed\"\n# type_attr: \"Tseed\"\n# }\n# input_arg {\n# name: \"minval\"\n# type_attr: \"dtype\"\n# }\n# input_arg {\n# name: \"maxval\"\n# type_attr: \"dtype\"\n# }\n# output_arg {\n# name: \"output\"\n# type_attr: \"dtype\"\n# }\n# attr {\n# name: \"dtype\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# attr {\n# name: \"Tseed\"\n# type: \"type\"\n# default_value {\n# type: DT_INT64\n# }\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# }\n# op {\n# name: \"StatelessTruncatedNormal\"\n# input_arg {\n# name: \"shape\"\n# type_attr: \"T\"\n# }\n# input_arg {\n# name: \"seed\"\n# type_attr: \"Tseed\"\n# }\n# output_arg {\n# name: \"output\"\n# type_attr: \"dtype\"\n# }\n# attr {\n# name: \"dtype\"\n# type: \"type\"\n# default_value {\n# type: DT_FLOAT\n# }\n# allowed_values {\n# list {\n# type: DT_HALF\n# type: DT_BFLOAT16\n# type: DT_FLOAT\n# type: DT_DOUBLE\n# }\n# }\n# }\n# attr {\n# name: \"T\"\n# type: \"type\"\n# default_value {\n# type: DT_INT32\n# }\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# attr {\n# name: \"Tseed\"\n# type: \"type\"\n# default_value {\n# type: DT_INT64\n# }\n# allowed_values {\n# list {\n# type: DT_INT32\n# type: DT_INT64\n# }\n# }\n# }\n# }\n_op_def_lib = 
_InitOpDefLibrary(b\"\\n\\265\\001\\n\\024StatelessMultinomial\\022\\013\\n\\006logits\\\"\\001T\\022\\017\\n\\013num_samples\\030\\003\\022\\r\\n\\004seed\\\"\\005Tseed\\032\\026\\n\\006output\\\"\\014output_dtype\\\"\\033\\n\\001T\\022\\004type:\\020\\n\\0162\\014\\001\\002\\003\\004\\005\\006\\t\\016\\021\\023\\026\\027\\\"\\031\\n\\005Tseed\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\\\" \\n\\014output_dtype\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\\n\\222\\001\\n\\025StatelessRandomNormal\\022\\n\\n\\005shape\\\"\\001T\\022\\r\\n\\004seed\\\"\\005Tseed\\032\\017\\n\\006output\\\"\\005dtype\\\"\\033\\n\\005dtype\\022\\004type\\032\\0020\\001:\\010\\n\\0062\\004\\023\\016\\001\\002\\\"\\025\\n\\001T\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\\"\\031\\n\\005Tseed\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\\n\\223\\001\\n\\026StatelessRandomUniform\\022\\n\\n\\005shape\\\"\\001T\\022\\r\\n\\004seed\\\"\\005Tseed\\032\\017\\n\\006output\\\"\\005dtype\\\"\\033\\n\\005dtype\\022\\004type\\032\\0020\\001:\\010\\n\\0062\\004\\023\\016\\001\\002\\\"\\025\\n\\001T\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\\"\\031\\n\\005Tseed\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\\n\\256\\001\\n\\031StatelessRandomUniformInt\\022\\n\\n\\005shape\\\"\\001T\\022\\r\\n\\004seed\\\"\\005Tseed\\022\\017\\n\\006minval\\\"\\005dtype\\022\\017\\n\\006maxval\\\"\\005dtype\\032\\017\\n\\006output\\\"\\005dtype\\\"\\025\\n\\005dtype\\022\\004type:\\006\\n\\0042\\002\\003\\t\\\"\\021\\n\\001T\\022\\004type:\\006\\n\\0042\\002\\003\\t\\\"\\031\\n\\005Tseed\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\\n\\225\\001\\n\\030StatelessTruncatedNormal\\022\\n\\n\\005shape\\\"\\001T\\022\\r\\n\\004seed\\\"\\005Tseed\\032\\017\\n\\006output\\\"\\005dtype\\\"\\033\\n\\005dtype\\022\\004type\\032\\0020\\001:\\010\\n\\0062\\004\\023\\016\\001\\002\\\"\\025\\n\\001T\\022\\004type\\032\\0020\\003:\\006\\n\\0042\\002\\003\\t\\\"\\031\\n\\005Tseed\\022\\004type\\032\\0020\\t:\\006\\n\\0042\\002\\003\\t\")\n", "# Copyright 2016 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"The Embedding Projector plugin.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport imghdr\nimport math\nimport os\nimport threading\n\nimport numpy as np\nfrom werkzeug import wrappers\n\nfrom google.protobuf import json_format\nfrom google.protobuf import text_format\n\nfrom tensorboard.backend.http_util import Respond\nfrom tensorboard.compat import tf\nfrom tensorboard.plugins import base_plugin\nfrom tensorboard.plugins.projector.projector_config_pb2 import ProjectorConfig\nfrom tensorboard.util import tb_logging\n\nlogger = tb_logging.get_logger()\n\n# The prefix of routes provided by this plugin.\n_PLUGIN_PREFIX_ROUTE = 'projector'\n\n# FYI - the PROJECTOR_FILENAME is hardcoded in the visualize_embeddings\n# method in tf.contrib.tensorboard.plugins.projector module.\n# TODO(@dandelionmane): Fix duplication when we find a permanent home for the\n# projector module.\nPROJECTOR_FILENAME = 'projector_config.pbtxt'\n_PLUGIN_NAME = 'org_tensorflow_tensorboard_projector'\n_PLUGINS_DIR = 'plugins'\n\n# Number of tensors in the LRU cache.\n_TENSOR_CACHE_CAPACITY = 1\n\n# HTTP routes.\nCONFIG_ROUTE = '/info'\nTENSOR_ROUTE = '/tensor'\nMETADATA_ROUTE = '/metadata'\nRUNS_ROUTE = '/runs'\nBOOKMARKS_ROUTE = '/bookmarks'\nSPRITE_IMAGE_ROUTE = '/sprite_image'\n\n_IMGHDR_TO_MIMETYPE = {\n 'bmp': 'image/bmp',\n 'gif': 'image/gif',\n 'jpeg': 'image/jpeg',\n 'png': 'image/png'\n}\n_DEFAULT_IMAGE_MIMETYPE = 'application/octet-stream'\n\n\nclass LRUCache(object):\n \"\"\"LRU cache. Used for storing the last used tensor.\"\"\"\n\n def __init__(self, size):\n if size < 1:\n raise ValueError('The cache size must be >=1')\n self._size = size\n self._dict = collections.OrderedDict()\n\n def get(self, key):\n try:\n value = self._dict.pop(key)\n self._dict[key] = value\n return value\n except KeyError:\n return None\n\n def set(self, key, value):\n if value is None:\n raise ValueError('value must be != None')\n try:\n self._dict.pop(key)\n except KeyError:\n if len(self._dict) >= self._size:\n self._dict.popitem(last=False)\n self._dict[key] = value\n\n\nclass EmbeddingMetadata(object):\n \"\"\"Metadata container for an embedding.\n\n The metadata holds different columns with values used for visualization\n (color by, label by) in the \"Embeddings\" tab in TensorBoard.\n \"\"\"\n\n def __init__(self, num_points):\n \"\"\"Constructs a metadata for an embedding of the specified size.\n\n Args:\n num_points: Number of points in the embedding.\n \"\"\"\n self.num_points = num_points\n self.column_names = []\n self.name_to_values = {}\n\n def add_column(self, column_name, column_values):\n \"\"\"Adds a named column of metadata values.\n\n Args:\n column_name: Name of the column.\n column_values: 1D array/list/iterable holding the column values. 
Must be\n of length `num_points`. The i-th value corresponds to the i-th point.\n\n Raises:\n ValueError: If `column_values` is not 1D array, or of length `num_points`,\n or the `name` is already used.\n \"\"\"\n # Sanity checks.\n if isinstance(column_values, list) and isinstance(column_values[0], list):\n raise ValueError('\"column_values\" must be a flat list, but we detected '\n 'that its first entry is a list')\n\n if isinstance(column_values, np.ndarray) and column_values.ndim != 1:\n raise ValueError('\"column_values\" should be of rank 1, '\n 'but is of rank %d' % column_values.ndim)\n if len(column_values) != self.num_points:\n raise ValueError('\"column_values\" should be of length %d, but is of '\n 'length %d' % (self.num_points, len(column_values)))\n if column_name in self.name_to_values:\n raise ValueError('The column name \"%s\" is already used' % column_name)\n\n self.column_names.append(column_name)\n self.name_to_values[column_name] = column_values\n\n\ndef _read_tensor_tsv_file(fpath):\n with tf.io.gfile.GFile(fpath, 'r') as f:\n tensor = []\n for line in f:\n line = line.rstrip('\\n')\n if line:\n tensor.append(list(map(float, line.split('\\t'))))\n return np.array(tensor, dtype='float32')\n\n\ndef _assets_dir_to_logdir(assets_dir):\n sub_path = os.path.sep + _PLUGINS_DIR + os.path.sep\n if sub_path in assets_dir:\n two_parents_up = os.pardir + os.path.sep + os.pardir\n return os.path.abspath(os.path.join(assets_dir, two_parents_up))\n return assets_dir\n\n\ndef _latest_checkpoints_changed(configs, run_path_pairs):\n \"\"\"Returns true if the latest checkpoint has changed in any of the runs.\"\"\"\n for run_name, assets_dir in run_path_pairs:\n if run_name not in configs:\n config = ProjectorConfig()\n config_fpath = os.path.join(assets_dir, PROJECTOR_FILENAME)\n if tf.io.gfile.exists(config_fpath):\n with tf.io.gfile.GFile(config_fpath, 'r') as f:\n file_content = f.read()\n text_format.Merge(file_content, config)\n else:\n config = configs[run_name]\n\n # See if you can find a checkpoint file in the logdir.\n logdir = _assets_dir_to_logdir(assets_dir)\n ckpt_path = _find_latest_checkpoint(logdir)\n if not ckpt_path:\n continue\n if config.model_checkpoint_path != ckpt_path:\n return True\n return False\n\n\ndef _parse_positive_int_param(request, param_name):\n \"\"\"Parses and asserts a positive (>0) integer query parameter.\n\n Args:\n request: The Werkzeug Request object\n param_name: Name of the parameter.\n\n Returns:\n Param, or None, or -1 if parameter is not a positive integer.\n \"\"\"\n param = request.args.get(param_name)\n if not param:\n return None\n try:\n param = int(param)\n if param <= 0:\n raise ValueError()\n return param\n except ValueError:\n return -1\n\n\ndef _rel_to_abs_asset_path(fpath, config_fpath):\n fpath = os.path.expanduser(fpath)\n if not os.path.isabs(fpath):\n return os.path.join(os.path.dirname(config_fpath), fpath)\n return fpath\n\n\ndef _using_tf():\n \"\"\"Return true if we're not using the fake TF API stub implementation.\"\"\"\n return tf.__version__ != 'stub'\n\n\nclass ProjectorPlugin(base_plugin.TBPlugin):\n \"\"\"Embedding projector.\"\"\"\n\n plugin_name = _PLUGIN_PREFIX_ROUTE\n\n def __init__(self, context):\n \"\"\"Instantiates ProjectorPlugin via TensorBoard core.\n\n Args:\n context: A base_plugin.TBContext instance.\n \"\"\"\n self.multiplexer = context.multiplexer\n self.logdir = context.logdir\n self._handlers = None\n self.readers = {}\n self.run_paths = None\n self._configs = {}\n self.old_num_run_paths = None\n 
self.config_fpaths = None\n self.tensor_cache = LRUCache(_TENSOR_CACHE_CAPACITY)\n\n # Whether the plugin is active (has meaningful data to process and serve).\n # Once the plugin is deemed active, we no longer re-compute the value\n # because doing so is potentially expensive.\n self._is_active = False\n\n # The running thread that is currently determining whether the plugin is\n # active. If such a thread exists, do not start a duplicate thread.\n self._thread_for_determining_is_active = None\n\n if self.multiplexer:\n self.run_paths = self.multiplexer.RunPaths()\n\n def get_plugin_apps(self):\n self._handlers = {\n RUNS_ROUTE: self._serve_runs,\n CONFIG_ROUTE: self._serve_config,\n TENSOR_ROUTE: self._serve_tensor,\n METADATA_ROUTE: self._serve_metadata,\n BOOKMARKS_ROUTE: self._serve_bookmarks,\n SPRITE_IMAGE_ROUTE: self._serve_sprite_image\n }\n return self._handlers\n\n def is_active(self):\n \"\"\"Determines whether this plugin is active.\n\n This plugin is only active if any run has an embedding.\n\n Returns:\n Whether any run has embedding data to show in the projector.\n \"\"\"\n if not self.multiplexer:\n return False\n\n if self._is_active:\n # We have already determined that the projector plugin should be active.\n # Do not re-compute that. We have no reason to later set this plugin to be\n # inactive.\n return True\n\n if self._thread_for_determining_is_active:\n # We are currently determining whether the plugin is active. Do not start\n # a separate thread.\n return self._is_active\n\n # The plugin is currently not active. The frontend might check again later.\n # For now, spin off a separate thread to determine whether the plugin is\n # active.\n new_thread = threading.Thread(\n target=self._determine_is_active,\n name='ProjectorPluginIsActiveThread')\n self._thread_for_determining_is_active = new_thread\n new_thread.start()\n return False\n\n def _determine_is_active(self):\n \"\"\"Determines whether the plugin is active.\n\n This method is run in a separate thread so that the plugin can offer an\n immediate response to whether it is active and determine whether it should\n be active in a separate thread.\n \"\"\"\n if self.configs:\n self._is_active = True\n self._thread_for_determining_is_active = None\n\n @property\n def configs(self):\n \"\"\"Returns a map of run paths to `ProjectorConfig` protos.\"\"\"\n run_path_pairs = list(self.run_paths.items())\n self._append_plugin_asset_directories(run_path_pairs)\n # If there are no summary event files, the projector should still work,\n # treating the `logdir` as the model checkpoint directory.\n if not run_path_pairs:\n run_path_pairs.append(('.', self.logdir))\n if (self._run_paths_changed() or\n _latest_checkpoints_changed(self._configs, run_path_pairs)):\n self.readers = {}\n self._configs, self.config_fpaths = self._read_latest_config_files(\n run_path_pairs)\n self._augment_configs_with_checkpoint_info()\n return self._configs\n\n def _run_paths_changed(self):\n num_run_paths = len(list(self.run_paths.keys()))\n if num_run_paths != self.old_num_run_paths:\n self.old_num_run_paths = num_run_paths\n return True\n return False\n\n def _augment_configs_with_checkpoint_info(self):\n for run, config in self._configs.items():\n for embedding in config.embeddings:\n # Normalize the name of the embeddings.\n if embedding.tensor_name.endswith(':0'):\n embedding.tensor_name = embedding.tensor_name[:-2]\n # Find the size of embeddings associated with a tensors file.\n if embedding.tensor_path and not embedding.tensor_shape:\n fpath 
= _rel_to_abs_asset_path(embedding.tensor_path,\n self.config_fpaths[run])\n tensor = self.tensor_cache.get((run, embedding.tensor_name))\n if tensor is None:\n tensor = _read_tensor_tsv_file(fpath)\n self.tensor_cache.set((run, embedding.tensor_name), tensor)\n embedding.tensor_shape.extend([len(tensor), len(tensor[0])])\n\n reader = self._get_reader_for_run(run)\n if not reader:\n continue\n # Augment the configuration with the tensors in the checkpoint file.\n special_embedding = None\n if config.embeddings and not config.embeddings[0].tensor_name:\n special_embedding = config.embeddings[0]\n config.embeddings.remove(special_embedding)\n var_map = reader.get_variable_to_shape_map()\n for tensor_name, tensor_shape in var_map.items():\n if len(tensor_shape) != 2:\n continue\n embedding = self._get_embedding(tensor_name, config)\n if not embedding:\n embedding = config.embeddings.add()\n embedding.tensor_name = tensor_name\n if special_embedding:\n embedding.metadata_path = special_embedding.metadata_path\n embedding.bookmarks_path = special_embedding.bookmarks_path\n if not embedding.tensor_shape:\n embedding.tensor_shape.extend(tensor_shape)\n\n # Remove configs that do not have any valid (2D) tensors.\n runs_to_remove = []\n for run, config in self._configs.items():\n if not config.embeddings:\n runs_to_remove.append(run)\n for run in runs_to_remove:\n del self._configs[run]\n del self.config_fpaths[run]\n\n def _read_latest_config_files(self, run_path_pairs):\n \"\"\"Reads and returns the projector config files in every run directory.\"\"\"\n configs = {}\n config_fpaths = {}\n for run_name, assets_dir in run_path_pairs:\n config = ProjectorConfig()\n config_fpath = os.path.join(assets_dir, PROJECTOR_FILENAME)\n if tf.io.gfile.exists(config_fpath):\n with tf.io.gfile.GFile(config_fpath, 'r') as f:\n file_content = f.read()\n text_format.Merge(file_content, config)\n has_tensor_files = False\n for embedding in config.embeddings:\n if embedding.tensor_path:\n if not embedding.tensor_name:\n embedding.tensor_name = os.path.basename(embedding.tensor_path)\n has_tensor_files = True\n break\n\n if not config.model_checkpoint_path:\n # See if you can find a checkpoint file in the logdir.\n logdir = _assets_dir_to_logdir(assets_dir)\n ckpt_path = _find_latest_checkpoint(logdir)\n if not ckpt_path and not has_tensor_files:\n continue\n if ckpt_path:\n config.model_checkpoint_path = ckpt_path\n\n # Sanity check for the checkpoint file.\n if (config.model_checkpoint_path and _using_tf() and\n not tf.compat.v1.train.checkpoint_exists(config.model_checkpoint_path)):\n logger.warn('Checkpoint file \"%s\" not found',\n config.model_checkpoint_path)\n continue\n configs[run_name] = config\n config_fpaths[run_name] = config_fpath\n return configs, config_fpaths\n\n def _get_reader_for_run(self, run):\n if run in self.readers:\n return self.readers[run]\n\n config = self._configs[run]\n reader = None\n if config.model_checkpoint_path and _using_tf():\n try:\n reader = tf.compat.v1.pywrap_tensorflow.NewCheckpointReader(\n config.model_checkpoint_path)\n except Exception: # pylint: disable=broad-except\n logger.warn('Failed reading \"%s\"', config.model_checkpoint_path)\n self.readers[run] = reader\n return reader\n\n def _get_metadata_file_for_tensor(self, tensor_name, config):\n embedding_info = self._get_embedding(tensor_name, config)\n if embedding_info:\n return embedding_info.metadata_path\n return None\n\n def _get_bookmarks_file_for_tensor(self, tensor_name, config):\n embedding_info = 
self._get_embedding(tensor_name, config)\n if embedding_info:\n return embedding_info.bookmarks_path\n return None\n\n def _canonical_tensor_name(self, tensor_name):\n if ':' not in tensor_name:\n return tensor_name + ':0'\n else:\n return tensor_name\n\n def _get_embedding(self, tensor_name, config):\n if not config.embeddings:\n return None\n for info in config.embeddings:\n if (self._canonical_tensor_name(info.tensor_name) ==\n self._canonical_tensor_name(tensor_name)):\n return info\n return None\n\n def _append_plugin_asset_directories(self, run_path_pairs):\n for run, assets in self.multiplexer.PluginAssets(_PLUGIN_NAME).items():\n if PROJECTOR_FILENAME not in assets:\n continue\n assets_dir = os.path.join(self.run_paths[run], _PLUGINS_DIR, _PLUGIN_NAME)\n assets_path_pair = (run, os.path.abspath(assets_dir))\n run_path_pairs.append(assets_path_pair)\n\n @wrappers.Request.application\n def _serve_runs(self, request):\n \"\"\"Returns a list of runs that have embeddings.\"\"\"\n return Respond(request, list(self.configs.keys()), 'application/json')\n\n @wrappers.Request.application\n def _serve_config(self, request):\n run = request.args.get('run')\n if run is None:\n return Respond(request, 'query parameter \"run\" is required', 'text/plain',\n 400)\n if run not in self.configs:\n return Respond(request, 'Unknown run: \"%s\"' % run, 'text/plain', 400)\n\n config = self.configs[run]\n return Respond(request,\n json_format.MessageToJson(config), 'application/json')\n\n @wrappers.Request.application\n def _serve_metadata(self, request):\n run = request.args.get('run')\n if run is None:\n return Respond(request, 'query parameter \"run\" is required', 'text/plain',\n 400)\n\n name = request.args.get('name')\n if name is None:\n return Respond(request, 'query parameter \"name\" is required',\n 'text/plain', 400)\n\n num_rows = _parse_positive_int_param(request, 'num_rows')\n if num_rows == -1:\n return Respond(request, 'query parameter num_rows must be integer > 0',\n 'text/plain', 400)\n\n if run not in self.configs:\n return Respond(request, 'Unknown run: \"%s\"' % run, 'text/plain', 400)\n\n config = self.configs[run]\n fpath = self._get_metadata_file_for_tensor(name, config)\n if not fpath:\n return Respond(\n request,\n 'No metadata file found for tensor \"%s\" in the config file \"%s\"' %\n (name, self.config_fpaths[run]), 'text/plain', 400)\n fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])\n if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):\n return Respond(request, '\"%s\" not found, or is not a file' % fpath,\n 'text/plain', 400)\n\n num_header_rows = 0\n with tf.io.gfile.GFile(fpath, 'r') as f:\n lines = []\n # Stream reading the file with early break in case the file doesn't fit in\n # memory.\n for line in f:\n lines.append(line)\n if len(lines) == 1 and '\\t' in lines[0]:\n num_header_rows = 1\n if num_rows and len(lines) >= num_rows + num_header_rows:\n break\n return Respond(request, ''.join(lines), 'text/plain')\n\n @wrappers.Request.application\n def _serve_tensor(self, request):\n run = request.args.get('run')\n if run is None:\n return Respond(request, 'query parameter \"run\" is required', 'text/plain',\n 400)\n\n name = request.args.get('name')\n if name is None:\n return Respond(request, 'query parameter \"name\" is required',\n 'text/plain', 400)\n\n num_rows = _parse_positive_int_param(request, 'num_rows')\n if num_rows == -1:\n return Respond(request, 'query parameter num_rows must be integer > 0',\n 'text/plain', 400)\n\n if run not in 
self.configs:\n return Respond(request, 'Unknown run: \"%s\"' % run, 'text/plain', 400)\n\n config = self.configs[run]\n\n tensor = self.tensor_cache.get((run, name))\n if tensor is None:\n # See if there is a tensor file in the config.\n embedding = self._get_embedding(name, config)\n\n if embedding and embedding.tensor_path:\n fpath = _rel_to_abs_asset_path(embedding.tensor_path,\n self.config_fpaths[run])\n if not tf.io.gfile.exists(fpath):\n return Respond(request,\n 'Tensor file \"%s\" does not exist' % fpath,\n 'text/plain', 400)\n tensor = _read_tensor_tsv_file(fpath)\n else:\n reader = self._get_reader_for_run(run)\n if not reader or not reader.has_tensor(name):\n return Respond(request,\n 'Tensor \"%s\" not found in checkpoint dir \"%s\"' %\n (name, config.model_checkpoint_path), 'text/plain',\n 400)\n try:\n tensor = reader.get_tensor(name)\n except tf.errors.InvalidArgumentError as e:\n return Respond(request, str(e), 'text/plain', 400)\n\n self.tensor_cache.set((run, name), tensor)\n\n if num_rows:\n tensor = tensor[:num_rows]\n if tensor.dtype != 'float32':\n tensor = tensor.astype(dtype='float32', copy=False)\n data_bytes = tensor.tobytes()\n return Respond(request, data_bytes, 'application/octet-stream')\n\n @wrappers.Request.application\n def _serve_bookmarks(self, request):\n run = request.args.get('run')\n if not run:\n return Respond(request, 'query parameter \"run\" is required', 'text/plain',\n 400)\n\n name = request.args.get('name')\n if name is None:\n return Respond(request, 'query parameter \"name\" is required',\n 'text/plain', 400)\n\n if run not in self.configs:\n return Respond(request, 'Unknown run: \"%s\"' % run, 'text/plain', 400)\n\n config = self.configs[run]\n fpath = self._get_bookmarks_file_for_tensor(name, config)\n if not fpath:\n return Respond(\n request,\n 'No bookmarks file found for tensor \"%s\" in the config file \"%s\"' %\n (name, self.config_fpaths[run]), 'text/plain', 400)\n fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])\n if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):\n return Respond(request, '\"%s\" not found, or is not a file' % fpath,\n 'text/plain', 400)\n\n bookmarks_json = None\n with tf.io.gfile.GFile(fpath, 'rb') as f:\n bookmarks_json = f.read()\n return Respond(request, bookmarks_json, 'application/json')\n\n @wrappers.Request.application\n def _serve_sprite_image(self, request):\n run = request.args.get('run')\n if not run:\n return Respond(request, 'query parameter \"run\" is required', 'text/plain',\n 400)\n\n name = request.args.get('name')\n if name is None:\n return Respond(request, 'query parameter \"name\" is required',\n 'text/plain', 400)\n\n if run not in self.configs:\n return Respond(request, 'Unknown run: \"%s\"' % run, 'text/plain', 400)\n\n config = self.configs[run]\n embedding_info = self._get_embedding(name, config)\n\n if not embedding_info or not embedding_info.sprite.image_path:\n return Respond(\n request,\n 'No sprite image file found for tensor \"%s\" in the config file \"%s\"' %\n (name, self.config_fpaths[run]), 'text/plain', 400)\n\n fpath = os.path.expanduser(embedding_info.sprite.image_path)\n fpath = _rel_to_abs_asset_path(fpath, self.config_fpaths[run])\n if not tf.io.gfile.exists(fpath) or tf.io.gfile.isdir(fpath):\n return Respond(request, '\"%s\" does not exist or is directory' % fpath,\n 'text/plain', 400)\n f = tf.io.gfile.GFile(fpath, 'rb')\n encoded_image_string = f.read()\n f.close()\n image_type = imghdr.what(None, encoded_image_string)\n mime_type = 
_IMGHDR_TO_MIMETYPE.get(image_type, _DEFAULT_IMAGE_MIMETYPE)\n return Respond(request, encoded_image_string, mime_type)\n\n\ndef _find_latest_checkpoint(dir_path):\n if not _using_tf():\n return None\n try:\n ckpt_path = tf.train.latest_checkpoint(dir_path)\n if not ckpt_path:\n # Check the parent directory.\n ckpt_path = tf.train.latest_checkpoint(os.path.join(dir_path, os.pardir))\n return ckpt_path\n except tf.errors.NotFoundError:\n return None\n" ]
[ [ "tensorflow.python.eager.execute.make_type", "tensorflow.python.eager.context.context", "tensorflow.python.eager.execute.args_to_matching_eager", "tensorflow.python.eager.execute.execute", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.eager.execute.record_gradient", "tensorflow.core.framework.op_def_pb2.OpList", "tensorflow.python.pywrap_tensorflow.TFE_Py_FastPathExecute", "tensorflow.python.framework.op_def_library.OpDefLibrary", "tensorflow.python.eager.core._status_to_exception", "tensorflow.python.framework.op_def_registry.register_op_list" ], [ "numpy.array" ] ]
Refinitiv-API-Samples/Examples.RDPLibrary.Python.July2020Webinar
[ "53fad3d2632d503cd5526a0118c9b4c4dcac09af" ]
[ "DashStreaming.py" ]
[ "import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table as dte\nimport pandas as pd\nimport plotly\nimport plotly.graph_objs as go\nfrom dash.dependencies import Output, Input\n\nimport refinitiv.dataplatform as rdp\nimport configparser as cp\n\nglobal esg_df, tick_list, streaming_price, streaming_news, news_history\n# Streaming related\nric_list = ['EUR=', 'AUD=','JPY=', 'BP.L', 'BT.L']\nstream_fields = ['DSPLY_NAME','OPEN_PRC', 'HST_CLOSE', 'BID', 'BIDSIZE' ,'ASK', 'ASKSIZE', 'ACVOL_1', 'TRDPRC_1', 'TRDTIM_1', 'MID_PRICE']\ntick_field = 'MID_PRICE'\nprev_ric = ric_list[0]\n\n# News related\nnews_ric = 'NFCP_UBMS'\nnews_fields = ['PNAC', 'HEADLINE1', 'NEWSCODE01']\n\n# Called first time the app is run and also when the dropdown list is changed\ndef get_data(ric, initial_run=False):\n global esg_df, tick_list, streaming_price, streaming_news, news_history\n \n if initial_run:\n # ESG DATA\n esg_df = rdp.get_esg_standard_scores(universe='VOD.L')\n # Streaming News\n streaming_news = rdp.StreamingPrices(universe=[news_ric], fields=news_fields)\n streaming_news.open()\n news_history = streaming_news.get_snapshot()\n \n # Price History\n tick_hist_df = rdp.get_historical_price_events(ric, fields=[tick_field], count=200)\n tick_list = pd.to_numeric(tick_hist_df[tick_field]).to_list()\n tick_list.reverse()\n # Streaming Price\n streaming_price = rdp.StreamingPrices(universe=[ric], fields=stream_fields)\n streaming_price.open()\n\n# Open session to Refinitiv Data Platform (Cloud) Server\nconfig = cp.ConfigParser()\nconfig.read(\"config.cfg\")\nrdp.open_platform_session(\n config['session']['app_key'],\n rdp.GrantPassword(\n username=config['session']['user'],\n password=config['session']['password']\n )\n)\nget_data(ric_list[0], True)\n\n# DASH FRAMEWORK CODE\napp = dash.Dash(\"RDP Dashboard\")\napp.layout = html.Div([\n\n html.H2('Streaming Dashboard Example', style={'color': 'blue'}),\n html.Div(id='nop1'),\n dcc.Dropdown(id='ric-dropdown',\n options=[{'label': i, 'value': i} for i in ric_list],\n value=ric_list[0]),\n html.Div(id='nop2'),\n html.H4('Streaming Graph'),\n dcc.Graph(id='live-graph', animate=True),\n dcc.Interval(id='stream-update', interval=1 * 1000),\n\n html.H4('Streaming Fields'),\n dte.DataTable(id='tickData',\n columns=[{'name': i, 'id': i} for i in stream_fields]),\n\n html.H4('Streaming News'),\n dte.DataTable(id='newsData',\n columns=[{'name': i, 'id': i} for i in news_fields],\n style_cell={'textAlign': 'left'},\n page_size=5),\n \n html.H4('Non-Streaming data e.g. 
ESG Standard Scores for Vodafone'),\n dte.DataTable(id='esgData',\n columns=[{\"name\": a, \"id\": a} for a in esg_df],\n data=esg_df.to_dict('records'),\n style_table={'overflowX': 'auto'}\n ),\n\n])\n\n@app.callback([Output('live-graph', 'figure'),\n Output('tickData', 'data'),\n Output('newsData', 'data') ],\n [Input('ric-dropdown', 'value'),\n Input('stream-update', 'n_intervals')])\ndef update_ric(selected_ric, input_data):\n global prev_ric, news_history, tick_list\n # could have used callback-context?\n if selected_ric == prev_ric: \n tick_list.pop(0)\n tick_list.append(streaming_price.get_snapshot()[tick_field].iloc[0])\n else:\n print(\"RIC change from {} to {}\".format(prev_ric, selected_ric))\n prev_ric = selected_ric\n get_data(selected_ric)\n \n streaming_fields = streaming_price.get_snapshot()\n\n latest_news = streaming_news.get_snapshot()\n if not latest_news['PNAC'].iloc[0] == news_history['PNAC'].iloc[0]:\n news_history = latest_news.append(news_history)\n\n data = plotly.graph_objs.Scatter(\n y=tick_list,\n name='Scatter',\n mode='lines+markers'\n )\n return {'data': [data], 'layout': go.Layout(yaxis={'title': 'MID',\n 'range': [min(tick_list) * 0.9994, max(tick_list) * 1.0006]})}, \\\n streaming_fields.to_dict('records'), news_history.to_dict('records')\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n" ]
[ [ "pandas.to_numeric" ] ]
liaorongfan/center_net
[ "4d2f8219332b5c22094b8e90dd8e2f51c9d9605b" ]
[ "FPS_test.py" ]
[ "import colorsys\nimport os\nimport pickle\n\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image, ImageDraw, ImageFont\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom tqdm import tqdm\n\nfrom centernet import CenterNet\nfrom nets.centernet import CenterNet_HourglassNet, CenterNet_Resnet50\nfrom utils.utils import (centernet_correct_boxes, decode_bbox, letterbox_image,\n nms)\nimport time\n'''\n该FPS测试不包括前处理(归一化与resize部分)、绘图。\n包括的内容为:网络推理、得分门限筛选、非极大抑制。\n使用'img/street.jpg'图片进行测试,该测试方法参考库https://github.com/zylo117/Yet-Another-EfficientDet-Pytorch\n\nvideo.py里面测试的FPS会低于该FPS,因为摄像头的读取频率有限,而且处理过程包含了前处理和绘图部分。\n'''\n\ndef preprocess_image(image):\n mean = [0.40789655, 0.44719303, 0.47026116]\n std = [0.2886383, 0.27408165, 0.27809834]\n return ((np.float32(image) / 255.) - mean) / std\n \nclass FPS_CenterNet(CenterNet):\n def get_FPS(self, image, test_interval):\n image_shape = np.array(np.shape(image)[0:2])\n #---------------------------------------------------------#\n # 给图像增加灰条,实现不失真的resize\n #---------------------------------------------------------#\n crop_img = letterbox_image(image, [self.image_size[0],self.image_size[1]])\n #----------------------------------------------------------------------------------#\n # 将RGB转化成BGR,这是因为原始的centernet_hourglass权值是使用BGR通道的图片训练的\n #----------------------------------------------------------------------------------#\n photo = np.array(crop_img,dtype = np.float32)[:,:,::-1]\n #-----------------------------------------------------------#\n # 图片预处理,归一化。获得的photo的shape为[1, 512, 512, 3]\n #-----------------------------------------------------------#\n photo = np.reshape(np.transpose(preprocess_image(photo), (2, 0, 1)), [1, self.image_size[2], self.image_size[0], self.image_size[1]])\n \n with torch.no_grad():\n images = Variable(torch.from_numpy(np.asarray(photo)).type(torch.FloatTensor))\n if self.cuda:\n images = images.cuda()\n outputs = self.centernet(images)\n\n if self.backbone=='hourglass':\n outputs = [outputs[-1][\"hm\"].sigmoid(), outputs[-1][\"wh\"], outputs[-1][\"reg\"]]\n outputs = decode_bbox(outputs[0],outputs[1],outputs[2],self.image_size,self.confidence,self.cuda)\n \n try:\n if self.nms:\n outputs = np.array(nms(outputs,self.nms_threhold))\n\n output = outputs[0]\n if len(output)>0:\n batch_boxes, det_conf, det_label = output[:,:4], output[:,4], output[:,5]\n\n det_xmin, det_ymin, det_xmax, det_ymax = batch_boxes[:, 0], batch_boxes[:, 1], batch_boxes[:, 2], batch_boxes[:, 3]\n top_indices = [i for i, conf in enumerate(det_conf) if conf >= self.confidence]\n top_conf = det_conf[top_indices]\n top_label_indices = det_label[top_indices].tolist()\n top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(det_xmin[top_indices],-1),np.expand_dims(det_ymin[top_indices],-1),np.expand_dims(det_xmax[top_indices],-1),np.expand_dims(det_ymax[top_indices],-1)\n \n boxes = centernet_correct_boxes(top_ymin,top_xmin,top_ymax,top_xmax,np.array([self.image_size[0],self.image_size[1]]),image_shape)\n except:\n pass\n\n t1 = time.time()\n for _ in range(test_interval):\n with torch.no_grad():\n outputs = self.centernet(images)\n\n if self.backbone=='hourglass':\n outputs = [outputs[-1][\"hm\"].sigmoid(), outputs[-1][\"wh\"], outputs[-1][\"reg\"]]\n outputs = decode_bbox(outputs[0],outputs[1],outputs[2],self.image_size,self.confidence,self.cuda)\n \n try:\n if self.nms:\n outputs = np.array(nms(outputs,self.nms_threhold))\n\n output = outputs[0]\n if len(output)>0:\n batch_boxes, det_conf, det_label = output[:,:4], output[:,4], 
output[:,5]\n\n det_xmin, det_ymin, det_xmax, det_ymax = batch_boxes[:, 0], batch_boxes[:, 1], batch_boxes[:, 2], batch_boxes[:, 3]\n top_indices = [i for i, conf in enumerate(det_conf) if conf >= self.confidence]\n top_conf = det_conf[top_indices]\n top_label_indices = det_label[top_indices].tolist()\n top_xmin, top_ymin, top_xmax, top_ymax = np.expand_dims(det_xmin[top_indices],-1),np.expand_dims(det_ymin[top_indices],-1),np.expand_dims(det_xmax[top_indices],-1),np.expand_dims(det_ymax[top_indices],-1)\n \n boxes = centernet_correct_boxes(top_ymin,top_xmin,top_ymax,top_xmax,np.array([self.image_size[0],self.image_size[1]]),image_shape)\n except:\n pass\n t2 = time.time()\n tact_time = (t2 - t1) / test_interval\n return tact_time\n \ncenternet = FPS_CenterNet()\ntest_interval = 100\nimg = Image.open('img/street.jpg')\ntact_time = centernet.get_FPS(img, test_interval)\nprint(str(tact_time) + ' seconds, ' + str(1/tact_time) + 'FPS, @batch_size 1')\n" ]
[ [ "numpy.array", "numpy.asarray", "torch.no_grad", "numpy.shape", "numpy.float32", "numpy.expand_dims" ] ]
proux01/rupicola
[ "60180c1975f678443b02742e812dc183cf01631d" ]
[ "src/Rupicola/Examples/plot.py" ]
[ "#!/usr/bin/python3\n\nimport pandas, seaborn, matplotlib.pyplot\nimport latest_benchmark_results\n\nBENCHMARK_ALIASES = [\n ('crc32', 'crc32'),\n ('utf8_decode', 'utf8'),\n ('murmur3', 'm3s'),\n ('upstr', 'upstr'),\n ('ip_checksum', 'ip'),\n ('revcomp', 'fasta'),\n ('fnv1a64', 'fnv1a'),\n]\n\nCOMPILER_ALIASES = [\n (\"gcc-9.4.0\", \"GCC 9.4\"),\n (\"gcc-10.3.0\", \"GCC 10.3\"),\n (\"gcc-11.1.0\", \"GCC 11.1\"),\n # (\"clang-10.0.0\", \"Clang 10.0\"),\n (\"clang-11.0.0\", \"Clang 11.0\"),\n (\"clang-12.0.0\", \"Clang 12.0\"),\n (\"clang-13.0.1\", \"Clang 13.0\"),\n]\n\nLANGUAGE_ALIASES = [\n (\"rupicola\", \"Rupicola\"),\n (\"c\", \"C\"),\n]\n\ndef main():\n df = pandas.DataFrame(latest_benchmark_results.data).explode(3)\n df[3] = df[3].apply(lambda x: x/1024/1024)\n df.columns=['benchmark', 'language', 'compiler', 'cycles/byte']\n\n df['benchmark'].replace(*zip(*BENCHMARK_ALIASES), inplace=True)\n df['compiler'].replace(*zip(*COMPILER_ALIASES), inplace=True)\n df['language'].replace(*zip(*LANGUAGE_ALIASES), inplace=True)\n\n df['bench'] = df['language'] + \"/\" + df['benchmark']\n df['comp'] = df['language'] + \" \" + df['compiler']\n\n COMPILERS = [k for _, k in COMPILER_ALIASES]\n LANGUAGES = [k for _, k in LANGUAGE_ALIASES]\n KEYS = [(c, l) for l in LANGUAGES for c in COMPILERS]\n COMPS = [l + \" \" + c for (c, l) in KEYS]\n\n colors = [[\"#e9b96e\", \"#c17d11\", \"#8f5902\", \"#fcaf3e\", \"#f57900\", \"#ce5c00\"],\n [\"#ad7fa8\", \"#75507b\", \"#5c3566\", \"#729fcf\", \"#3465a4\", \"#204a87\"]]\n PALETTE = [colors[LANGUAGES.index(l)][COMPILERS.index(c)]\n for (c, l) in KEYS]\n\n palette = seaborn.color_palette(PALETTE)\n seaborn.set_theme(font=\"Inconsolata\", font_scale=1.5, style='ticks', palette=palette)\n # seaborn.set_context(\"paper\", rc={\"font.size\":10, \"axes.titlesize\":10, \"axes.labelsize\":8})\n\n # Create an array with the colors you want to use\n\n width, height = 4.7, 8\n plot = seaborn.catplot(\n data=df,\n kind='bar',\n sharex=True,\n x='cycles/byte', y='benchmark',\n order=[k for _,k in BENCHMARK_ALIASES],\n hue='comp', hue_order=COMPS,\n legend = False,\n linewidth=0,\n height=height, aspect=width/height,\n )\n plot.set(xlim=(0, None))\n plot.set_axis_labels(\"Cycles per byte on 1MiB input (lower is better)\", \"\")\n plot.add_legend(title=\"\", label_order=COMPS, labelspacing=0.2, loc='center right')\n plot.set_titles(\"\") #\"{row_name}\")\n\n plot.figure.tight_layout()\n plot.figure.savefig(\"plot.pdf\")\n # matplotlib.pyplot.tight_layout()\n # matplotlib.pyplot.show()\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "pandas.DataFrame" ] ]
DEVESHTARASIA/pyro
[ "7fce5508fe4f15a1a65a267e8d6df3aeead1a3ec", "7fce5508fe4f15a1a65a267e8d6df3aeead1a3ec" ]
[ "tests/distributions/test_categorical.py", "pyro/contrib/gp/kernels/rbf.py" ]
[ "from __future__ import absolute_import, division, print_function\n\nfrom unittest import TestCase\n\nimport numpy as np\nimport pytest\nimport scipy.stats as sp\nimport torch\nfrom torch.autograd import Variable\n\nimport pyro.distributions as dist\nfrom tests.common import assert_equal\n\n\nclass TestCategorical(TestCase):\n \"\"\"\n Tests methods specific to the Categorical distribution\n \"\"\"\n\n def setUp(self):\n n = 1\n self.ps = Variable(torch.Tensor([0.1, 0.6, 0.3]))\n self.batch_ps = Variable(torch.Tensor([[0.1, 0.6, 0.3], [0.2, 0.4, 0.4]]))\n self.n = Variable(torch.Tensor([n]))\n self.test_data = Variable(torch.Tensor([2]))\n self.analytic_mean = n * self.ps\n one = Variable(torch.ones(3))\n self.analytic_var = n * torch.mul(self.ps, one.sub(self.ps))\n\n # Discrete Distribution\n self.d_ps = Variable(torch.Tensor([[0.2, 0.3, 0.5], [0.1, 0.1, 0.8]]))\n self.d_test_data = Variable(torch.Tensor([[0], [5]]))\n\n self.n_samples = 50000\n\n self.support_non_vec = torch.Tensor([[0], [1], [2]])\n self.support = torch.Tensor([[[0], [0]], [[1], [1]], [[2], [2]]])\n\n def test_log_pdf(self):\n log_px_torch = dist.categorical.batch_log_pdf(self.test_data, self.ps).data[0]\n log_px_np = float(sp.multinomial.logpmf(np.array([0, 0, 1]), 1, self.ps.data.cpu().numpy()))\n assert_equal(log_px_torch, log_px_np, prec=1e-4)\n\n def test_mean_and_var(self):\n torch_samples = [dist.categorical(self.ps).data.cpu().numpy()\n for _ in range(self.n_samples)]\n _, counts = np.unique(torch_samples, return_counts=True)\n computed_mean = float(counts[0]) / self.n_samples\n assert_equal(computed_mean, self.analytic_mean.data.cpu().numpy()[0], prec=0.05)\n\n def test_support_non_vectorized(self):\n s = dist.categorical.enumerate_support(self.d_ps[0].squeeze(0))\n assert_equal(s.data, self.support_non_vec)\n\n def test_support(self):\n s = dist.categorical.enumerate_support(self.d_ps)\n assert_equal(s.data, self.support)\n\n\ndef wrap_nested(x, dim):\n if dim == 0:\n return x\n return wrap_nested([x], dim-1)\n\n\[email protected](params=[1, 2, 3], ids=lambda x: \"dim=\" + str(x))\ndef dim(request):\n return request.param\n\n\[email protected](params=[[0.3, 0.5, 0.2]], ids=None)\ndef ps(request):\n return request.param\n\n\ndef modify_params_using_dims(ps, dim):\n return Variable(torch.Tensor(wrap_nested(ps, dim-1)))\n\n\ndef test_support_dims(dim, ps):\n ps = modify_params_using_dims(ps, dim)\n support = dist.categorical.enumerate_support(ps)\n assert_equal(support.size(), torch.Size((ps.size(-1),) + ps.size()[:-1] + (1,)))\n\n\ndef test_sample_dims(dim, ps):\n ps = modify_params_using_dims(ps, dim)\n sample = dist.categorical.sample(ps)\n assert_equal(sample.size(), ps.size()[:-1] + (1,))\n\n\ndef test_batch_log_dims(dim, ps):\n batch_pdf_shape = (3,) + (1,) * dim\n expected_log_pdf = np.array(wrap_nested(list(np.log(ps)), dim-1)).reshape(*batch_pdf_shape)\n ps = modify_params_using_dims(ps, dim)\n support = dist.categorical.enumerate_support(ps)\n batch_log_pdf = dist.categorical.batch_log_pdf(support, ps)\n assert_equal(batch_log_pdf.data.cpu().numpy(), expected_log_pdf)\n", "from __future__ import absolute_import, division, print_function\n\nimport torch\nfrom torch.nn import Parameter\n\nfrom .kernel import Kernel\n\n\nclass RBF(Kernel):\n \"\"\"\n Implementation of Radial Basis Function kernel.\n\n By default, parameters will be `torch.nn.Parameter`s containing `torch.FloatTensor`s.\n To cast them to the correct data type or GPU device, we can call methods such as\n `.double()`, 
`.cuda(device=None)`,... See\n `torch.nn.Module <http://pytorch.org/docs/master/nn.html#torch.nn.Module>`_ for more information.\n\n :param int input_dim: Dimension of inputs for this kernel.\n :param torch.Tensor variance: Variance parameter of this kernel.\n :param torch.Tensor lengthscale: Length scale parameter of this kernel.\n \"\"\"\n\n def __init__(self, input_dim, variance=None, lengthscale=None, active_dims=None, name=\"RBF\"):\n super(RBF, self).__init__(input_dim, active_dims, name)\n if variance is None:\n variance = torch.ones(1)\n self.variance = Parameter(variance)\n if lengthscale is None:\n lengthscale = torch.ones(input_dim)\n self.lengthscale = Parameter(lengthscale)\n\n def forward(self, X, Z=None):\n if Z is None:\n Z = X\n X = self._slice_X(X)\n Z = self._slice_X(Z)\n if X.size(1) != Z.size(1):\n raise ValueError(\"Inputs must have the same number of features.\")\n\n scaled_X = X / self.lengthscale\n scaled_Z = Z / self.lengthscale\n X2 = (scaled_X ** 2).sum(1, keepdim=True)\n Z2 = (scaled_Z ** 2).sum(1, keepdim=True)\n XZ = scaled_X.matmul(scaled_Z.t())\n d2 = X2 - 2 * XZ + Z2.t()\n return self.variance * torch.exp(-0.5 * d2)\n" ]
[ [ "numpy.array", "numpy.log", "torch.ones", "torch.Tensor", "numpy.unique" ], [ "torch.ones", "torch.exp", "torch.nn.Parameter" ] ]
Learning-and-Intelligent-Systems/LISdf
[ "55faac02bfa462d5ae665b703305fc265feb8e0c" ]
[ "tests/test_planner_output/test_command.py" ]
[ "from dataclasses import dataclass\nfrom typing import Dict, List\n\nimport numpy as np\nimport pytest\n\nfrom lisdf.planner_output.command import (\n ActuateGripper,\n Command,\n GripperPosition,\n JointName,\n JointSpacePath,\n)\n\n\n@dataclass(frozen=True)\nclass _ConcreteCommand(Command, type=\"_ConcreteCommand\"):\n other_attr: str = \"willshen\"\n\n def validate(self):\n pass\n\n @classmethod\n def _from_json_dict(cls, json_dict: Dict) -> \"_ConcreteCommand\":\n raise NotImplementedError\n\n\ndef test_command_to_dict():\n assert _ConcreteCommand().to_dict() == {\n \"type\": \"_ConcreteCommand\",\n \"other_attr\": \"willshen\",\n }\n\n\[email protected](\n \"waypoints, duration\",\n [\n pytest.param([], 1.0, id=\"empty waypoints\"),\n pytest.param({\"joint_1\": 0.0}, 1.0, id=\"waypoint is a float\"),\n pytest.param({\"joint_1\": []}, 2.5, id=\"waypoint is empty list\"),\n pytest.param(\n {\"joint_1\": [0.0]},\n 2.5,\n id=\"only one waypoint\",\n ),\n pytest.param(\n {\"joint_1\": [0.0, \"lis is cool\", 1.0]},\n 3.0,\n id=\"waypoint is not a list of numbers\",\n ),\n pytest.param(\n {\"joint_1\": [0.0, 1.0, 1.0], \"joint_2\": [0.5, 0.6]},\n 42.0,\n id=\"waypoints are not the same length\",\n ),\n pytest.param(\n {\"joint_1\": [0.0, 0.5, 1.0]},\n 0.0,\n id=\"zero duration\",\n ),\n pytest.param(\n {\"joint_1\": [0.0, 0.5, 1.0]},\n -999,\n id=\"negative duration\",\n ),\n ],\n)\ndef test_joint_space_path_raises_value_error(waypoints, duration):\n with pytest.raises(ValueError):\n JointSpacePath(waypoints, duration)\n\n\[email protected](\n \"waypoints, duration, expected_joint_names, expected_dimensionality, \"\n \"expected_num_waypoints\",\n [\n ({\"joint_1\": [0.0, 0.5, 1.0]}, 5.0, [\"joint_1\"], 1, 3),\n (\n {\"joint_1\": list(range(10)), \"joint_2\": list(reversed(range(10)))},\n 3.0,\n [\"joint_1\", \"joint_2\"],\n 2,\n 10,\n ),\n ],\n)\ndef test_joint_space_path(\n waypoints,\n duration,\n expected_joint_names,\n expected_dimensionality,\n expected_num_waypoints,\n):\n path = JointSpacePath(waypoints, duration, label=\"my_label\")\n assert path.waypoints == waypoints\n assert path.duration == duration\n assert path.type == \"JointSpacePath\"\n assert path.label == \"my_label\"\n\n # Other properties\n assert path.joint_names == expected_joint_names\n assert path.dimensionality == expected_dimensionality\n assert path.num_waypoints == expected_num_waypoints\n\n\[email protected](\n \"waypoints, duration, label, raises_error\",\n [\n (\n {\n \"joint_1\": [0.0, 0.5, 1.0],\n \"joint_2\": [0.0, 0.5, 1.0],\n },\n 5.0,\n \"my_label\",\n False,\n ),\n (\n {\n \"joint_1\": [0.0, 0.5, 1.0],\n },\n 2.0,\n \"wrong_type\",\n True,\n ),\n ],\n)\ndef test_joint_space_path_from_json_dict(waypoints, duration, label, raises_error):\n \"\"\"Test that we can create a JointSpacePath from a JSON dict\"\"\"\n\n def gen_json_dict():\n # We need different dict objects as the JSON dicts are mutable\n return {\n \"type\": JointSpacePath.type if not raises_error else \"wrong_type\",\n \"waypoints\": waypoints,\n \"duration\": duration,\n \"label\": label,\n }\n\n if raises_error:\n with pytest.raises(ValueError):\n JointSpacePath.from_json_dict(gen_json_dict())\n else:\n assert (\n Command.from_json_dict(gen_json_dict())\n == JointSpacePath.from_json_dict(gen_json_dict())\n == JointSpacePath(waypoints, duration, label)\n )\n\n\[email protected]\ndef complex_path() -> JointSpacePath:\n return JointSpacePath(\n waypoints={\n \"joint_1\": [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],\n \"joint_2\": 
[0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5],\n \"joint_3\": [-0.2, -0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8],\n \"joint_4\": [\n -1.0,\n -0.85,\n -0.7,\n -0.55,\n -0.4,\n -0.25,\n -0.1,\n 0.05,\n 0.2,\n 0.35,\n 0.5,\n ],\n \"joint_5\": [-0.5, -0.4, -0.3, -0.2, -0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5],\n \"joint_6\": [1.0, 0.95, 0.9, 0.85, 0.8, 0.75, 0.7, 0.65, 0.6, 0.55, 0.5],\n \"joint_7\": [1.0, 0.85, 0.7, 0.55, 0.4, 0.25, 0.1, -0.05, -0.2, -0.35, -0.5],\n },\n duration=42.0,\n label=\"complex_joint_space_path\",\n )\n\n\ndef _expected_waypoint_at_idx(\n waypoints: Dict[JointName, List[float]], idx: int\n) -> Dict[JointName, float]:\n return {joint_name: positions[idx] for joint_name, positions in waypoints.items()}\n\n\ndef test_joint_space_path_waypoints_derived_properties(complex_path):\n assert complex_path.joint_names == [f\"joint_{num}\" for num in range(1, 8)]\n assert complex_path.dimensionality == 7\n assert complex_path.num_waypoints == 11\n\n\ndef test_joint_space_path_waypoints_for_joint(complex_path):\n \"\"\"Getting the waypoints for a given joint\"\"\"\n assert (\n complex_path.waypoints_for_joint(\"joint_5\") == complex_path.waypoints[\"joint_5\"]\n )\n with pytest.raises(ValueError):\n complex_path.waypoints_for_joint(\"joint_999\")\n\n\ndef test_joint_space_path_waypoint(complex_path):\n \"\"\"Test getting the waypoints at a given index\"\"\"\n assert complex_path.waypoint(0) == _expected_waypoint_at_idx(\n complex_path.waypoints, 0\n )\n assert complex_path.waypoint(10) == _expected_waypoint_at_idx(\n complex_path.waypoints, 10\n )\n # Check we can do negative indexing\n assert complex_path.waypoint(10) == complex_path.waypoint(-1)\n assert complex_path.waypoint(-11) == complex_path.waypoint(0)\n\n with pytest.raises(ValueError):\n complex_path.waypoint(999)\n\n\ndef test_joint_space_path_waypoint_as_np_array(complex_path):\n \"\"\"Testing getting the waypoints at a given index as a np.array\"\"\"\n # joint_1, joint_2, ..., joint_7\n joint_name_ordering = [f\"joint_{num}\" for num in range(1, 8)]\n expected_waypoint_at_idx_0 = list(\n _expected_waypoint_at_idx(complex_path.waypoints, 0).values()\n )\n expected_waypoint_at_idx_10 = list(\n _expected_waypoint_at_idx(complex_path.waypoints, 10).values()\n )\n assert len(expected_waypoint_at_idx_0) == len(expected_waypoint_at_idx_10) == 7\n\n assert np.isclose(\n complex_path.waypoint_as_np_array(0, joint_name_ordering),\n np.array(expected_waypoint_at_idx_0),\n ).all()\n\n assert np.isclose(\n complex_path.waypoint_as_np_array(10, joint_name_ordering),\n np.array(expected_waypoint_at_idx_10),\n ).all()\n\n # Reverse ordering: joint_7, joint_6, ..., joint_1\n reversed_joint_name_ordering = list(reversed(joint_name_ordering))\n assert np.isclose(\n complex_path.waypoint_as_np_array(0, reversed_joint_name_ordering),\n np.array(list(reversed(expected_waypoint_at_idx_0))),\n ).all()\n assert np.isclose(\n complex_path.waypoint_as_np_array(10, reversed_joint_name_ordering),\n np.array(list(reversed(expected_waypoint_at_idx_10))),\n ).all()\n\n # Invalid joint name ordering 1_joint, 2_joint, etc.\n with pytest.raises(ValueError):\n complex_path.waypoint_as_np_array(\n 0, [f\"{idx + 1}_joint\" for idx in range(1, 8)]\n )\n\n\ndef test_joint_space_paths_waypoints_as_np_array(complex_path):\n \"\"\"Test getting all the waypoints as a np.array\"\"\"\n # joint_1, joint_2, ..., joint_7\n joint_name_ordering = [f\"joint_{num}\" for num in range(1, 8)]\n expected_np_array = np.array(\n 
[complex_path.waypoints[joint_name] for joint_name in joint_name_ordering]\n ).T\n assert expected_np_array.shape == (11, 7)\n\n assert np.isclose(\n complex_path.waypoints_as_np_array(joint_name_ordering), expected_np_array\n ).all()\n\n # Reverse ordering: joint_7, joint_6, ..., joint_1\n # Check matrix is flipped as well\n reversed_joint_name_ordering = list(reversed(joint_name_ordering))\n assert np.isclose(\n complex_path.waypoints_as_np_array(reversed_joint_name_ordering),\n np.flip(expected_np_array, axis=1),\n ).all()\n\n # Invalid joint name ordering 1_joint, 2_joint, etc.\n with pytest.raises(ValueError):\n complex_path.waypoint_as_np_array(\n 0, [f\"{idx + 1}_joint\" for idx in range(1, 8)]\n )\n\n\[email protected](\n \"configurations\",\n [{}, {\"gripper_1\": \"lis is cool\"}, {\"gripper_n\": \"my gripper might be open?\"}],\n)\ndef test_actuate_gripper_raises_value_error(configurations):\n with pytest.raises(ValueError):\n ActuateGripper(configurations)\n\n\[email protected](\n \"configurations, expected_joint_names\",\n [\n ({\"gripper_1\": GripperPosition.open}, [\"gripper_1\"]),\n (\n {\n \"gripper_left\": GripperPosition.close,\n \"gripper_right\": GripperPosition.open,\n },\n [\"gripper_left\", \"gripper_right\"],\n ),\n ],\n)\ndef test_actuate_gripper(configurations, expected_joint_names):\n actuate_gripper = ActuateGripper(configurations)\n assert actuate_gripper.configurations == configurations\n assert actuate_gripper.type == \"ActuateGripper\"\n\n # Derived properties\n assert actuate_gripper.joint_names == expected_joint_names\n\n # Check we can get position via gripper joint name\n test_gripper_joint = next(iter(configurations.keys()))\n expected_gripper_position = configurations[test_gripper_joint]\n assert (\n actuate_gripper.position_for_gripper_joint(test_gripper_joint)\n == expected_gripper_position\n )\n\n # Non-existent joint raises error\n with pytest.raises(ValueError):\n actuate_gripper.position_for_gripper_joint(\"non-existent-gripper-joint\")\n\n\[email protected](\n \"configurations, label, raise_error\",\n [\n (\n {\n \"gripper_left\": \"open\",\n \"gripper_right\": \"close\",\n },\n \"two_grippers\",\n True,\n ),\n ({\"gripper_0\": \"close\"}, \"one_gripper\", False),\n ],\n)\ndef test_actuate_gripper_from_json_dict(configurations, label, raise_error):\n def gen_json_dict():\n # We need different dict objects as the JSON dicts are mutable\n return {\n \"type\": ActuateGripper.type if not raise_error else \"wrong_type\",\n \"configurations\": configurations,\n \"label\": label,\n }\n\n if raise_error:\n with pytest.raises(ValueError):\n ActuateGripper.from_json_dict(gen_json_dict())\n else:\n assert (\n JointSpacePath.from_json_dict(gen_json_dict())\n == Command.from_json_dict(gen_json_dict())\n == ActuateGripper(\n configurations={\n joint: GripperPosition[pos] for joint, pos in configurations.items()\n },\n label=label,\n )\n )\n" ]
[ [ "numpy.array", "numpy.flip" ] ]
qxde01/-gastric-cancer-detect
[ "9f2ffb1e0cee4b4c305609fc0a69e557571d197a" ]
[ "models/Unet.py" ]
[ "from tensorflow import keras\n#https://github.com/zhixuhao/unet\n\ndef conv_block(inputs, filters, kernel_size, strides, padding='same'):\n Z = keras.layers.Conv2D(filters, kernel_size, strides=strides, padding=padding, use_bias=False)(inputs)\n Z = keras.layers.BatchNormalization(axis=-1)(Z)\n A = keras.layers.PReLU(shared_axes=[1, 2])(Z)\n return A\n\ndef Unet(input_shape=(256, 256, 1)):\n inputs = keras.layers.Input(input_shape)\n conv1 = conv_block(inputs, 32, 3, 1, padding='same')\n conv1 = conv_block(conv1, 32, 3, 1, padding='same')\n pool1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv1)\n conv2 = conv_block(pool1, 64, 3, 1, padding='same')\n conv2 = conv_block(conv2, 64, 3, 1, padding='same')\n pool2 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv2)\n conv3 = conv_block(pool2, 128, 3, 1, padding='same')\n conv3 = conv_block(conv3, 128, 3, 1, padding='same')\n\n pool3 = keras.layers.MaxPooling2D(pool_size=(2, 2))(conv3)\n conv4 = conv_block(pool3, 256, 3, 1, padding='same')\n conv4 = conv_block(conv4, 256, 3, 1, padding='same')\n drop4 = keras.layers.Dropout(0.5)(conv4)\n pool4 = keras.layers.MaxPooling2D(pool_size=(2, 2))(drop4)\n\n conv5 = conv_block(pool4, 512, 3, 1, padding='same')\n conv5 = conv_block(conv5, 512, 3, 1, padding='same')\n\n drop5 = keras.layers.Dropout(0.5)(conv5)\n\n up6 = conv_block(keras.layers.UpSampling2D(size=(2, 2))(drop5), 256, 3, 1, padding='same')\n merge6 = keras.layers.concatenate([drop4, up6], axis=3)\n conv6 =conv_block(merge6, 256, 3, 1, padding='same')\n conv6 = conv_block(conv6, 256, 3, 1, padding='same')\n\n up7 = conv_block(keras.layers.UpSampling2D(size=(2, 2))(conv6), 128, 3, 1, padding='same')\n merge7 = keras.layers.concatenate([conv3, up7], axis=3)\n\n conv7 =conv_block(merge7, 128, 3, 1, padding='same')\n conv7 = conv_block(conv7, 128, 3, 1, padding='same')\n\n up8 = conv_block(keras.layers.UpSampling2D(size=(2, 2))(conv7), 64, 3, 1, padding='same')\n merge8 = keras.layers.concatenate([conv2, up8], axis=3)\n\n conv8 =conv_block(merge8, 64, 3, 1, padding='same')\n conv8 = conv_block(conv8, 64, 3, 1, padding='same')\n\n up9 =conv_block(keras.layers.UpSampling2D(size=(2, 2))(conv8), 32, 3, 1, padding='same')\n merge9 = keras.layers.concatenate([conv1, up9], axis=3)\n\n conv9 = conv_block(merge9, 32, 3, 1, padding='same')\n conv9 = conv_block(conv9, 32, 3, 1, padding='same')\n conv9 = conv_block(conv9, 3, 3, 1, padding='same')\n mask = keras.layers.Conv2D(1, 3, activation='sigmoid',name='mask', padding='same')(conv9)\n\n #output=keras.layers.Flatten()(drop5)\n #output = keras.layers.Dense(1, activation='sigmoid', name='classify')(output)\n #model = keras.models.Model(inputs=inputs, outputs=[output,mask])\n model = keras.models.Model(inputs=inputs, outputs=mask)\n\n return model\n" ]
[ [ "tensorflow.keras.layers.Input", "tensorflow.keras.layers.UpSampling2D", "tensorflow.keras.models.Model", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.layers.Dropout", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.PReLU", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.concatenate" ] ]
Rmloong/movie-revenue-predictor
[ "702e16aec92ac13f47f655950e63bdacf1783200" ]
[ "src/model.py" ]
[ "\n\"\"\"\nModule that fits the model and stores it (joblib)\nin a pickle file.\nWhen run as a module, this will load a csv dataset,\ntrain a RF regression model, and then pickle the\nresulting model object to disk.\n\nNote: The parameters for the RF model were selected\nbased upon GridSearchCV exploration in jupyter notebook.\nIf new data or a new model type is needed, the user\nshould refit and GridSearch again.\n\"\"\"\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom sklearn.ensemble import RandomForestRegressor\n# from sklearn.model_selection import train_test_split\n# from sklearn.metrics import mean_squared_error\n# from sklearn.model_selection import GridSearchCV\nfrom sklearn.externals import joblib\nimport pymongo\n\ndef get_data(filename):\n \"\"\"Load clean data from a file and return training data.\n Parameters\n ----------\n filename: The path to a csv file containing the cleaned data.\n\n Returns\n -------\n X: A numpy array containing the columns used for training.\n y: A numpy array containing log(rev) values for model response.\n \"\"\"\n\n df = pd.read_csv(filename, sep = '|')\n X = df.iloc[:,1:]\n y = df.iloc[:,0]\n return X,y\n\ndef build_model(X,y):\n '''\n Builds a random forest model based upon the best parameters found in GridSearchCV\n Then pickles it (puts into storage)\n Parameters\n ----------\n X: A numpy array containing the columns used for training.\n y: A numpy array containing log(rev) values for model response.\n\n Returns\n -------\n None, the model will be pickled (put into storage)\n\n '''\n rf_best_params = RandomForestRegressor(n_estimators = 1000, max_depth = 10, max_features = 10)\n rf_best_params.fit(X,y)\n joblib.dump(rf_best_params, 'model.pkl')\n model_columns = X.columns\n joblib.dump(model_columns, 'model_columns.pkl')\n return None\n\nif __name__ == '__main__':\n X, y = get_data('../data/data_cleaned.csv')\n build_model(X,y)\n" ]
[ [ "pandas.read_csv", "sklearn.ensemble.RandomForestRegressor", "sklearn.externals.joblib.dump" ] ]
lizhaoliu-Lec/QANet_dureader
[ "439ba57f98e330d98de393d3ad879b7fd0e3fc29" ]
[ "dataloader.py" ]
[ "# -*- coding:utf8 -*-\n\nimport json\nimport logging\nimport numpy as np\nfrom collections import Counter\nimport jieba\n\n\ndef word_tokenize(sent):\n if isinstance(sent, list):\n tokens = sent\n else:\n tokens = jieba.lcut(sent)\n return [token for token in tokens if len(token) >= 1]\n\n\nclass DataLoader(object):\n \"\"\"\n This module implements the APIs for loading and using baidu reading comprehension dataset\n \"\"\"\n\n def __init__(self, max_a_len, max_p_len, max_q_len, max_char_len,\n train_files=[], dev_files=[], test_files=[]):\n self.logger = logging.getLogger(\"brc\")\n self.max_a_len = max_a_len\n self.max_p_len = max_p_len\n self.max_q_len = max_q_len\n self.max_char_len = max_char_len\n\n self.train_set, self.dev_set, self.test_set = [], [], []\n if train_files:\n for train_file in train_files:\n self.logger.info('---train file-----{}'.format(train_file))\n self.train_set += self._load_dataset(train_file, train=True)\n self.logger.info('Train set size: {} questions.'.format(len(self.train_set)))\n\n if dev_files:\n for dev_file in dev_files:\n self.logger.info('---dev file-----{}'.format(dev_file))\n self.dev_set += self._load_dataset(dev_file, train=True)\n self.logger.info('Dev set size: {} questions.'.format(len(self.dev_set)))\n\n if test_files:\n for test_file in test_files:\n self.test_set += self._load_dataset(test_file)\n self.logger.info('Test set size: {} questions.'.format(len(self.test_set)))\n\n def _load_dataset(self, data_path, train=False):\n \"\"\"\n Loads the dataset\n Args:\n data_path: the data file to load\n \"\"\"\n max_char_num = 0\n with open(data_path, encoding='UTF-8') as fin:\n data_set = []\n for idx, line in enumerate(fin):\n\n sample = json.loads(line.strip())\n\n if len(sample['answer']) > self.max_a_len:\n print(sample)\n print('got answer idx `%d` bigger than max_a_len `%d`, ignore it.' 
% (\n sample['answer'][0], self.max_a_len))\n continue\n\n question_tokens = word_tokenize(sample['question'])\n sample['question_tokens'] = question_tokens\n question_chars = [list(token) for token in question_tokens]\n sample['question_chars'] = question_chars\n\n for char in question_chars:\n if len(char) > max_char_num:\n max_char_num = len(char)\n\n context_tokens = word_tokenize(sample['context'])\n sample['context_tokens'] = context_tokens\n context_chars = [list(token) for token in context_tokens]\n sample['context_chars'] = context_chars\n\n for char in context_chars:\n if len(char) > max_char_num:\n max_char_num = len(char)\n\n data_set.append(sample)\n return data_set\n\n def _one_mini_batch(self, data, indices, pad_id, pad_char_id):\n \"\"\"\n Get one mini batch\n Args:\n data: all data\n indices: the indices of the samples to be selected\n pad_id:\n Returns:\n one batch of data\n \"\"\"\n batch_data = {\n 'raw_data': [data[i] for i in indices],\n 'question_token_ids': [],\n 'question_char_ids': [],\n 'question_length': [],\n 'context_token_ids': [],\n 'context_length': [],\n 'context_char_ids': [],\n 'label': [],\n }\n\n for sidx, sample in enumerate(batch_data['raw_data']):\n batch_data['question_token_ids'].append(sample['question_token_ids'])\n batch_data['question_char_ids'].append(sample['question_char_ids'])\n batch_data['question_length'].append(len(sample['question_token_ids']))\n batch_data['context_token_ids'].append(sample['context_token_ids'])\n batch_data['context_length'].append(min(len(sample['context_token_ids']), self.max_p_len))\n batch_data['context_char_ids'].append(sample['context_char_ids'])\n\n batch_data, padded_p_len, padded_q_len = self._dynamic_padding(batch_data, pad_id, pad_char_id)\n for sample in batch_data['raw_data']:\n batch_data['label'].append(sample['answer'][0])\n return batch_data\n\n def _dynamic_padding(self, batch_data, pad_id, pad_char_id):\n \"\"\"\n Dynamically pads the batch_data with pad_id\n \"\"\"\n pad_char_len = self.max_char_len\n pad_p_len = self.max_p_len # min(self.max_p_len, max(batch_data['passage_length']))\n pad_q_len = self.max_q_len # min(self.max_q_len, max(batch_data['question_length']))\n batch_data['context_token_ids'] = [(ids + [pad_id] * (pad_p_len - len(ids)))[: pad_p_len]\n for ids in batch_data['context_token_ids']]\n for index, char_list in enumerate(batch_data['context_char_ids']):\n # print(batch_data['passage_char_ids'])\n for char_index in range(len(char_list)):\n if len(char_list[char_index]) >= pad_char_len:\n char_list[char_index] = char_list[char_index][:self.max_char_len]\n else:\n char_list[char_index] += [pad_char_id] * (pad_char_len - len(char_list[char_index]))\n batch_data['context_char_ids'][index] = char_list\n batch_data['context_char_ids'] = [(ids + [[pad_char_id] * pad_char_len] * (pad_p_len - len(ids)))[:pad_p_len]\n for ids in batch_data['context_char_ids']]\n\n # print(np.array(batch_data['passage_char_ids']).shape, \"==========\")\n\n batch_data['question_token_ids'] = [(ids + [pad_id] * (pad_q_len - len(ids)))[: pad_q_len]\n for ids in batch_data['question_token_ids']]\n for index, char_list in enumerate(batch_data['question_char_ids']):\n for char_index in range(len(char_list)):\n if len(char_list[char_index]) >= pad_char_len:\n char_list[char_index] = char_list[char_index][:self.max_char_len]\n else:\n char_list[char_index] += [pad_char_id] * (pad_char_len - len(char_list[char_index]))\n batch_data['question_char_ids'][index] = char_list\n batch_data['question_char_ids'] = [(ids + 
[[pad_char_id] * pad_char_len] * (pad_q_len - len(ids)))[:pad_q_len]\n for ids in batch_data['question_char_ids']]\n\n return batch_data, pad_p_len, pad_q_len\n\n def word_iter(self, set_name=None):\n \"\"\"\n Iterates over all the words in the dataset\n Args:\n set_name: if it is set, then the specific set will be used\n Returns:\n a generator\n \"\"\"\n if set_name is None:\n data_set = self.train_set + self.dev_set + self.test_set\n elif set_name == 'train':\n data_set = self.train_set\n elif set_name == 'dev':\n data_set = self.dev_set\n elif set_name == 'test':\n data_set = self.test_set\n else:\n raise NotImplementedError('No data set named as {}'.format(set_name))\n if data_set is not None:\n for sample in data_set:\n for token in sample['question_tokens']:\n yield token\n for token in sample['context_tokens']:\n yield token\n\n def convert_to_ids(self, vocab):\n \"\"\"\n Convert the question and paragraph in the original dataset to ids\n Args:\n vocab: the vocabulary on this dataset\n \"\"\"\n for data_set in [self.train_set, self.dev_set, self.test_set]:\n if data_set is None:\n continue\n for sample in data_set:\n sample['question_token_ids'] = vocab.convert_word_to_ids(sample['question_tokens'])\n sample[\"question_char_ids\"] = vocab.convert_char_to_ids(sample['question_tokens'])\n sample['context_token_ids'] = vocab.convert_word_to_ids(sample['context_tokens'])\n sample[\"context_char_ids\"] = vocab.convert_char_to_ids(sample['context_tokens'])\n\n def next_batch(self, set_name, batch_size, pad_id, pad_char_id, shuffle=True):\n \"\"\"\n Generate data batches for a specific dataset (train/dev/test)\n Args:\n set_name: train/dev/test to indicate the set\n batch_size: number of samples in one batch\n pad_id: pad id\n pad_char_id: pad char id\n shuffle: if set to be true, the data is shuffled.\n Returns:\n a generator for all batches\n \"\"\"\n if set_name == 'train':\n data = self.train_set\n elif set_name == 'dev':\n data = self.dev_set\n elif set_name == 'test':\n data = self.test_set\n else:\n raise NotImplementedError('No data set named as {}'.format(set_name))\n data_size = len(data)\n indices = np.arange(data_size)\n if shuffle:\n np.random.shuffle(indices)\n for batch_start in np.arange(0, data_size, batch_size):\n batch_indices = indices[batch_start: batch_start + batch_size]\n yield self._one_mini_batch(data, batch_indices, pad_id, pad_char_id)\n" ]
[ [ "numpy.arange", "numpy.random.shuffle" ] ]
dubey/tensorflow
[ "2783e5925d83b3a333e416d1601f9fbeaa645520" ]
[ "tensorflow/lite/python/lite_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for lite.py.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\nfrom absl.testing import parameterized\nimport numpy as np\n\nfrom tensorflow.lite.python import lite\nfrom tensorflow.lite.python import lite_constants\nfrom tensorflow.lite.python.interpreter import Interpreter\nfrom tensorflow.python import keras\nfrom tensorflow.python.client import session\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.ops.variables import global_variables_initializer as _global_variables_initializer\nfrom tensorflow.python.platform import gfile\nfrom tensorflow.python.platform import resource_loader\nfrom tensorflow.python.platform import test\nfrom tensorflow.python.saved_model import saved_model\nfrom tensorflow.python.training.training_util import write_graph\n\n\nclass FromConstructor(test_util.TensorFlowTestCase):\n\n # Tests invalid constructors using a dummy value for the GraphDef.\n def testInvalidConstructor(self):\n message = ('If input_tensors and output_tensors are None, both '\n 'input_arrays_with_shape and output_arrays must be defined.')\n\n # `output_arrays` is not defined.\n with self.assertRaises(ValueError) as error:\n lite.TFLiteConverter(\n None, None, [], input_arrays_with_shape=[('input', [3, 9])])\n self.assertEqual(message, str(error.exception))\n\n # `input_arrays_with_shape` is not defined.\n with self.assertRaises(ValueError) as error:\n lite.TFLiteConverter(None, [], None, output_arrays=['output'])\n self.assertEqual(message, str(error.exception))\n\n # Tests valid constructors using a dummy value for the GraphDef.\n def testValidConstructor(self):\n converter = lite.TFLiteConverter(\n None,\n None,\n None,\n input_arrays_with_shape=[('input', [3, 9])],\n output_arrays=['output'])\n self.assertFalse(converter._has_valid_tensors())\n self.assertEqual(converter.get_input_arrays(), ['input'])\n\n with self.assertRaises(ValueError) as error:\n converter._set_batch_size(1)\n self.assertEqual(\n 'The batch size cannot be set for this model. 
Please use '\n 'input_shapes parameter.', str(error.exception))\n\n converter = lite.TFLiteConverter(None, ['input_tensor'], ['output_tensor'])\n self.assertTrue(converter._has_valid_tensors())\n\n\n@test_util.run_v1_only('Incompatible with 2.0.')\nclass FromSessionTest(test_util.TensorFlowTestCase):\n\n def testFloat(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testString(self):\n in_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.string)\n out_tensor = array_ops.reshape(in_tensor, shape=[2, 2])\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.string_, input_details[0]['dtype'])\n self.assertTrue(([4] == input_details[0]['shape']).all())\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('Reshape', output_details[0]['name'])\n self.assertEqual(np.string_, output_details[0]['dtype'])\n self.assertTrue(([2, 2] == output_details[0]['shape']).all())\n # TODO(b/122659643): Test setting/getting string data via the python\n # interpreter API after support has been added.\n\n def testQuantization(self):\n in_tensor_1 = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')\n in_tensor_2 = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')\n out_tensor = array_ops.fake_quant_with_min_max_args(\n in_tensor_1 + in_tensor_2, min=0., max=1., name='output')\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(\n sess, [in_tensor_1, in_tensor_2], [out_tensor])\n converter.inference_type = lite_constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {\n 'inputA': (0., 1.),\n 'inputB': (0., 1.)\n } # mean, std_dev\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n 
interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(2, len(input_details))\n self.assertEqual('inputA', input_details[0]['name'])\n self.assertEqual(np.uint8, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((1., 0.),\n input_details[0]['quantization']) # scale, zero_point\n\n self.assertEqual('inputB', input_details[1]['name'])\n self.assertEqual(np.uint8, input_details[1]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())\n self.assertEqual((1., 0.),\n input_details[1]['quantization']) # scale, zero_point\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('output', output_details[0]['name'])\n self.assertEqual(np.uint8, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertTrue(output_details[0]['quantization'][0] > 0) # scale\n\n def testQuantizationInvalid(self):\n in_tensor_1 = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputA')\n in_tensor_2 = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32, name='inputB')\n out_tensor = array_ops.fake_quant_with_min_max_args(\n in_tensor_1 + in_tensor_2, min=0., max=1., name='output')\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(\n sess, [in_tensor_1, in_tensor_2], [out_tensor])\n converter.inference_type = lite_constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {'inputA': (0., 1.)} # mean, std_dev\n with self.assertRaises(ValueError) as error:\n converter.convert()\n self.assertEqual(\n 'Quantization input stats are not available for input tensors '\n '\\'inputB\\'.', str(error.exception))\n\n def testIntermediateInputArray(self):\n \"\"\"Convert a model from an intermediate input array.\"\"\"\n in_tensor_init = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n in_tensor_final = in_tensor_init + in_tensor_init\n out_tensor = in_tensor_final + in_tensor_final\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor_final],\n [out_tensor])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('add', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add_1', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testSizeNoneInvalid(self):\n in_tensor = array_ops.placeholder(dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Test None as shape.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n with self.assertRaises(ValueError) as error:\n converter.convert()\n 
self.assertEqual('Provide an input shape for input array \\'Placeholder\\'.',\n str(error.exception))\n\n def testScalarValid(self):\n # Construct a graph using a scalar (empty shape) input.\n in_tensor = array_ops.placeholder(dtype=dtypes.float32, shape=[])\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Test conversion with the scalar input shape.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([] == input_details[0]['shape']).all())\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([] == input_details[0]['shape']).all())\n\n # Validate inference using the scalar inputs/outputs.\n test_input = np.array(4.0, dtype=np.float32)\n expected_output = np.array(8.0, dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], test_input)\n interpreter.invoke()\n\n output_data = interpreter.get_tensor(output_details[0]['index'])\n self.assertTrue((expected_output == output_data).all())\n\n def testSizeInvalid(self):\n in_tensor = array_ops.placeholder(\n shape=[1, None, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Test invalid shape. None after 1st dimension.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n with self.assertRaises(ValueError) as error:\n converter.convert()\n self.assertEqual(\n 'None is only supported in the 1st dimension. 
Tensor '\n '\\'Placeholder\\' has invalid shape \\'[1, None, 16, 3]\\'.',\n str(error.exception))\n\n def testBatchSizeValid(self):\n in_tensor = array_ops.placeholder(\n shape=[None, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testFreezeGraph(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n var = variable_scope.get_variable(\n 'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + var\n sess = session.Session()\n sess.run(_global_variables_initializer())\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n # TODO(nupurgarg): Verify value of contents in GraphViz.\n def testGraphviz(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n converter.output_format = lite_constants.GRAPHVIZ_DOT\n graphviz_output = converter.convert()\n self.assertTrue(graphviz_output)\n\n # TODO(nupurgarg): Verify value of contents in GraphViz.\n def testDumpGraphviz(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n graphviz_dir = self.get_temp_dir()\n 
converter.dump_graphviz_dir = graphviz_dir\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Ensure interpreter is able to allocate and check graphviz data.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n num_items_graphviz = len(os.listdir(graphviz_dir))\n self.assertTrue(num_items_graphviz)\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n graphviz_dir = self.get_temp_dir()\n converter.dump_graphviz_dir = graphviz_dir\n converter.dump_graphviz_video = True\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Ensure graphviz folder has more data after using video flag.\n num_items_graphviz_video = len(os.listdir(graphviz_dir))\n self.assertTrue(num_items_graphviz_video > num_items_graphviz)\n\n def testInferenceInputType(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n converter.inference_input_type = lite_constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.uint8, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((1., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n\n def testDefaultRangesStats(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n converter.inference_type = lite_constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev\n converter.default_ranges_stats = (0, 6) # min, max\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.uint8, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((1., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.uint8, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertTrue(output_details[0]['quantization'][0] > 0) 
# scale\n\n def testPostTrainingQuantizeDeprecatedAttribute(self):\n in_tensor_1 = array_ops.placeholder(\n shape=[33, 33], dtype=dtypes.float32, name='inputA')\n in_tensor_2 = constant_op.constant(\n np.random.uniform(low=-10., high=10., size=(33, 33)),\n shape=[33, 33],\n dtype=dtypes.float32,\n name='inputB')\n out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')\n sess = session.Session()\n\n quantized_converter = lite.TFLiteConverter.from_session(\n sess, [in_tensor_1], [out_tensor])\n self.assertFalse(quantized_converter.post_training_quantize)\n\n quantized_converter.post_training_quantize = True\n self.assertTrue(quantized_converter.post_training_quantize)\n self.assertEqual(quantized_converter.optimizations, [lite.Optimize.DEFAULT])\n\n quantized_tflite = quantized_converter.convert()\n self.assertTrue(quantized_tflite)\n\n def testPostTrainingQuantize(self):\n np.random.seed(0)\n # We need the tensor to have more than 1024 elements for quantize_weights\n # to kick in. Thus, the [33, 33] shape.\n in_tensor_1 = array_ops.placeholder(\n shape=[33, 33], dtype=dtypes.float32, name='inputA')\n in_tensor_2 = constant_op.constant(\n np.random.uniform(low=-10., high=10., size=(33, 33)),\n shape=[33, 33],\n dtype=dtypes.float32,\n name='inputB')\n out_tensor = math_ops.matmul(in_tensor_1, in_tensor_2, name='output')\n sess = session.Session()\n\n # Convert float model.\n float_converter = lite.TFLiteConverter.from_session(sess, [in_tensor_1],\n [out_tensor])\n float_tflite = float_converter.convert()\n self.assertTrue(float_tflite)\n\n # Convert quantized weights model.\n quantized_converter = lite.TFLiteConverter.from_session(\n sess, [in_tensor_1], [out_tensor])\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_tflite = quantized_converter.convert()\n self.assertTrue(quantized_tflite)\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertTrue(len(quantized_tflite) < len(float_tflite))\n\n def _getCalibrationQuantizeModel(self):\n np.random.seed(0)\n inp = array_ops.placeholder(\n dtype=dtypes.float32, shape=(1, 5, 5, 3), name='input')\n conv = nn_ops.conv2d(\n inp,\n filter=array_ops.ones([3, 3, 3, 16]),\n strides=[1, 1, 1, 1],\n padding='SAME')\n output = nn_ops.relu(conv, name='output')\n\n def calibration_gen():\n for _ in range(5):\n yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]\n\n return (inp, output, calibration_gen)\n\n def testPostTrainingCalibrateAndQuantize(self):\n inp, output, calibration_gen = self._getCalibrationQuantizeModel()\n sess = session.Session()\n\n # Convert float model.\n float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])\n float_tflite = float_converter.convert()\n self.assertTrue(float_tflite)\n\n # Convert quantized model.\n quantized_converter = lite.TFLiteConverter.from_session(\n sess, [inp], [output])\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n quantized_tflite = quantized_converter.convert()\n self.assertTrue(quantized_tflite)\n\n # The default input and output types should be float.\n interpreter = Interpreter(model_content=quantized_tflite)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual(np.float32, input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual(np.float32, 
output_details[0]['dtype'])\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertLess(len(quantized_tflite), len(float_tflite))\n\n def testCalibrateAndQuantizeBuiltinInt8(self):\n inp, output, calibration_gen = self._getCalibrationQuantizeModel()\n sess = session.Session()\n\n # Convert float model.\n float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])\n float_tflite = float_converter.convert()\n self.assertTrue(float_tflite)\n\n # Convert model by specifying target spec (instead of optimizations), since\n # when targeting an integer-only backend, quantization is mandatory.\n quantized_converter = lite.TFLiteConverter.from_session(\n sess, [inp], [output])\n quantized_converter.target_spec.supported_ops = [\n lite.OpsSet.TFLITE_BUILTINS_INT8\n ]\n quantized_converter.representative_dataset = calibration_gen\n quantized_tflite = quantized_converter.convert()\n self.assertTrue(quantized_tflite)\n\n # The default input and output types should be float.\n interpreter = Interpreter(model_content=quantized_tflite)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual(np.float32, input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual(np.float32, output_details[0]['dtype'])\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertLess(len(quantized_tflite), len(float_tflite))\n\n def testPostTrainingCalibrateAndQuantizeInt8Inputs(self):\n inp, output, calibration_gen = self._getCalibrationQuantizeModel()\n sess = session.Session()\n\n # Convert float model.\n float_converter = lite.TFLiteConverter.from_session(sess, [inp], [output])\n float_tflite = float_converter.convert()\n self.assertTrue(float_tflite)\n\n # Convert quantized weights model.\n quantized_converter = lite.TFLiteConverter.from_session(\n sess, [inp], [output])\n quantized_converter.inference_input_type = lite_constants.INT8\n quantized_converter.inference_output_type = lite_constants.INT8\n quantized_converter.optimizations = [lite.Optimize.DEFAULT]\n quantized_converter.representative_dataset = calibration_gen\n quantized_tflite = quantized_converter.convert()\n self.assertTrue(quantized_tflite)\n\n # The input and output types should be int8.\n interpreter = Interpreter(model_content=quantized_tflite)\n interpreter.allocate_tensors()\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual(np.int8, input_details[0]['dtype'])\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual(np.int8, output_details[0]['dtype'])\n\n # Ensure that the quantized weights tflite model is smaller.\n self.assertTrue(len(quantized_tflite) < len(float_tflite))\n\n def testFloatTocoConverter(self):\n \"\"\"Tests the deprecated TocoConverter.\"\"\"\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_session(sess, [in_tensor], [out_tensor])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Ensure the interpreter is able to load.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n def testMultipleOutputNodeNames(self):\n \"\"\"Tests converting a graph with 
an op that has multiple outputs.\"\"\"\n input_tensor = array_ops.placeholder(shape=[4], dtype=dtypes.float32)\n out0, out1, out2, out3 = array_ops.split(input_tensor, [1, 1, 1, 1], axis=0)\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [input_tensor],\n [out0, out1, out2, out3])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n interpreter.set_tensor(input_details[0]['index'],\n np.asarray([1.0, 2.0, 3.0, 4.0], dtype=np.float32))\n interpreter.invoke()\n\n output_details = interpreter.get_output_details()\n self.assertEqual(4, len(output_details))\n self.assertEqual(1.0, interpreter.get_tensor(output_details[0]['index']))\n self.assertEqual(2.0, interpreter.get_tensor(output_details[1]['index']))\n self.assertEqual(3.0, interpreter.get_tensor(output_details[2]['index']))\n self.assertEqual(4.0, interpreter.get_tensor(output_details[3]['index']))\n\n @test_util.run_in_graph_and_eager_modes\n def testFunctions(self):\n \"\"\"Tests tf.function in 1.X.\"\"\"\n\n @def_function.function\n def plus_placeholder(x, placeholder):\n return x + placeholder\n\n with ops.Graph().as_default():\n placeholder = array_ops.placeholder(\n dtype=dtypes.float32, shape=[1], name='input')\n variable_node = variables.Variable(1.0, name='variable_node')\n defun_node = plus_placeholder(variable_node, placeholder)\n output_node = math_ops.multiply(defun_node, 2.0, name='output_node')\n\n # Initialize variables in the model.\n sess = session.Session()\n sess.run(variables.variables_initializer([variable_node]))\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [placeholder],\n [output_node])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('input', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('output_node', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testInferenceInputOutputTypeFloatDefault(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = in_tensor + in_tensor\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n 
self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n\n def testInferenceInputOutputTypeQuantizedUint8Default(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = array_ops.fake_quant_with_min_max_args(\n in_tensor + in_tensor, min=0., max=1., name='output')\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n converter.inference_type = lite_constants.QUANTIZED_UINT8\n converter.quantized_input_stats = {'Placeholder': (0., 1.)} # mean, std_dev\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.uint8, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('output', output_details[0]['name'])\n self.assertEqual(np.uint8, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n\n def testReusingConverterWithDifferentPostTrainingQuantization(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n out_tensor = array_ops.fake_quant_with_min_max_args(\n in_tensor + in_tensor, min=0., max=1., name='output')\n sess = session.Session()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n\n converter.post_training_quantize = True\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n converter.post_training_quantize = False\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n\n@test_util.run_v1_only('Incompatible with 2.0.')\nclass FromFrozenGraphFile(test_util.TensorFlowTestCase):\n\n def testFloat(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + in_tensor\n sess = session.Session()\n\n # Write graph to file.\n graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')\n write_graph(sess.graph_def, '', graph_def_file, False)\n sess.close()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,\n ['Placeholder'], ['add'])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = 
interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testFloatWithShapesArray(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + in_tensor\n sess = session.Session()\n\n # Write graph to file.\n graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')\n write_graph(sess.graph_def, '', graph_def_file, False)\n sess.close()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_frozen_graph(\n graph_def_file, ['Placeholder'], ['add'],\n input_shapes={'Placeholder': [1, 16, 16, 3]})\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n\n def testFreezeGraph(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n var = variable_scope.get_variable(\n 'weights', shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + var\n sess = session.Session()\n\n # Write graph to file.\n graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')\n write_graph(sess.graph_def, '', graph_def_file, False)\n sess.close()\n\n # Ensure the graph with variables cannot be converted.\n with self.assertRaises(ValueError) as error:\n lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],\n ['add'])\n self.assertEqual('Please freeze the graph using freeze_graph.py.',\n str(error.exception))\n\n def testPbtxt(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + in_tensor\n sess = session.Session()\n\n # Write graph to file.\n graph_def_file = os.path.join(self.get_temp_dir(), 'model.pbtxt')\n write_graph(sess.graph_def, '', graph_def_file, True)\n sess.close()\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_frozen_graph(graph_def_file,\n ['Placeholder'], ['add'])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testInvalidFileNotFound(self):\n with self.assertRaises(IOError) as error:\n lite.TFLiteConverter.from_frozen_graph('invalid_file', ['Placeholder'],\n ['add'])\n self.assertEqual('File \\'invalid_file\\' does not exist.',\n str(error.exception))\n\n 
def testInvalidFileBadData(self):\n graph_def_file = os.path.join(self.get_temp_dir(), 'invalid_file')\n with gfile.Open(graph_def_file, 'wb') as temp_file:\n temp_file.write('bad data')\n temp_file.flush()\n\n # Attempts to convert the invalid model.\n with self.assertRaises(IOError) as error:\n lite.TFLiteConverter.from_frozen_graph(graph_def_file, ['Placeholder'],\n ['add'])\n self.assertEqual(\n 'Unable to parse input file \\'{}\\'.'.format(graph_def_file),\n str(error.exception))\n\n def testFloatTocoConverter(self):\n in_tensor = array_ops.placeholder(\n shape=[1, 16, 16, 3], dtype=dtypes.float32)\n _ = in_tensor + in_tensor\n sess = session.Session()\n\n # Write graph to file.\n graph_def_file = os.path.join(self.get_temp_dir(), 'model.pb')\n write_graph(sess.graph_def, '', graph_def_file, False)\n sess.close()\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_frozen_graph(graph_def_file,\n ['Placeholder'], ['add'])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Ensure the model is able to load.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n\nclass FromFrozenGraphObjectDetection(test_util.TensorFlowTestCase):\n\n def _initObjectDetectionArgs(self):\n # Initializes the arguments required for the object detection model.\n # Looks for the model file which is saved in a different location internally\n # and externally.\n filename = resource_loader.get_path_to_datafile('testdata/tflite_graph.pb')\n if not os.path.exists(filename):\n filename = os.path.join(\n resource_loader.get_root_dir_with_all_resources(),\n '../tflite_mobilenet_ssd_quant_protobuf/tflite_graph.pb')\n if not os.path.exists(filename):\n raise IOError(\"File '{0}' does not exist.\".format(filename))\n\n self._graph_def_file = filename\n self._input_arrays = ['normalized_input_image_tensor']\n self._output_arrays = [\n 'TFLite_Detection_PostProcess', 'TFLite_Detection_PostProcess:1',\n 'TFLite_Detection_PostProcess:2', 'TFLite_Detection_PostProcess:3'\n ]\n self._input_shapes = {'normalized_input_image_tensor': [1, 300, 300, 3]}\n\n def testTFLiteGraphDef(self):\n # Tests the object detection model that cannot be loaded in TensorFlow.\n self._initObjectDetectionArgs()\n\n converter = lite.TFLiteConverter.from_frozen_graph(\n self._graph_def_file, self._input_arrays, self._output_arrays,\n self._input_shapes)\n converter.allow_custom_ops = True\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('normalized_input_image_tensor', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 300, 300, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(4, len(output_details))\n self.assertEqual('TFLite_Detection_PostProcess', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 10, 4] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n self.assertEqual('TFLite_Detection_PostProcess:1',\n output_details[1]['name'])\n self.assertTrue(([1, 10] == output_details[1]['shape']).all())\n 
self.assertEqual('TFLite_Detection_PostProcess:2',\n output_details[2]['name'])\n self.assertTrue(([1, 10] == output_details[2]['shape']).all())\n self.assertEqual('TFLite_Detection_PostProcess:3',\n output_details[3]['name'])\n self.assertTrue(([1] == output_details[3]['shape']).all())\n\n def testTFLiteGraphDefMissingShape(self):\n # Tests invalid cases for the model that cannot be loaded in TensorFlow.\n self._initObjectDetectionArgs()\n\n # Missing `input_shapes`.\n with self.assertRaises(ValueError) as error:\n lite.TFLiteConverter.from_frozen_graph(\n self._graph_def_file, self._input_arrays, self._output_arrays)\n self.assertEqual('input_shapes must be defined for this model.',\n str(error.exception))\n\n def testTFLiteGraphDefInvalidShape(self):\n # Tests invalid cases for the model that cannot be loaded in TensorFlow.\n self._initObjectDetectionArgs()\n\n # `input_shapes` does not contain the names in `input_arrays`.\n with self.assertRaises(ValueError) as error:\n lite.TFLiteConverter.from_frozen_graph(\n self._graph_def_file,\n self._input_arrays,\n self._output_arrays,\n input_shapes={'invalid-value': [1, 19]})\n self.assertEqual(\n 'input_shapes must contain a value for each item in input_array.',\n str(error.exception))\n\n\n@test_util.run_v1_only('Incompatible with 2.0.')\nclass FromSavedModelTest(test_util.TensorFlowTestCase):\n\n def _createSavedModel(self, shape):\n \"\"\"Create a simple SavedModel.\"\"\"\n saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')\n with session.Session() as sess:\n in_tensor_1 = array_ops.placeholder(\n shape=shape, dtype=dtypes.float32, name='inputB')\n in_tensor_2 = array_ops.placeholder(\n shape=shape, dtype=dtypes.float32, name='inputA')\n out_tensor = in_tensor_1 + in_tensor_2\n inputs = {'x': in_tensor_1, 'y': in_tensor_2}\n outputs = {'z': out_tensor}\n saved_model.simple_save(sess, saved_model_dir, inputs, outputs)\n return saved_model_dir\n\n def testSimpleModel(self):\n \"\"\"Test a SavedModel.\"\"\"\n saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])\n\n # Convert model and ensure model is not None.\n converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(2, len(input_details))\n self.assertEqual('inputA', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n self.assertEqual('inputB', input_details[1]['name'])\n self.assertEqual(np.float32, input_details[1]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())\n self.assertEqual((0., 0.), input_details[1]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testNoneBatchSize(self):\n \"\"\"Test a SavedModel, with None in input tensor's shape.\"\"\"\n saved_model_dir = self._createSavedModel(shape=[None, 16, 16, 3])\n\n converter = lite.TFLiteConverter.from_saved_model(saved_model_dir)\n tflite_model = 
converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(2, len(input_details))\n self.assertEqual('inputA', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n self.assertEqual('inputB', input_details[1]['name'])\n self.assertEqual(np.float32, input_details[1]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())\n self.assertEqual((0., 0.), input_details[1]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testOrderInputArrays(self):\n \"\"\"Test a SavedModel ordering of input arrays.\"\"\"\n saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])\n\n converter = lite.TFLiteConverter.from_saved_model(\n saved_model_dir, input_arrays=['inputB', 'inputA'])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(2, len(input_details))\n self.assertEqual('inputA', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n self.assertEqual('inputB', input_details[1]['name'])\n self.assertEqual(np.float32, input_details[1]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == input_details[1]['shape']).all())\n self.assertEqual((0., 0.), input_details[1]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('add', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 16, 16, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n def testSubsetInputArrays(self):\n \"\"\"Test a SavedModel with a subset of the input array names of the model.\"\"\"\n saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])\n\n # Check case where input shape is given.\n converter = lite.TFLiteConverter.from_saved_model(\n saved_model_dir,\n input_arrays=['inputA'],\n input_shapes={'inputA': [1, 16, 16, 3]})\n\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check case where input shape is None.\n converter = lite.TFLiteConverter.from_saved_model(\n saved_model_dir, input_arrays=['inputA'], input_shapes={'inputA': None})\n\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n def testSimpleModelTocoConverter(self):\n \"\"\"Test a SavedModel with deprecated TocoConverter.\"\"\"\n saved_model_dir = self._createSavedModel(shape=[1, 16, 16, 3])\n\n # Convert model and ensure model is not None.\n converter = lite.TocoConverter.from_saved_model(saved_model_dir)\n tflite_model = converter.convert()\n 
self.assertTrue(tflite_model)\n\n # Ensure the model is able to load.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n\nclass MyAddLayer(keras.layers.Layer):\n\n def __init__(self, increment, **kwargs):\n super(MyAddLayer, self).__init__(**kwargs)\n self._increment = increment\n\n def call(self, inputs):\n return inputs + self._increment\n\n def get_config(self):\n config = super(MyAddLayer, self).get_config()\n config['increment'] = self._increment\n return config\n\n\n@test_util.run_v1_only('Incompatible with 2.0.')\nclass FromKerasFile(test_util.TensorFlowTestCase, parameterized.TestCase):\n\n def setUp(self):\n super(FromKerasFile, self).setUp()\n self._keras_file = None\n self._custom_objects = None\n if not context.executing_eagerly():\n keras.backend.clear_session()\n\n def tearDown(self):\n if self._keras_file:\n os.remove(self._keras_file)\n super(FromKerasFile, self).tearDown()\n\n def _getSequentialModel(self, include_custom_layer=False):\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_shape=(3,)))\n if include_custom_layer:\n model.add(MyAddLayer(1.0))\n model.add(keras.layers.RepeatVector(3))\n model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))\n model.compile(\n loss=keras.losses.MSE,\n optimizer='sgd',\n metrics=[keras.metrics.categorical_accuracy],\n sample_weight_mode='temporal')\n x = np.random.random((1, 3))\n y = np.random.random((1, 3, 3))\n model.train_on_batch(x, y)\n model.predict(x)\n\n try:\n fd, self._keras_file = tempfile.mkstemp('.h5')\n keras.models.save_model(model, self._keras_file)\n finally:\n os.close(fd)\n\n if include_custom_layer:\n self._custom_objects = {'MyAddLayer': MyAddLayer}\n\n @parameterized.named_parameters(('_graph', context.graph_mode),\n ('_eager', context.eager_mode))\n def testSequentialModel(self, test_context):\n \"\"\"Test a Sequential tf.keras model with default inputs.\"\"\"\n with test_context():\n self._getSequentialModel()\n\n converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check tensor details of converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual('dense_input', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n # Check inference of converted model.\n input_data = np.array([[1, 2, 3]], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n interpreter.invoke()\n tflite_result = interpreter.get_tensor(output_details[0]['index'])\n\n keras_model = keras.models.load_model(self._keras_file)\n keras_result = keras_model.predict(input_data)\n\n np.testing.assert_almost_equal(tflite_result, keras_result, 5)\n\n @parameterized.named_parameters(('_graph', context.graph_mode),\n ('_eager', context.eager_mode))\n def testCustomLayer(self, test_context):\n \"\"\"Test a Sequential tf.keras model with a custom layer.\"\"\"\n with 
test_context():\n self._getSequentialModel(include_custom_layer=True)\n\n converter = lite.TFLiteConverter.from_keras_model_file(\n self._keras_file, custom_objects=self._custom_objects)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check tensor details of converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n output_details = interpreter.get_output_details()\n\n # Check inference of converted model.\n input_data = np.array([[1, 2, 3]], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n interpreter.invoke()\n tflite_result = interpreter.get_tensor(output_details[0]['index'])\n\n keras_model = keras.models.load_model(\n self._keras_file, custom_objects=self._custom_objects)\n keras_result = keras_model.predict(input_data)\n\n np.testing.assert_almost_equal(tflite_result, keras_result, 5)\n\n def testSequentialModelInputArray(self):\n \"\"\"Test a Sequential tf.keras model testing input arrays argument.\"\"\"\n self._getSequentialModel()\n\n # Invalid input array raises error.\n with self.assertRaises(ValueError) as error:\n lite.TFLiteConverter.from_keras_model_file(\n self._keras_file, input_arrays=['invalid-input'])\n self.assertEqual(\"Invalid tensors 'invalid-input' were found.\",\n str(error.exception))\n\n # Valid input array.\n converter = lite.TFLiteConverter.from_keras_model_file(\n self._keras_file, input_arrays=['dense_input'])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n def testSequentialModelInputShape(self):\n \"\"\"Test a Sequential tf.keras model testing input shapes argument.\"\"\"\n self._getSequentialModel()\n\n # Passing in shape of invalid input array raises error.\n with self.assertRaises(ValueError) as error:\n converter = lite.TFLiteConverter.from_keras_model_file(\n self._keras_file, input_shapes={'invalid-input': [2, 3]})\n self.assertEqual(\n \"Invalid tensor 'invalid-input' found in tensor shapes map.\",\n str(error.exception))\n\n # Passing in shape of valid input array.\n converter = lite.TFLiteConverter.from_keras_model_file(\n self._keras_file, input_shapes={'dense_input': [2, 3]})\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check input shape from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual('dense_input', input_details[0]['name'])\n self.assertTrue(([2, 3] == input_details[0]['shape']).all())\n\n def testSequentialModelOutputArray(self):\n \"\"\"Test a Sequential tf.keras model testing output arrays argument.\"\"\"\n self._getSequentialModel()\n\n # Invalid output array raises error.\n with self.assertRaises(ValueError) as error:\n lite.TFLiteConverter.from_keras_model_file(\n self._keras_file, output_arrays=['invalid-output'])\n self.assertEqual(\"Invalid tensors 'invalid-output' were found.\",\n str(error.exception))\n\n # Valid output array.\n converter = lite.TFLiteConverter.from_keras_model_file(\n self._keras_file, output_arrays=['time_distributed/Reshape_1'])\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n @parameterized.named_parameters(('_graph', context.graph_mode),\n ('_eager', context.eager_mode))\n def testFunctionalModel(self, test_context):\n \"\"\"Test a Functional tf.keras model with default inputs.\"\"\"\n with test_context():\n 
inputs = keras.layers.Input(shape=(3,), name='input')\n x = keras.layers.Dense(2)(inputs)\n output = keras.layers.Dense(3)(x)\n\n model = keras.models.Model(inputs, output)\n model.compile(\n loss=keras.losses.MSE,\n optimizer='sgd',\n metrics=[keras.metrics.categorical_accuracy])\n x = np.random.random((1, 3))\n y = np.random.random((1, 3))\n model.train_on_batch(x, y)\n\n model.predict(x)\n fd, self._keras_file = tempfile.mkstemp('.h5')\n try:\n keras.models.save_model(model, self._keras_file)\n finally:\n os.close(fd)\n\n # Convert to TFLite model.\n converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check tensor details of converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual('input', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n # Check inference of converted model.\n input_data = np.array([[1, 2, 3]], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n interpreter.invoke()\n tflite_result = interpreter.get_tensor(output_details[0]['index'])\n\n keras_model = keras.models.load_model(self._keras_file)\n keras_result = keras_model.predict(input_data)\n\n np.testing.assert_almost_equal(tflite_result, keras_result, 5)\n\n def testFunctionalModelMultipleInputs(self):\n \"\"\"Test a Functional tf.keras model with multiple inputs and outputs.\"\"\"\n a = keras.layers.Input(shape=(3,), name='input_a')\n b = keras.layers.Input(shape=(3,), name='input_b')\n dense = keras.layers.Dense(4, name='dense')\n c = dense(a)\n d = dense(b)\n e = keras.layers.Dropout(0.5, name='dropout')(c)\n\n model = keras.models.Model([a, b], [d, e])\n model.compile(\n loss=keras.losses.MSE,\n optimizer='sgd',\n metrics=[keras.metrics.mae],\n loss_weights=[1., 0.5])\n\n input_a_np = np.random.random((10, 3))\n input_b_np = np.random.random((10, 3))\n output_d_np = np.random.random((10, 4))\n output_e_np = np.random.random((10, 4))\n model.train_on_batch([input_a_np, input_b_np], [output_d_np, output_e_np])\n\n model.predict([input_a_np, input_b_np], batch_size=5)\n fd, self._keras_file = tempfile.mkstemp('.h5')\n try:\n keras.models.save_model(model, self._keras_file)\n finally:\n os.close(fd)\n\n # Convert to TFLite model.\n converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 2)\n self.assertEqual('input_a', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n self.assertEqual('input_b', input_details[1]['name'])\n self.assertEqual(np.float32, 
input_details[1]['dtype'])\n self.assertTrue(([1, 3] == input_details[1]['shape']).all())\n self.assertEqual((0., 0.), input_details[1]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 2)\n self.assertEqual('dense_1/BiasAdd', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 4] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n self.assertEqual('dropout/Identity', output_details[1]['name'])\n self.assertEqual(np.float32, output_details[1]['dtype'])\n self.assertTrue(([1, 4] == output_details[1]['shape']).all())\n self.assertEqual((0., 0.), output_details[1]['quantization'])\n\n def testFunctionalSequentialModel(self):\n \"\"\"Test a Functional tf.keras model containing a Sequential model.\"\"\"\n model = keras.models.Sequential()\n model.add(keras.layers.Dense(2, input_shape=(3,)))\n model.add(keras.layers.RepeatVector(3))\n model.add(keras.layers.TimeDistributed(keras.layers.Dense(3)))\n model = keras.models.Model(model.input, model.output)\n\n model.compile(\n loss=keras.losses.MSE,\n optimizer='sgd',\n metrics=[keras.metrics.categorical_accuracy],\n sample_weight_mode='temporal')\n x = np.random.random((1, 3))\n y = np.random.random((1, 3, 3))\n model.train_on_batch(x, y)\n model.predict(x)\n\n fd, self._keras_file = tempfile.mkstemp('.h5')\n try:\n keras.models.save_model(model, self._keras_file)\n finally:\n os.close(fd)\n\n # Convert to TFLite model.\n converter = lite.TFLiteConverter.from_keras_model_file(self._keras_file)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Check tensor details of converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertLen(input_details, 1)\n self.assertEqual('dense_input', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([1, 3] == input_details[0]['shape']).all())\n self.assertEqual((0., 0.), input_details[0]['quantization'])\n\n output_details = interpreter.get_output_details()\n self.assertLen(output_details, 1)\n self.assertEqual('time_distributed/Reshape_1', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([1, 3, 3] == output_details[0]['shape']).all())\n self.assertEqual((0., 0.), output_details[0]['quantization'])\n\n # Check inference of converted model.\n input_data = np.array([[1, 2, 3]], dtype=np.float32)\n interpreter.set_tensor(input_details[0]['index'], input_data)\n interpreter.invoke()\n tflite_result = interpreter.get_tensor(output_details[0]['index'])\n\n keras_model = keras.models.load_model(self._keras_file)\n keras_result = keras_model.predict(input_data)\n\n np.testing.assert_almost_equal(tflite_result, keras_result, 5)\n\n def testSequentialModelTocoConverter(self):\n \"\"\"Test a Sequential tf.keras model with deprecated TocoConverter.\"\"\"\n self._getSequentialModel()\n\n converter = lite.TocoConverter.from_keras_model_file(self._keras_file)\n tflite_model = converter.convert()\n self.assertTrue(tflite_model)\n\n # Ensure the model is able to load.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n\n@test_util.run_v1_only('Incompatible with 2.0.')\nclass GrapplerTest(test_util.TensorFlowTestCase):\n\n def testConstantFolding(self):\n # Constant folding 
handles the tf.broadcast_to operation which was not\n # supported by TFLite at the time this test was added.\n in_tensor = array_ops.placeholder(shape=[3, 3], dtype=dtypes.float32)\n y_const = constant_op.constant([1., 2., 3.])\n y_broadcast = gen_array_ops.broadcast_to(y_const, [3, 3])\n out_tensor = math_ops.matmul(in_tensor, y_broadcast, name='output')\n sess = session.Session()\n\n # Convert model.\n converter = lite.TFLiteConverter.from_session(sess, [in_tensor],\n [out_tensor])\n tflite_model = converter.convert()\n\n # Check values from converted model.\n interpreter = Interpreter(model_content=tflite_model)\n interpreter.allocate_tensors()\n\n input_details = interpreter.get_input_details()\n self.assertEqual(1, len(input_details))\n self.assertEqual('Placeholder', input_details[0]['name'])\n self.assertEqual(np.float32, input_details[0]['dtype'])\n self.assertTrue(([3, 3] == input_details[0]['shape']).all())\n\n output_details = interpreter.get_output_details()\n self.assertEqual(1, len(output_details))\n self.assertEqual('output', output_details[0]['name'])\n self.assertEqual(np.float32, output_details[0]['dtype'])\n self.assertTrue(([3, 3] == output_details[0]['shape']).all())\n\n\nclass ImportOpsUtilTest(test_util.TensorFlowTestCase):\n\n def testGetPotentiallySupportedOps(self):\n self.assertIsNotNone(lite.get_potentially_supported_ops())\n\n\nif __name__ == '__main__':\n test.main()\n" ]
[ [ "tensorflow.python.ops.variables.Variable", "tensorflow.python.ops.variable_scope.get_variable", "tensorflow.python.ops.math_ops.matmul", "tensorflow.python.ops.array_ops.ones", "tensorflow.python.keras.layers.Dense", "tensorflow.python.eager.context.executing_eagerly", "tensorflow.lite.python.lite.TFLiteConverter.from_keras_model_file", "numpy.random.random", "tensorflow.lite.python.lite.TocoConverter.from_frozen_graph", "tensorflow.python.platform.test.main", "tensorflow.python.training.training_util.write_graph", "tensorflow.python.platform.resource_loader.get_path_to_datafile", "tensorflow.python.keras.models.Sequential", "tensorflow.python.ops.math_ops.multiply", "tensorflow.lite.python.lite.TFLiteConverter.from_saved_model", "tensorflow.python.keras.models.load_model", "tensorflow.python.saved_model.saved_model.simple_save", "tensorflow.python.client.session.Session", "tensorflow.lite.python.interpreter.Interpreter", "tensorflow.python.ops.array_ops.placeholder", "tensorflow.python.keras.layers.RepeatVector", "tensorflow.python.ops.array_ops.split", "tensorflow.lite.python.lite.TocoConverter.from_session", "tensorflow.python.ops.gen_array_ops.broadcast_to", "tensorflow.python.framework.test_util.run_v1_only", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.keras.models.Model", "tensorflow.lite.python.lite.TocoConverter.from_keras_model_file", "tensorflow.python.keras.models.save_model", "numpy.array", "tensorflow.python.framework.ops.Graph", "numpy.testing.assert_almost_equal", "tensorflow.lite.python.lite.TFLiteConverter.from_session", "tensorflow.python.keras.layers.Dropout", "tensorflow.lite.python.lite.get_potentially_supported_ops", "tensorflow.lite.python.lite.TFLiteConverter", "tensorflow.python.ops.array_ops.fake_quant_with_min_max_args", "tensorflow.python.ops.variables.variables_initializer", "tensorflow.lite.python.lite.TFLiteConverter.from_frozen_graph", "tensorflow.python.keras.layers.Input", "numpy.asarray", "tensorflow.python.platform.gfile.Open", "numpy.random.seed", "tensorflow.python.ops.nn_ops.relu", "tensorflow.python.framework.constant_op.constant", "numpy.random.uniform", "tensorflow.python.ops.array_ops.reshape", "tensorflow.python.platform.resource_loader.get_root_dir_with_all_resources", "tensorflow.python.keras.backend.clear_session", "tensorflow.lite.python.lite.TocoConverter.from_saved_model" ] ]
mcwimm/pyMANGA
[ "6c7b53087e53b116bb02f91c33974f3dfd9a46de" ]
[ "TreeModelLib/GrowthAndDeathDynamics/Mortality/RandomGrowth/RandomGrowth.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@date: 2021-Today\n@author: [email protected]\n\"\"\"\n\nimport numpy as np\nfrom TreeModelLib.GrowthAndDeathDynamics.Mortality.Random import Random\n\n\nclass RandomGrowth(Random):\n def __init__(self, args, case):\n super(Random, self).__init__(args, case)\n # Read input parameters from xml file\n self.getInputParameters(args)\n # Default values if no inputs are given\n try:\n self.k_die\n except:\n # Calibration factor default: 1e-12\n self.k_die = 1e-12\n print(\"NOTE: Use default `probability`: \" + str(self.k_die) +\n \".\")\n\n def getSurvival(self, args):\n self.survive = 1\n # Calculate the probability to die\n args.delta_volume = args.volume - args.volume_before\n\n # = dV/dt/V\n relative_volume_increment = args.delta_volume / (args.time *\n args.volume)\n self.p_die = self.k_die / relative_volume_increment\n\n # Get a random number\n r = np.random.uniform(0, 1, 1)\n if r < self.p_die:\n self.survive = 0\n print(\"\\t Tree died randomly. Random number: \" + str(r[0]) +\n \", p: \" + str(self.p_die))\n\n return self.survive\n\n def getMortalityVariables(self, args, growth_concept_information):\n # Variable to store volume of previous time step (m³)\n try:\n args.volume_before = growth_concept_information[\n \"volume_previous_ts\"]\n\n if args.volume_before == \"NaN\":\n args.volume_before = 0\n except KeyError:\n args.volume_before = 0\n\n def setMortalityVariables(self, args, growth_concept_information):\n # The current tree volume is the volume of t-1 in the next time step\n growth_concept_information[\"volume_previous_ts\"] = \\\n args.volume\n return growth_concept_information\n\n def getInputParameters(self, args):\n # All tags are optional\n missing_tags = [\"type\", \"mortality\", \"k_die\"]\n for arg in args.iterdescendants():\n tag = arg.tag\n if tag == \"k_die\":\n self.k_die = float(args.find(\"k_die\").text)\n elif tag == \"type\":\n case = args.find(\"type\").text\n try:\n missing_tags.remove(tag)\n except ValueError:\n print(\"WARNING: Tag \" + tag +\n \" not specified for \" + super().getConceptName() +\n \" (\" + case + \") \" +\n \"mortality initialisation!\")\n" ]
[ [ "numpy.random.uniform" ] ]
jmniederle/misgan
[ "8ac447bbc1fceaec15664eae00137d9804a45936" ]
[ "src/celeba_misgan_impute.py" ]
[ "import torch\nimport torch.nn as nn\nfrom datetime import datetime\nfrom pathlib import Path\nimport argparse\nfrom celeba_generator import ConvDataGenerator, ConvMaskGenerator\nfrom celeba_critic import ConvCritic\nfrom masked_celeba import BlockMaskedCelebA, IndepMaskedCelebA\nfrom imputer import UNetImputer\nfrom misgan_impute import misgan_impute\n\n\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device('cuda' if use_cuda else 'cpu')\n\n\ndef parallelize(model):\n return nn.DataParallel(model).to(device)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n\n # resume from checkpoint\n parser.add_argument('--resume')\n\n # path of CelebA dataset\n parser.add_argument('--data-dir', default='celeba-data')\n\n # training options\n parser.add_argument('--workers', type=int, default=0)\n parser.add_argument('--epoch', type=int, default=800)\n parser.add_argument('--batch-size', type=int, default=512)\n parser.add_argument('--pretrain', default=None)\n parser.add_argument('--imputeronly', action='store_true')\n\n # log options: 0 to disable plot-interval or save-interval\n parser.add_argument('--plot-interval', type=int, default=50)\n parser.add_argument('--save-interval', type=int, default=0)\n parser.add_argument('--prefix', default='impute')\n\n # mask options (data): block|indep\n parser.add_argument('--mask', default='block')\n # option for block: set to 0 for variable size\n parser.add_argument('--block-len', type=int, default=32)\n # option for indep:\n parser.add_argument('--obs-prob', type=float, default=.2)\n parser.add_argument('--obs-prob-high', type=float, default=None)\n\n # model options\n parser.add_argument('--tau', type=float, default=.5)\n parser.add_argument('--alpha', type=float, default=.1) # 0: separate\n parser.add_argument('--beta', type=float, default=.1)\n parser.add_argument('--gamma', type=float, default=0)\n # options for mask generator: sigmoid, hardsigmoid, fusion\n parser.add_argument('--maskgen', default='fusion')\n parser.add_argument('--gp-lambda', type=float, default=10)\n parser.add_argument('--n-critic', type=int, default=5)\n parser.add_argument('--n-latent', type=int, default=128)\n\n args = parser.parse_args()\n\n checkpoint = None\n # Resume from previously stored checkpoint\n if args.resume:\n print(f'Resume: {args.resume}')\n output_dir = Path(args.resume)\n checkpoint = torch.load(str(output_dir / 'log' / 'checkpoint.pth'),\n map_location='cpu')\n for key, arg in vars(checkpoint['args']).items():\n if key not in ['resume']:\n setattr(args, key, arg)\n\n if args.imputeronly:\n assert args.pretrain is not None\n\n mask = args.mask\n obs_prob = args.obs_prob\n obs_prob_high = args.obs_prob_high\n block_len = args.block_len\n if block_len == 0:\n block_len = None\n\n if args.maskgen == 'sigmoid':\n hard_sigmoid = False\n elif args.maskgen == 'hardsigmoid':\n hard_sigmoid = True\n elif args.maskgen == 'fusion':\n hard_sigmoid = -.1, 1.1\n else:\n raise NotImplementedError\n\n if mask == 'indep':\n if obs_prob_high is None:\n mask_str = f'indep_{obs_prob:g}'\n else:\n mask_str = f'indep_{obs_prob:g}_{obs_prob_high:g}'\n elif mask == 'block':\n mask_str = 'block_{}'.format(block_len if block_len else 'varsize')\n else:\n raise NotImplementedError\n\n path = '{}_{}_{}'.format(\n args.prefix, datetime.now().strftime('%m%d.%H%M%S'),\n '_'.join([\n f'tau_{args.tau:g}',\n f'maskgen_{args.maskgen}',\n f'coef_{args.alpha:g}_{args.beta:g}_{args.gamma:g}',\n mask_str,\n ]))\n\n if not args.resume:\n output_dir = Path('results') / 'celeba' / path\n 
print(output_dir)\n\n if mask == 'indep':\n data = IndepMaskedCelebA(\n data_dir=args.data_dir,\n obs_prob=obs_prob, obs_prob_high=obs_prob_high)\n elif mask == 'block':\n data = BlockMaskedCelebA(\n data_dir=args.data_dir, block_len=block_len)\n\n n_gpu = torch.cuda.device_count()\n print(f'Use {n_gpu} GPUs.')\n data_gen = parallelize(ConvDataGenerator())\n mask_gen = parallelize(ConvMaskGenerator(hard_sigmoid=hard_sigmoid))\n imputer = UNetImputer().to(device)\n\n data_critic = parallelize(ConvCritic(n_channels=3))\n mask_critic = parallelize(ConvCritic(n_channels=1))\n impu_critic = parallelize(ConvCritic(n_channels=3))\n\n misgan_impute(args, data_gen, mask_gen, imputer,\n data_critic, mask_critic, impu_critic,\n data, output_dir, checkpoint)\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.device", "torch.cuda.is_available", "torch.nn.DataParallel", "torch.cuda.device_count" ] ]
uiuc-arc/gpytorch
[ "8a520be2a4c877e87a36bb5f1e6a8565f337c1c7" ]
[ "gpytorch/models/exact_gp.py" ]
[ "#!/usr/bin/env python3\n\nimport warnings\nfrom copy import deepcopy\n\nimport torch\n\nfrom .. import settings\nfrom ..distributions import MultivariateNormal\nfrom ..likelihoods import _GaussianLikelihoodBase\nfrom ..utils.broadcasting import _mul_broadcast_shape\nfrom ..utils.warnings import GPInputWarning\nfrom .exact_prediction_strategies import prediction_strategy\nfrom .gp import GP\n\n\nclass ExactGP(GP):\n r\"\"\"\n The base class for any Gaussian process latent function to be used in conjunction\n with exact inference.\n\n :param torch.Tensor train_inputs: (size n x d) The training features :math:`\\mathbf X`.\n :param torch.Tensor train_targets: (size n) The training targets :math:`\\mathbf y`.\n :param ~gpytorch.likelihoods.GaussianLikelihood likelihood: The Gaussian likelihood that defines\n the observational distribution. Since we're using exact inference, the likelihood must be Gaussian.\n\n The :meth:`forward` function should describe how to compute the prior latent distribution\n on a given input. Typically, this will involve a mean and kernel function.\n The result must be a :obj:`~gpytorch.distributions.MultivariateNormal`.\n\n Calling this model will return the posterior of the latent Gaussian process when conditioned\n on the training data. The output will be a :obj:`~gpytorch.distributions.MultivariateNormal`.\n\n Example:\n >>> class MyGP(gpytorch.models.ExactGP):\n >>> def __init__(self, train_x, train_y, likelihood):\n >>> super().__init__(train_x, train_y, likelihood)\n >>> self.mean_module = gpytorch.means.ZeroMean()\n >>> self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())\n >>>\n >>> def forward(self, x):\n >>> mean = self.mean_module(x)\n >>> covar = self.covar_module(x)\n >>> return gpytorch.distributions.MultivariateNormal(mean, covar)\n >>>\n >>> # train_x = ...; train_y = ...\n >>> likelihood = gpytorch.likelihoods.GaussianLikelihood()\n >>> model = MyGP(train_x, train_y, likelihood)\n >>>\n >>> # test_x = ...;\n >>> model(test_x) # Returns the GP latent function at test_x\n >>> likelihood(model(test_x)) # Returns the (approximate) predictive posterior distribution at test_x\n \"\"\"\n\n def __init__(self, train_inputs, train_targets, likelihood):\n if train_inputs is not None and torch.is_tensor(train_inputs):\n train_inputs = (train_inputs,)\n if train_inputs is not None and not all(torch.is_tensor(train_input) for train_input in train_inputs):\n raise RuntimeError(\"Train inputs must be a tensor, or a list/tuple of tensors\")\n if not isinstance(likelihood, _GaussianLikelihoodBase):\n raise RuntimeError(\"ExactGP can only handle Gaussian likelihoods\")\n\n super(ExactGP, self).__init__()\n if train_inputs is not None:\n self.train_inputs = tuple(tri.unsqueeze(-1) if tri.ndimension() == 1 else tri for tri in train_inputs)\n self.train_targets = train_targets\n else:\n self.train_inputs = None\n self.train_targets = None\n self.likelihood = likelihood\n\n self.prediction_strategy = None\n\n @property\n def train_targets(self):\n return self._train_targets\n\n @train_targets.setter\n def train_targets(self, value):\n object.__setattr__(self, \"_train_targets\", value)\n\n def _apply(self, fn):\n if self.train_inputs is not None:\n self.train_inputs = tuple(fn(train_input) for train_input in self.train_inputs)\n self.train_targets = fn(self.train_targets)\n return super(ExactGP, self)._apply(fn)\n\n def local_load_samples(self, samples_dict, memo, prefix):\n \"\"\"\n Replace the model's learned hyperparameters with samples from 
a posterior distribution.\n \"\"\"\n # Pyro always puts the samples in the first batch dimension\n num_samples = next(iter(samples_dict.values())).size(0)\n self.train_inputs = tuple(tri.unsqueeze(0).expand(num_samples, *tri.shape) for tri in self.train_inputs)\n self.train_targets = self.train_targets.unsqueeze(0).expand(num_samples, *self.train_targets.shape)\n super().local_load_samples(samples_dict, memo, prefix)\n\n def set_train_data(self, inputs=None, targets=None, strict=True):\n \"\"\"\n Set training data (does not re-fit model hyper-parameters).\n\n :param torch.Tensor inputs: The new training inputs.\n :param torch.Tensor targets: The new training targets.\n :param bool strict: (default True) If `True`, the new inputs and\n targets must have the same shape, dtype, and device\n as the current inputs and targets. Otherwise, any shape/dtype/device are allowed.\n \"\"\"\n if inputs is not None:\n if torch.is_tensor(inputs):\n inputs = (inputs,)\n inputs = tuple(input_.unsqueeze(-1) if input_.ndimension() == 1 else input_ for input_ in inputs)\n if strict:\n for input_, t_input in zip(inputs, self.train_inputs or (None,)):\n for attr in {\"shape\", \"dtype\", \"device\"}:\n expected_attr = getattr(t_input, attr, None)\n found_attr = getattr(input_, attr, None)\n if expected_attr != found_attr:\n msg = \"Cannot modify {attr} of inputs (expected {e_attr}, found {f_attr}).\"\n msg = msg.format(attr=attr, e_attr=expected_attr, f_attr=found_attr)\n raise RuntimeError(msg)\n self.train_inputs = inputs\n if targets is not None:\n if strict:\n for attr in {\"shape\", \"dtype\", \"device\"}:\n expected_attr = getattr(self.train_targets, attr, None)\n found_attr = getattr(targets, attr, None)\n if expected_attr != found_attr:\n msg = \"Cannot modify {attr} of targets (expected {e_attr}, found {f_attr}).\"\n msg = msg.format(attr=attr, e_attr=expected_attr, f_attr=found_attr)\n raise RuntimeError(msg)\n self.train_targets = targets\n self.prediction_strategy = None\n\n def get_fantasy_model(self, inputs, targets, **kwargs):\n \"\"\"\n Returns a new GP model that incorporates the specified inputs and targets as new training data.\n\n Using this method is more efficient than updating with `set_train_data` when the number of inputs is relatively\n small, because any computed test-time caches will be updated in linear time rather than computed from scratch.\n\n .. note::\n If `targets` is a batch (e.g. `b x m`), then the GP returned from this method will be a batch mode GP.\n If `inputs` is of the same (or lesser) dimension as `targets`, then it is assumed that the fantasy points\n are the same for each target batch.\n\n :param torch.Tensor inputs: (`b1 x ... x bk x m x d` or `f x b1 x ... x bk x m x d`) Locations of fantasy\n observations.\n :param torch.Tensor targets: (`b1 x ... x bk x m` or `f x b1 x ... x bk x m`) Labels of fantasy observations.\n :return: An `ExactGP` model with `n + m` training examples, where the `m` fantasy examples have been added\n and all test-time caches have been updated.\n :rtype: ~gpytorch.models.ExactGP\n \"\"\"\n if self.prediction_strategy is None:\n raise RuntimeError(\n \"Fantasy observations can only be added after making predictions with a model so that \"\n \"all test independent caches exist. 
Call the model on some data first!\"\n )\n\n model_batch_shape = self.train_inputs[0].shape[:-2]\n\n if self.train_targets.dim() > len(model_batch_shape) + 1:\n raise RuntimeError(\"Cannot yet add fantasy observations to multitask GPs, but this is coming soon!\")\n\n if not isinstance(inputs, list):\n inputs = [inputs]\n\n inputs = [i.unsqueeze(-1) if i.ndimension() == 1 else i for i in inputs]\n\n target_batch_shape = targets.shape[:-1]\n input_batch_shape = inputs[0].shape[:-2]\n tbdim, ibdim = len(target_batch_shape), len(input_batch_shape)\n\n if not (tbdim == ibdim + 1 or tbdim == ibdim):\n raise RuntimeError(\n f\"Unsupported batch shapes: The target batch shape ({target_batch_shape}) must have either the \"\n f\"same dimension as or one more dimension than the input batch shape ({input_batch_shape})\"\n )\n\n # Check whether we can properly broadcast batch dimensions\n err_msg = (\n f\"Model batch shape ({model_batch_shape}) and target batch shape \"\n f\"({target_batch_shape}) are not broadcastable.\"\n )\n _mul_broadcast_shape(model_batch_shape, target_batch_shape, error_msg=err_msg)\n\n if len(model_batch_shape) > len(input_batch_shape):\n input_batch_shape = model_batch_shape\n if len(model_batch_shape) > len(target_batch_shape):\n target_batch_shape = model_batch_shape\n\n # If input has no fantasy batch dimension but target does, we can save memory and computation by not\n # computing the covariance for each element of the batch. Therefore we don't expand the inputs to the\n # size of the fantasy model here - this is done below, after the evaluation and fast fantasy update\n train_inputs = [tin.expand(input_batch_shape + tin.shape[-2:]) for tin in self.train_inputs]\n train_targets = self.train_targets.expand(target_batch_shape + self.train_targets.shape[-1:])\n\n full_inputs = [\n torch.cat([train_input, input.expand(input_batch_shape + input.shape[-2:])], dim=-2)\n for train_input, input in zip(train_inputs, inputs)\n ]\n full_targets = torch.cat([train_targets, targets.expand(target_batch_shape + targets.shape[-1:])], dim=-1)\n\n try:\n fantasy_kwargs = {\"noise\": kwargs.pop(\"noise\")}\n except KeyError:\n fantasy_kwargs = {}\n\n full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)\n\n # Copy model without copying training data or prediction strategy (since we'll overwrite those)\n old_pred_strat = self.prediction_strategy\n old_train_inputs = self.train_inputs\n old_train_targets = self.train_targets\n old_likelihood = self.likelihood\n self.prediction_strategy = None\n self.train_inputs = None\n self.train_targets = None\n self.likelihood = None\n new_model = deepcopy(self)\n self.prediction_strategy = old_pred_strat\n self.train_inputs = old_train_inputs\n self.train_targets = old_train_targets\n self.likelihood = old_likelihood\n\n new_model.likelihood = old_likelihood.get_fantasy_likelihood(**fantasy_kwargs)\n new_model.prediction_strategy = old_pred_strat.get_fantasy_strategy(\n inputs, targets, full_inputs, full_targets, full_output, **fantasy_kwargs\n )\n\n # if the fantasies are at the same points, we need to expand the inputs for the new model\n if tbdim == ibdim + 1:\n new_model.train_inputs = [fi.expand(target_batch_shape + fi.shape[-2:]) for fi in full_inputs]\n else:\n new_model.train_inputs = full_inputs\n new_model.train_targets = full_targets\n\n return new_model\n\n def train(self, mode=True):\n if mode:\n self.prediction_strategy = None\n return super(ExactGP, self).train(mode)\n\n def _load_from_state_dict(\n self, state_dict, prefix, 
local_metadata, strict, missing_keys, unexpected_keys, error_msgs\n ):\n self.prediction_strategy = None\n super()._load_from_state_dict(\n state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs\n )\n\n def __call__(self, *args, **kwargs):\n train_inputs = list(self.train_inputs) if self.train_inputs is not None else []\n inputs = [i.unsqueeze(-1) if i.ndimension() == 1 else i for i in args]\n\n # Training mode: optimizing\n if self.training:\n if self.train_inputs is None:\n raise RuntimeError(\n \"train_inputs, train_targets cannot be None in training mode. \"\n \"Call .eval() for prior predictions, or call .set_train_data() to add training data.\"\n )\n if settings.debug.on():\n if not all(torch.equal(train_input, input) for train_input, input in zip(train_inputs, inputs)):\n raise RuntimeError(\"You must train on the training inputs!\")\n res = super().__call__(*inputs, **kwargs)\n return res\n\n # Prior mode\n elif settings.prior_mode.on() or self.train_inputs is None or self.train_targets is None:\n full_inputs = args\n full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)\n if settings.debug().on():\n if not isinstance(full_output, MultivariateNormal):\n raise RuntimeError(\"ExactGP.forward must return a MultivariateNormal\")\n return full_output\n\n # Posterior mode\n else:\n if settings.debug.on():\n if all(torch.equal(train_input, input) for train_input, input in zip(train_inputs, inputs)):\n warnings.warn(\n \"The input matches the stored training data. Did you forget to call model.train()?\",\n GPInputWarning,\n )\n\n # Get the terms that only depend on training data\n if self.prediction_strategy is None:\n train_output = super().__call__(*train_inputs, **kwargs)\n\n # Create the prediction strategy for\n self.prediction_strategy = prediction_strategy(\n train_inputs=train_inputs,\n train_prior_dist=train_output,\n train_labels=self.train_targets,\n likelihood=self.likelihood,\n )\n\n # Concatenate the input to the training input\n full_inputs = []\n batch_shape = train_inputs[0].shape[:-2]\n for train_input, input in zip(train_inputs, inputs):\n # Make sure the batch shapes agree for training/test data\n if batch_shape != train_input.shape[:-2]:\n batch_shape = _mul_broadcast_shape(batch_shape, train_input.shape[:-2])\n train_input = train_input.expand(*batch_shape, *train_input.shape[-2:])\n if batch_shape != input.shape[:-2]:\n batch_shape = _mul_broadcast_shape(batch_shape, input.shape[:-2])\n train_input = train_input.expand(*batch_shape, *train_input.shape[-2:])\n input = input.expand(*batch_shape, *input.shape[-2:])\n full_inputs.append(torch.cat([train_input, input], dim=-2))\n\n # Get the joint distribution for training/test data\n full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)\n if settings.debug().on():\n if not isinstance(full_output, MultivariateNormal):\n raise RuntimeError(\"ExactGP.forward must return a MultivariateNormal\")\n full_mean, full_covar = full_output.loc, full_output.lazy_covariance_matrix\n\n # Determine the shape of the joint distribution\n batch_shape = full_output.batch_shape\n joint_shape = full_output.event_shape\n tasks_shape = joint_shape[1:] # For multitask learning\n test_shape = torch.Size([joint_shape[0] - self.prediction_strategy.train_shape[0], *tasks_shape])\n\n # Make the prediction\n with settings._use_eval_tolerance():\n predictive_mean, predictive_covar = self.prediction_strategy.exact_prediction(full_mean, full_covar)\n\n # Reshape predictive mean to match the 
appropriate event shape\n predictive_mean = predictive_mean.view(*batch_shape, *test_shape).contiguous()\n return full_output.__class__(predictive_mean, predictive_covar)\n" ]
[ [ "torch.is_tensor", "torch.cat", "torch.equal", "torch.Size" ] ]
dermida/openpilot
[ "7aec87896ec53a536af5cec97548b5d66c49fbda" ]
[ "selfdrive/car/volkswagen/carstate.py" ]
[ "import numpy as np\nfrom cereal import car\nfrom selfdrive.config import Conversions as CV\nfrom selfdrive.car.interfaces import CarStateBase\nfrom opendbc.can.parser import CANParser\nfrom opendbc.can.can_define import CANDefine\nfrom selfdrive.car.volkswagen.values import DBC_FILES, CANBUS, NetworkLocation, TransmissionType, GearShifter, BUTTON_STATES, CarControllerParams\n\nclass CarState(CarStateBase):\n def __init__(self, CP):\n super().__init__(CP)\n can_define = CANDefine(DBC_FILES.mqb)\n if CP.transmissionType == TransmissionType.automatic:\n self.shifter_values = can_define.dv[\"Getriebe_11\"][\"GE_Fahrstufe\"]\n elif CP.transmissionType == TransmissionType.direct:\n self.shifter_values = can_define.dv[\"EV_Gearshift\"][\"GearPosition\"]\n self.hca_status_values = can_define.dv[\"LH_EPS_03\"][\"EPS_HCA_Status\"]\n self.buttonStates = BUTTON_STATES.copy()\n\n def update(self, pt_cp, cam_cp, ext_cp, trans_type):\n ret = car.CarState.new_message()\n # Update vehicle speed and acceleration from ABS wheel speeds.\n ret.wheelSpeeds.fl = pt_cp.vl[\"ESP_19\"][\"ESP_VL_Radgeschw_02\"] * CV.KPH_TO_MS\n ret.wheelSpeeds.fr = pt_cp.vl[\"ESP_19\"][\"ESP_VR_Radgeschw_02\"] * CV.KPH_TO_MS\n ret.wheelSpeeds.rl = pt_cp.vl[\"ESP_19\"][\"ESP_HL_Radgeschw_02\"] * CV.KPH_TO_MS\n ret.wheelSpeeds.rr = pt_cp.vl[\"ESP_19\"][\"ESP_HR_Radgeschw_02\"] * CV.KPH_TO_MS\n\n ret.vEgoRaw = float(np.mean([ret.wheelSpeeds.fl, ret.wheelSpeeds.fr, ret.wheelSpeeds.rl, ret.wheelSpeeds.rr]))\n ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)\n ret.standstill = bool(pt_cp.vl[\"ESP_21\"][\"ESP_Haltebestaetigung\"])\n\n # Update steering angle, rate, yaw rate, and driver input torque. VW send\n # the sign/direction in a separate signal so they must be recombined.\n ret.steeringAngleDeg = pt_cp.vl[\"LH_EPS_03\"][\"EPS_Berechneter_LW\"] * (1, -1)[int(pt_cp.vl[\"LH_EPS_03\"][\"EPS_VZ_BLW\"])]\n ret.steeringRateDeg = pt_cp.vl[\"LWI_01\"][\"LWI_Lenkradw_Geschw\"] * (1, -1)[int(pt_cp.vl[\"LWI_01\"][\"LWI_VZ_Lenkradw_Geschw\"])]\n ret.steeringTorque = pt_cp.vl[\"LH_EPS_03\"][\"EPS_Lenkmoment\"] * (1, -1)[int(pt_cp.vl[\"LH_EPS_03\"][\"EPS_VZ_Lenkmoment\"])]\n ret.steeringPressed = abs(ret.steeringTorque) > CarControllerParams.STEER_DRIVER_ALLOWANCE\n ret.yawRate = pt_cp.vl[\"ESP_02\"][\"ESP_Gierrate\"] * (1, -1)[int(pt_cp.vl[\"ESP_02\"][\"ESP_VZ_Gierrate\"])] * CV.DEG_TO_RAD\n\n # Verify EPS readiness to accept steering commands\n hca_status = self.hca_status_values.get(pt_cp.vl[\"LH_EPS_03\"][\"EPS_HCA_Status\"])\n ret.steerError = hca_status in [\"DISABLED\", \"FAULT\"]\n ret.steerWarning = hca_status in [\"INITIALIZING\", \"REJECTED\"]\n\n # Update gas, brakes, and gearshift.\n ret.gas = pt_cp.vl[\"Motor_20\"][\"MO_Fahrpedalrohwert_01\"] / 100.0\n ret.gasPressed = ret.gas > 0\n ret.brake = pt_cp.vl[\"ESP_05\"][\"ESP_Bremsdruck\"] / 250.0 # FIXME: this is pressure in Bar, not sure what OP expects\n ret.brakePressed = bool(pt_cp.vl[\"ESP_05\"][\"ESP_Fahrer_bremst\"])\n\n # Update gear and/or clutch position data.\n if trans_type == TransmissionType.automatic:\n ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl[\"Getriebe_11\"][\"GE_Fahrstufe\"], None))\n elif trans_type == TransmissionType.direct:\n ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(pt_cp.vl[\"EV_Gearshift\"][\"GearPosition\"], None))\n elif trans_type == TransmissionType.manual:\n ret.clutchPressed = not pt_cp.vl[\"Motor_14\"][\"MO_Kuppl_schalter\"]\n if 
bool(pt_cp.vl[\"Gateway_72\"][\"BCM1_Rueckfahrlicht_Schalter\"]):\n ret.gearShifter = GearShifter.reverse\n else:\n ret.gearShifter = GearShifter.drive\n\n # Update door and trunk/hatch lid open status.\n ret.doorOpen = any([pt_cp.vl[\"Gateway_72\"][\"ZV_FT_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_BT_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_HFS_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_HBFS_offen\"],\n pt_cp.vl[\"Gateway_72\"][\"ZV_HD_offen\"]])\n\n # Update seatbelt fastened status.\n ret.seatbeltUnlatched = pt_cp.vl[\"Airbag_02\"][\"AB_Gurtschloss_FA\"] != 3\n\n # Update driver preference for metric. VW stores many different unit\n # preferences, including separate units for for distance vs. speed.\n # We use the speed preference for OP.\n self.displayMetricUnits = not pt_cp.vl[\"Einheiten_01\"][\"KBI_MFA_v_Einheit_02\"]\n\n # Consume blind-spot monitoring info/warning LED states, if available.\n # Infostufe: BSM LED on, Warnung: BSM LED flashing\n if self.CP.enableBsm:\n ret.leftBlindspot = bool(ext_cp.vl[\"SWA_01\"][\"SWA_Infostufe_SWA_li\"]) or bool(ext_cp.vl[\"SWA_01\"][\"SWA_Warnung_SWA_li\"])\n ret.rightBlindspot = bool(ext_cp.vl[\"SWA_01\"][\"SWA_Infostufe_SWA_re\"]) or bool(ext_cp.vl[\"SWA_01\"][\"SWA_Warnung_SWA_re\"])\n\n # Consume factory LDW data relevant for factory SWA (Lane Change Assist)\n # and capture it for forwarding to the blind spot radar controller\n self.ldw_lane_warning_left = bool(cam_cp.vl[\"LDW_02\"][\"LDW_SW_Warnung_links\"])\n self.ldw_lane_warning_right = bool(cam_cp.vl[\"LDW_02\"][\"LDW_SW_Warnung_rechts\"])\n self.ldw_side_dlc_tlc = bool(cam_cp.vl[\"LDW_02\"][\"LDW_Seite_DLCTLC\"])\n self.ldw_dlc = cam_cp.vl[\"LDW_02\"][\"LDW_DLC\"]\n self.ldw_tlc = cam_cp.vl[\"LDW_02\"][\"LDW_TLC\"]\n\n # Stock FCW is considered active if the release bit for brake-jerk warning\n # is set. Stock AEB considered active if the partial braking or target\n # braking release bits are set.\n # Refer to VW Self Study Program 890253: Volkswagen Driver Assistance\n # Systems, chapter on Front Assist with Braking: Golf Family for all MQB\n ret.stockFcw = bool(ext_cp.vl[\"ACC_10\"][\"AWV2_Freigabe\"])\n ret.stockAeb = bool(ext_cp.vl[\"ACC_10\"][\"ANB_Teilbremsung_Freigabe\"]) or bool(ext_cp.vl[\"ACC_10\"][\"ANB_Zielbremsung_Freigabe\"])\n\n # Update ACC radar status.\n accStatus = pt_cp.vl[\"TSK_06\"][\"TSK_Status\"]\n if accStatus == 2:\n # ACC okay and enabled, but not currently engaged\n ret.cruiseState.available = True\n ret.cruiseState.enabled = False\n elif accStatus in [3, 4, 5]:\n # ACC okay and enabled, currently engaged and regulating speed (3) or engaged with driver accelerating (4) or overrun (5)\n ret.cruiseState.available = True\n ret.cruiseState.enabled = True\n else:\n # ACC okay but disabled (1), or a radar visibility or other fault/disruption (6 or 7)\n ret.cruiseState.available = False\n ret.cruiseState.enabled = False\n\n # Update ACC setpoint. 
When the setpoint is zero or there's an error, the\n # radar sends a set-speed of ~90.69 m/s / 203mph.\n ret.cruiseState.speed = ext_cp.vl[\"ACC_02\"][\"ACC_Wunschgeschw\"] * CV.KPH_TO_MS\n if ret.cruiseState.speed > 90:\n ret.cruiseState.speed = 0\n\n # Update control button states for turn signals and ACC controls.\n self.buttonStates[\"accelCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Hoch\"])\n self.buttonStates[\"decelCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Runter\"])\n self.buttonStates[\"cancel\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Abbrechen\"])\n self.buttonStates[\"setCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Setzen\"])\n self.buttonStates[\"resumeCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Wiederaufnahme\"])\n self.buttonStates[\"gapAdjustCruise\"] = bool(pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Verstellung_Zeitluecke\"])\n ret.leftBlinker = bool(pt_cp.vl[\"Blinkmodi_02\"][\"Comfort_Signal_Left\"])\n ret.rightBlinker = bool(pt_cp.vl[\"Blinkmodi_02\"][\"Comfort_Signal_Right\"])\n\n # Read ACC hardware button type configuration info that has to pass thru\n # to the radar. Ends up being different for steering wheel buttons vs\n # third stalk type controls.\n self.graHauptschalter = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Hauptschalter\"]\n self.graTypHauptschalter = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Typ_Hauptschalter\"]\n self.graButtonTypeInfo = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_ButtonTypeInfo\"]\n self.graTipStufe2 = pt_cp.vl[\"GRA_ACC_01\"][\"GRA_Tip_Stufe_2\"]\n # Pick up the GRA_ACC_01 CAN message counter so we can sync to it for\n # later cruise-control button spamming.\n self.graMsgBusCounter = pt_cp.vl[\"GRA_ACC_01\"][\"COUNTER\"]\n\n # Additional safety checks performed in CarInterface.\n self.parkingBrakeSet = bool(pt_cp.vl[\"Kombi_01\"][\"KBI_Handbremse\"]) # FIXME: need to include an EPB check as well\n ret.espDisabled = pt_cp.vl[\"ESP_21\"][\"ESP_Tastung_passiv\"] != 0\n\n return ret\n\n @staticmethod\n def get_can_parser(CP):\n # this function generates lists for signal, messages and initial values\n signals = [\n # sig_name, sig_address, default\n (\"EPS_Berechneter_LW\", \"LH_EPS_03\", 0), # Absolute steering angle\n (\"EPS_VZ_BLW\", \"LH_EPS_03\", 0), # Steering angle sign\n (\"LWI_Lenkradw_Geschw\", \"LWI_01\", 0), # Absolute steering rate\n (\"LWI_VZ_Lenkradw_Geschw\", \"LWI_01\", 0), # Steering rate sign\n (\"ESP_VL_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, front left\n (\"ESP_VR_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, front right\n (\"ESP_HL_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, rear left\n (\"ESP_HR_Radgeschw_02\", \"ESP_19\", 0), # ABS wheel speed, rear right\n (\"ESP_Gierrate\", \"ESP_02\", 0), # Absolute yaw rate\n (\"ESP_VZ_Gierrate\", \"ESP_02\", 0), # Yaw rate sign\n (\"ZV_FT_offen\", \"Gateway_72\", 0), # Door open, driver\n (\"ZV_BT_offen\", \"Gateway_72\", 0), # Door open, passenger\n (\"ZV_HFS_offen\", \"Gateway_72\", 0), # Door open, rear left\n (\"ZV_HBFS_offen\", \"Gateway_72\", 0), # Door open, rear right\n (\"ZV_HD_offen\", \"Gateway_72\", 0), # Trunk or hatch open\n (\"Comfort_Signal_Left\", \"Blinkmodi_02\", 0), # Left turn signal including comfort blink interval\n (\"Comfort_Signal_Right\", \"Blinkmodi_02\", 0), # Right turn signal including comfort blink interval\n (\"AB_Gurtschloss_FA\", \"Airbag_02\", 0), # Seatbelt status, driver\n (\"AB_Gurtschloss_BF\", \"Airbag_02\", 0), # Seatbelt status, passenger\n (\"ESP_Fahrer_bremst\", \"ESP_05\", 0), # Brake pedal pressed\n (\"ESP_Bremsdruck\", 
\"ESP_05\", 0), # Brake pressure applied\n (\"MO_Fahrpedalrohwert_01\", \"Motor_20\", 0), # Accelerator pedal value\n (\"EPS_Lenkmoment\", \"LH_EPS_03\", 0), # Absolute driver torque input\n (\"EPS_VZ_Lenkmoment\", \"LH_EPS_03\", 0), # Driver torque input sign\n (\"EPS_HCA_Status\", \"LH_EPS_03\", 3), # EPS HCA control status\n (\"ESP_Tastung_passiv\", \"ESP_21\", 0), # Stability control disabled\n (\"ESP_Haltebestaetigung\", \"ESP_21\", 0), # ESP hold confirmation\n (\"KBI_MFA_v_Einheit_02\", \"Einheiten_01\", 0), # MPH vs KMH speed display\n (\"KBI_Handbremse\", \"Kombi_01\", 0), # Manual handbrake applied\n (\"TSK_Status\", \"TSK_06\", 0), # ACC engagement status from drivetrain coordinator\n (\"GRA_Hauptschalter\", \"GRA_ACC_01\", 0), # ACC button, on/off\n (\"GRA_Abbrechen\", \"GRA_ACC_01\", 0), # ACC button, cancel\n (\"GRA_Tip_Setzen\", \"GRA_ACC_01\", 0), # ACC button, set\n (\"GRA_Tip_Hoch\", \"GRA_ACC_01\", 0), # ACC button, increase or accel\n (\"GRA_Tip_Runter\", \"GRA_ACC_01\", 0), # ACC button, decrease or decel\n (\"GRA_Tip_Wiederaufnahme\", \"GRA_ACC_01\", 0), # ACC button, resume\n (\"GRA_Verstellung_Zeitluecke\", \"GRA_ACC_01\", 0), # ACC button, time gap adj\n (\"GRA_Typ_Hauptschalter\", \"GRA_ACC_01\", 0), # ACC main button type\n (\"GRA_Tip_Stufe_2\", \"GRA_ACC_01\", 0), # unknown related to stalk type\n (\"GRA_ButtonTypeInfo\", \"GRA_ACC_01\", 0), # unknown related to stalk type\n (\"COUNTER\", \"GRA_ACC_01\", 0), # GRA_ACC_01 CAN message counter\n ]\n\n checks = [\n # sig_address, frequency\n (\"LWI_01\", 100), # From J500 Steering Assist with integrated sensors\n (\"LH_EPS_03\", 100), # From J500 Steering Assist with integrated sensors\n (\"ESP_19\", 100), # From J104 ABS/ESP controller\n (\"ESP_05\", 50), # From J104 ABS/ESP controller\n (\"ESP_21\", 50), # From J104 ABS/ESP controller\n (\"Motor_20\", 50), # From J623 Engine control module\n (\"TSK_06\", 50), # From J623 Engine control module\n (\"ESP_02\", 50), # From J104 ABS/ESP controller\n (\"GRA_ACC_01\", 33), # From J533 CAN gateway (via LIN from steering wheel controls)\n (\"Gateway_72\", 10), # From J533 CAN gateway (aggregated data)\n (\"Airbag_02\", 5), # From J234 Airbag control module\n (\"Kombi_01\", 2), # From J285 Instrument cluster\n (\"Blinkmodi_02\", 1), # From J519 BCM (sent at 1Hz when no lights active, 50Hz when active)\n (\"Einheiten_01\", 1), # From J??? not known if gateway, cluster, or BCM\n ]\n\n if CP.transmissionType == TransmissionType.automatic:\n signals += [(\"GE_Fahrstufe\", \"Getriebe_11\", 0)] # Auto trans gear selector position\n checks += [(\"Getriebe_11\", 20)] # From J743 Auto transmission control module\n elif CP.transmissionType == TransmissionType.direct:\n signals += [(\"GearPosition\", \"EV_Gearshift\", 0)] # EV gear selector position\n checks += [(\"EV_Gearshift\", 10)] # From J??? 
unknown EV control module\n elif CP.transmissionType == TransmissionType.manual:\n signals += [(\"MO_Kuppl_schalter\", \"Motor_14\", 0), # Clutch switch\n (\"BCM1_Rueckfahrlicht_Schalter\", \"Gateway_72\", 0)] # Reverse light from BCM\n checks += [(\"Motor_14\", 10)] # From J623 Engine control module\n\n if CP.networkLocation == NetworkLocation.fwdCamera:\n # Radars are here on CANBUS.pt\n signals += MqbExtraSignals.fwd_radar_signals\n checks += MqbExtraSignals.fwd_radar_checks\n if CP.enableBsm:\n signals += MqbExtraSignals.bsm_radar_signals\n checks += MqbExtraSignals.bsm_radar_checks\n\n return CANParser(DBC_FILES.mqb, signals, checks, CANBUS.pt)\n\n @staticmethod\n def get_cam_can_parser(CP):\n\n signals = [\n # sig_name, sig_address, default\n (\"LDW_SW_Warnung_links\", \"LDW_02\", 0), # Blind spot in warning mode on left side due to lane departure\n (\"LDW_SW_Warnung_rechts\", \"LDW_02\", 0), # Blind spot in warning mode on right side due to lane departure\n (\"LDW_Seite_DLCTLC\", \"LDW_02\", 0), # Direction of most likely lane departure (left or right)\n (\"LDW_DLC\", \"LDW_02\", 0), # Lane departure, distance to line crossing\n (\"LDW_TLC\", \"LDW_02\", 0), # Lane departure, time to line crossing\n ]\n\n checks = [\n # sig_address, frequency\n (\"LDW_02\", 10) # From R242 Driver assistance camera\n ]\n\n if CP.networkLocation == NetworkLocation.gateway:\n # Radars are here on CANBUS.cam\n signals += MqbExtraSignals.fwd_radar_signals\n checks += MqbExtraSignals.fwd_radar_checks\n if CP.enableBsm:\n signals += MqbExtraSignals.bsm_radar_signals\n checks += MqbExtraSignals.bsm_radar_checks\n\n return CANParser(DBC_FILES.mqb, signals, checks, CANBUS.cam)\n\nclass MqbExtraSignals:\n # Additional signal and message lists for optional or bus-portable controllers\n fwd_radar_signals = [\n (\"ACC_Wunschgeschw\", \"ACC_02\", 0), # ACC set speed\n (\"AWV2_Freigabe\", \"ACC_10\", 0), # FCW brake jerk release\n (\"ANB_Teilbremsung_Freigabe\", \"ACC_10\", 0), # AEB partial braking release\n (\"ANB_Zielbremsung_Freigabe\", \"ACC_10\", 0), # AEB target braking release\n ]\n fwd_radar_checks = [\n (\"ACC_10\", 50), # From J428 ACC radar control module\n (\"ACC_02\", 17), # From J428 ACC radar control module\n ]\n bsm_radar_signals = [\n (\"SWA_Infostufe_SWA_li\", \"SWA_01\", 0), # Blind spot object info, left\n (\"SWA_Warnung_SWA_li\", \"SWA_01\", 0), # Blind spot object warning, left\n (\"SWA_Infostufe_SWA_re\", \"SWA_01\", 0), # Blind spot object info, right\n (\"SWA_Warnung_SWA_re\", \"SWA_01\", 0), # Blind spot object warning, right\n ]\n bsm_radar_checks = [\n (\"SWA_01\", 20), # From J1086 Lane Change Assist\n ]\n" ]
[ [ "numpy.mean" ] ]
sanghoon/Higher-HRNet-Human-Pose-Estimation
[ "f6f24a3eec9ac82ca18edd1e22de62f6f201caea" ]
[ "tools/dist_train.py" ]
[ "# ------------------------------------------------------------------------------\n# Copyright (c) Microsoft\n# Licensed under the MIT License.\n# Written by Bin Xiao ([email protected])\n# Modified by Bowen Cheng ([email protected])\n# ------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport os\nimport pprint\nimport shutil\nimport warnings\n\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nimport torch.nn.parallel\nimport torch.optim\nimport torch.utils.data\nimport torch.utils.data.distributed\nfrom tensorboardX import SummaryWriter\n\nimport _init_paths\nimport models\n\nfrom config import cfg\nfrom config import update_config\nfrom core.loss import MultiLossFactory\nfrom core.trainer import do_train\nfrom dataset import make_dataloader\nfrom fp16_utils.fp16util import network_to_half\nfrom fp16_utils.fp16_optimizer import FP16_Optimizer\nfrom utils.utils import create_logger\nfrom utils.utils import get_optimizer\nfrom utils.utils import save_checkpoint\nfrom utils.utils import setup_logger\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Train keypoints network')\n # general\n parser.add_argument('--cfg',\n help='experiment configure file name',\n required=True,\n type=str)\n\n parser.add_argument('opts',\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER)\n\n # distributed training\n parser.add_argument('--gpu',\n help='gpu id for multiprocessing training',\n type=str)\n parser.add_argument('--world-size',\n default=1,\n type=int,\n help='number of nodes for distributed training')\n parser.add_argument('--dist-url',\n default='tcp://127.0.0.1:23456',\n type=str,\n help='url used to set up distributed training')\n parser.add_argument('--rank',\n default=0,\n type=int,\n help='node rank for distributed training')\n\n args = parser.parse_args()\n\n return args\n\n\ndef main():\n args = parse_args()\n update_config(cfg, args)\n\n cfg.defrost()\n cfg.RANK = args.rank\n cfg.freeze()\n\n logger, final_output_dir, tb_log_dir = create_logger(\n cfg, args.cfg, 'train'\n )\n\n logger.info(pprint.pformat(args))\n logger.info(cfg)\n\n if args.gpu is not None:\n warnings.warn('You have chosen a specific GPU. 
This will completely '\n 'disable data parallelism.')\n\n if args.dist_url == \"env://\" and args.world_size == -1:\n args.world_size = int(os.environ[\"WORLD_SIZE\"])\n\n args.distributed = args.world_size > 1 or cfg.MULTIPROCESSING_DISTRIBUTED\n\n ngpus_per_node = torch.cuda.device_count()\n if cfg.MULTIPROCESSING_DISTRIBUTED:\n # Since we have ngpus_per_node processes per node, the total world_size\n # needs to be adjusted accordingly\n args.world_size = ngpus_per_node * args.world_size\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n mp.spawn(\n main_worker,\n nprocs=ngpus_per_node,\n args=(ngpus_per_node, args, final_output_dir, tb_log_dir)\n )\n else:\n # Simply call main_worker function\n main_worker(\n ','.join([str(i) for i in cfg.GPUS]),\n ngpus_per_node,\n args,\n final_output_dir,\n tb_log_dir\n )\n\n\ndef main_worker(\n gpu, ngpus_per_node, args, final_output_dir, tb_log_dir\n):\n # cudnn related setting\n cudnn.benchmark = cfg.CUDNN.BENCHMARK\n torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC\n torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED\n\n if cfg.FP16.ENABLED:\n assert torch.backends.cudnn.enabled, \"fp16 mode requires cudnn backend to be enabled.\"\n\n if cfg.FP16.STATIC_LOSS_SCALE != 1.0:\n if not cfg.FP16.ENABLED:\n print(\"Warning: if --fp16 is not used, static_loss_scale will be ignored.\")\n\n args.gpu = gpu\n\n if args.gpu is not None:\n print(\"Use GPU: {} for training\".format(args.gpu))\n\n update_config(cfg, args)\n if args.distributed:\n if args.dist_url == \"env://\" and args.rank == -1:\n args.rank = int(os.environ[\"RANK\"])\n if cfg.MULTIPROCESSING_DISTRIBUTED:\n # For multiprocessing distributed training, rank needs to be the\n # global rank among all the processes\n args.rank = args.rank * ngpus_per_node + gpu\n print('Init process group: dist_url: {}, world_size: {}, rank: {}'.\n format(args.dist_url, args.world_size, args.rank))\n dist.init_process_group(\n backend=cfg.DIST_BACKEND,\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank\n )\n\n # setup logger\n logger, _ = setup_logger(final_output_dir, args.rank, 'train')\n\n model = eval('models.'+cfg.MODEL.NAME+'.get_pose_net')(\n cfg, is_train=True\n )\n\n # copy model file\n if not cfg.MULTIPROCESSING_DISTRIBUTED or (\n cfg.MULTIPROCESSING_DISTRIBUTED\n and args.rank % ngpus_per_node == 0\n ):\n this_dir = os.path.dirname(__file__)\n shutil.copy2(\n os.path.join(this_dir, '../lib/models', cfg.MODEL.NAME + '.py'),\n final_output_dir\n )\n\n writer_dict = {\n 'writer': SummaryWriter(log_dir=tb_log_dir),\n 'train_global_steps': 0,\n 'valid_global_steps': 0,\n }\n\n if not cfg.MULTIPROCESSING_DISTRIBUTED or (\n cfg.MULTIPROCESSING_DISTRIBUTED\n and args.rank % ngpus_per_node == 0\n ):\n dump_input = torch.rand(\n (1, 3, cfg.DATASET.INPUT_SIZE, cfg.DATASET.INPUT_SIZE)\n )\n # writer_dict['writer'].add_graph(model, (dump_input, ))\n # logger.info(get_model_summary(model, dump_input, verbose=cfg.VERBOSE))\n\n if cfg.FP16.ENABLED:\n model = network_to_half(model)\n\n if args.distributed:\n # For multiprocessing distributed, DistributedDataParallel constructor\n # should always set the single device scope, otherwise,\n # DistributedDataParallel will use all available devices.\n if args.gpu is not None:\n torch.cuda.set_device(args.gpu)\n model.cuda(args.gpu)\n # When using a single GPU per process and per\n # DistributedDataParallel, we need to divide the batch size\n # ourselves based on the total number of GPUs 
we have\n # args.workers = int(args.workers / ngpus_per_node)\n model = torch.nn.parallel.DistributedDataParallel(\n model, device_ids=[args.gpu]\n )\n else:\n model.cuda()\n # DistributedDataParallel will divide and allocate batch_size to all\n # available GPUs if device_ids are not set\n model = torch.nn.parallel.DistributedDataParallel(model)\n elif args.gpu is not None:\n # args.gpu = int(args.gpu)\n # torch.cuda.set_device(args.gpu)\n # model = model.cuda(args.gpu)\n device_ids = [int(i) for i in args.gpu.split(',') if i]\n model = torch.nn.DataParallel(model, device_ids=device_ids).cuda()\n else:\n model = torch.nn.DataParallel(model).cuda()\n\n # define loss function (criterion) and optimizer\n loss_factory = MultiLossFactory(cfg).cuda()\n\n # Data loading code\n train_loader = make_dataloader(\n cfg, is_train=True, distributed=args.distributed\n )\n logger.info(train_loader.dataset)\n\n best_perf = -1\n best_model = False\n last_epoch = -1\n optimizer = get_optimizer(cfg, model)\n\n if cfg.FP16.ENABLED:\n optimizer = FP16_Optimizer(\n optimizer,\n static_loss_scale=cfg.FP16.STATIC_LOSS_SCALE,\n dynamic_loss_scale=cfg.FP16.DYNAMIC_LOSS_SCALE\n )\n\n begin_epoch = cfg.TRAIN.BEGIN_EPOCH\n checkpoint_file = os.path.join(\n final_output_dir, 'checkpoint.pth.tar')\n if cfg.AUTO_RESUME and os.path.exists(checkpoint_file):\n logger.info(\"=> loading checkpoint '{}'\".format(checkpoint_file))\n checkpoint = torch.load(checkpoint_file)\n begin_epoch = checkpoint['epoch']\n best_perf = checkpoint['perf']\n last_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n\n optimizer.load_state_dict(checkpoint['optimizer'])\n logger.info(\"=> loaded checkpoint '{}' (epoch {})\".format(\n checkpoint_file, checkpoint['epoch']))\n\n if cfg.FP16.ENABLED:\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer.optimizer, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR,\n last_epoch=last_epoch\n )\n else:\n lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(\n optimizer, cfg.TRAIN.LR_STEP, cfg.TRAIN.LR_FACTOR,\n last_epoch=last_epoch\n )\n\n for epoch in range(begin_epoch, cfg.TRAIN.END_EPOCH):\n lr_scheduler.step()\n\n # train one epoch\n do_train(cfg, model, train_loader, loss_factory, optimizer, epoch,\n final_output_dir, tb_log_dir, writer_dict, fp16=cfg.FP16.ENABLED)\n\n perf_indicator = epoch\n if perf_indicator >= best_perf:\n best_perf = perf_indicator\n best_model = True\n else:\n best_model = False\n\n if not cfg.MULTIPROCESSING_DISTRIBUTED or (\n cfg.MULTIPROCESSING_DISTRIBUTED\n and args.rank == 0\n ):\n logger.info('=> saving checkpoint to {}'.format(final_output_dir))\n save_checkpoint({\n 'epoch': epoch + 1,\n 'model': cfg.MODEL.NAME,\n 'state_dict': model.state_dict(),\n 'best_state_dict': getattr(model, 'module', model).state_dict(), # Unwrap DataPrallel\n 'perf': perf_indicator,\n 'optimizer': optimizer.state_dict(),\n }, best_model, final_output_dir)\n\n final_model_state_file = os.path.join(\n final_output_dir, 'final_state{}.pth.tar'.format(gpu)\n )\n\n logger.info('saving final model state to {}'.format(\n final_model_state_file))\n torch.save(getattr(model, 'module', model).state_dict(), final_model_state_file)\n writer_dict['writer'].close()\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "torch.rand", "torch.distributed.init_process_group", "torch.multiprocessing.spawn", "torch.nn.parallel.DistributedDataParallel", "torch.cuda.device_count", "torch.optim.lr_scheduler.MultiStepLR", "torch.cuda.set_device", "torch.load", "torch.nn.DataParallel" ] ]
HannahElisa/RegRCNN
[ "1aa69d00c61bd36685213248bb30d4ba30ac5a06" ]
[ "exec.py" ]
[ "#!/usr/bin/env python\n# Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ).\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\" execution script. this where all routines come together and the only script you need to call.\n refer to parse args below to see options for execution.\n\"\"\"\n\nimport RegRCNN.plotting as plg\n\nimport os\nimport warnings\nimport argparse\nimport time\n\nimport torch\n\nimport RegRCNN.utils.exp_utils as utils\nfrom evaluator import Evaluator\nfrom predictor import Predictor\n\n\nfor msg in [\"Attempting to set identical bottom==top results\",\n \"This figure includes Axes that are not compatible with tight_layout\",\n \"Data has no positive values, and therefore cannot be log-scaled.\",\n \".*invalid value encountered in true_divide.*\"]:\n warnings.filterwarnings(\"ignore\", msg)\n\n\ndef train(cf, logger):\n \"\"\"\n performs the training routine for a given fold. saves plots and selected parameters to the experiment dir\n specified in the configs. logs to file and tensorboard.\n \"\"\"\n logger.info('performing training in {}D over fold {} on experiment {} with model {}'.format(\n cf.dim, cf.fold, cf.exp_dir, cf.model))\n logger.time(\"train_val\")\n\n # -------------- inits and settings -----------------\n net = model.net(cf, logger).cuda()\n if cf.optimizer == \"ADAMW\":\n optimizer = torch.optim.AdamW(utils.parse_params_for_optim(net, weight_decay=cf.weight_decay,\n exclude_from_wd=cf.exclude_from_wd),\n lr=cf.learning_rate[0])\n elif cf.optimizer == \"SGD\":\n optimizer = torch.optim.SGD(utils.parse_params_for_optim(net, weight_decay=cf.weight_decay),\n lr=cf.learning_rate[0], momentum=0.3)\n if cf.dynamic_lr_scheduling:\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode=cf.scheduling_mode, factor=cf.lr_decay_factor,\n patience=cf.scheduling_patience)\n model_selector = utils.ModelSelector(cf, logger)\n\n starting_epoch = 1\n if cf.resume:\n checkpoint_path = os.path.join(cf.fold_dir, \"last_state.pth\")\n starting_epoch, net, optimizer, model_selector = \\\n utils.load_checkpoint(checkpoint_path, net, optimizer, model_selector)\n logger.info('resumed from checkpoint {} to epoch {}'.format(checkpoint_path, starting_epoch))\n\n # prepare monitoring\n monitor_metrics = utils.prepare_monitoring(cf)\n\n logger.info('loading dataset and initializing batch generators...')\n batch_gen = data_loader.get_train_generators(cf, logger)\n\n # -------------- training -----------------\n for epoch in range(starting_epoch, cf.num_epochs + 1):\n\n logger.info('starting training epoch {}/{}'.format(epoch, cf.num_epochs))\n logger.time(\"train_epoch\")\n\n net.train()\n\n train_results_list = []\n train_evaluator = Evaluator(cf, logger, mode='train')\n\n for i in range(cf.num_train_batches):\n logger.time(\"train_batch_loadfw\")\n batch = next(batch_gen['train'])\n batch_gen['train'].generator.stats['roi_counts'] += 
batch['roi_counts']\n batch_gen['train'].generator.stats['empty_counts'] += batch['empty_counts']\n\n logger.time(\"train_batch_loadfw\")\n logger.time(\"train_batch_netfw\")\n results_dict = net.train_forward(batch)\n logger.time(\"train_batch_netfw\")\n logger.time(\"train_batch_bw\")\n optimizer.zero_grad()\n results_dict['torch_loss'].backward()\n if cf.clip_norm:\n torch.nn.utils.clip_grad_norm_(net.parameters(), cf.clip_norm, norm_type=2) # gradient clipping\n optimizer.step()\n train_results_list.append(({k:v for k,v in results_dict.items() if k != \"seg_preds\"}, batch[\"pid\"])) # slim res dict\n if not cf.server_env:\n print(\"\\rFinished training batch \" +\n \"{}/{} in {:.1f}s ({:.2f}/{:.2f} forw load/net, {:.2f} backw).\".format(i+1, cf.num_train_batches,\n logger.get_time(\"train_batch_loadfw\")+\n logger.get_time(\"train_batch_netfw\")\n +logger.time(\"train_batch_bw\"),\n logger.get_time(\"train_batch_loadfw\",reset=True),\n logger.get_time(\"train_batch_netfw\", reset=True),\n logger.get_time(\"train_batch_bw\", reset=True)), end=\"\", flush=True)\n print()\n\n #--------------- train eval ----------------\n if (epoch-1)%cf.plot_frequency==0:\n # view an example batch\n utils.split_off_process(plg.view_batch, cf, batch, results_dict, has_colorchannels=cf.has_colorchannels,\n show_gt_labels=True, get_time=\"train-example plot\",\n out_file=os.path.join(cf.plot_dir, 'batch_example_train_{}.png'.format(cf.fold)))\n\n\n logger.time(\"evals\")\n _, monitor_metrics['train'] = train_evaluator.evaluate_predictions(train_results_list, monitor_metrics['train'])\n logger.time(\"evals\")\n logger.time(\"train_epoch\", toggle=False)\n del train_results_list\n\n #----------- validation ------------\n logger.info('starting validation in mode {}.'.format(cf.val_mode))\n logger.time(\"val_epoch\")\n with torch.no_grad():\n net.eval()\n val_results_list = []\n val_evaluator = Evaluator(cf, logger, mode=cf.val_mode)\n val_predictor = Predictor(cf, net, logger, mode='val')\n\n for i in range(batch_gen['n_val']):\n logger.time(\"val_batch\")\n batch = next(batch_gen[cf.val_mode])\n if cf.val_mode == 'val_patient':\n results_dict = val_predictor.predict_patient(batch)\n elif cf.val_mode == 'val_sampling':\n results_dict = net.train_forward(batch, is_validation=True)\n val_results_list.append([results_dict, batch[\"pid\"]])\n if not cf.server_env:\n print(\"\\rFinished validation {} {}/{} in {:.1f}s.\".format('patient' if cf.val_mode=='val_patient' else 'batch',\n i + 1, batch_gen['n_val'],\n logger.time(\"val_batch\")), end=\"\", flush=True)\n print()\n\n #------------ val eval -------------\n if (epoch - 1) % cf.plot_frequency == 0:\n utils.split_off_process(plg.view_batch, cf, batch, results_dict, has_colorchannels=cf.has_colorchannels,\n show_gt_labels=True, get_time=\"val-example plot\",\n out_file=os.path.join(cf.plot_dir, 'batch_example_val_{}.png'.format(cf.fold)))\n\n logger.time(\"evals\")\n _, monitor_metrics['val'] = val_evaluator.evaluate_predictions(val_results_list, monitor_metrics['val'])\n\n model_selector.run_model_selection(net, optimizer, monitor_metrics, epoch)\n del val_results_list\n #----------- monitoring -------------\n monitor_metrics.update({\"lr\": \n {str(g) : group['lr'] for (g, group) in enumerate(optimizer.param_groups)}})\n logger.metrics2tboard(monitor_metrics, global_step=epoch)\n logger.time(\"evals\")\n\n logger.info('finished epoch {}/{}, took {:.2f}s. train total: {:.2f}s, average: {:.2f}s. 
val total: {:.2f}s, average: {:.2f}s.'.format(\n epoch, cf.num_epochs, logger.get_time(\"train_epoch\")+logger.time(\"val_epoch\"), logger.get_time(\"train_epoch\"),\n logger.get_time(\"train_epoch\", reset=True)/cf.num_train_batches, logger.get_time(\"val_epoch\"),\n logger.get_time(\"val_epoch\", reset=True)/batch_gen[\"n_val\"]))\n logger.info(\"time for evals: {:.2f}s\".format(logger.get_time(\"evals\", reset=True)))\n\n #-------------- scheduling -----------------\n if cf.dynamic_lr_scheduling:\n scheduler.step(monitor_metrics[\"val\"][cf.scheduling_criterion][-1])\n else:\n for param_group in optimizer.param_groups:\n param_group['lr'] = cf.learning_rate[epoch-1]\n\n logger.time(\"train_val\")\n logger.info(\"Training and validating over {} epochs took {}\".format(cf.num_epochs, logger.get_time(\"train_val\", format=\"hms\", reset=True)))\n batch_gen['train'].generator.print_stats(logger, plot=True)\n\ndef test(cf, logger, max_fold=None):\n \"\"\"performs testing for a given fold (or held out set). saves stats in evaluator.\n \"\"\"\n logger.time(\"test_fold\")\n logger.info('starting testing model of fold {} in exp {}'.format(cf.fold, cf.exp_dir))\n net = model.net(cf, logger).cuda()\n batch_gen = data_loader.get_test_generator(cf, logger)\n\n test_predictor = Predictor(cf, net, logger, mode='test')\n test_results_list = test_predictor.predict_test_set(batch_gen, return_results = not hasattr(\n cf, \"eval_test_separately\") or not cf.eval_test_separately)\n\n if test_results_list is not None:\n test_evaluator = Evaluator(cf, logger, mode='test')\n test_evaluator.evaluate_predictions(test_results_list)\n test_evaluator.score_test_df(max_fold=max_fold)\n\n logger.info('Testing of fold {} took {}.\\n'.format(cf.fold, logger.get_time(\"test_fold\", reset=True, format=\"hms\")))\n\nif __name__ == '__main__':\n stime = time.time()\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset_name', type=str, default='toy',\n help=\"path to the dataset-specific code in source_dir/datasets\")\n parser.add_argument('--exp_dir', type=str, default='/home/gregor/Documents/regrcnn/datasets/toy/experiments/dev',\n help='path to experiment dir. will be created if non existent.')\n parser.add_argument('-m', '--mode', type=str, default='train_test', help='one out of: create_exp, analysis, train, train_test, or test')\n parser.add_argument('-f', '--folds', nargs='+', type=int, default=None, help='None runs over all folds in CV. otherwise specify list of folds.')\n parser.add_argument('--server_env', default=False, action='store_true', help='change IO settings to deploy models on a cluster.')\n parser.add_argument('--data_dest', type=str, default=None, help=\"path to final data folder if different from config\")\n parser.add_argument('--use_stored_settings', default=False, action='store_true',\n help='load configs from existing exp_dir instead of source dir. always done for testing, '\n 'but can be set to true to do the same for training. 
useful in job scheduler environment, '\n 'where source code might change before the job actually runs.')\n parser.add_argument('--resume', action=\"store_true\", default=False,\n help='if given, resume from checkpoint(s) of the specified folds.')\n parser.add_argument('-d', '--dev', default=False, action='store_true', help=\"development mode: shorten everything\")\n\n args = parser.parse_args()\n args.dataset_name = os.path.join(\"datasets\", args.dataset_name) if not \"datasets\" in args.dataset_name else args.dataset_name\n folds = args.folds\n resume = None if args.resume in ['None', 'none'] else args.resume\n\n if args.mode == 'create_exp':\n cf = utils.prep_exp(args.dataset_name, args.exp_dir, args.server_env, use_stored_settings=False)\n logger = utils.get_logger(cf.exp_dir, cf.server_env, -1)\n logger.info('created experiment directory at {}'.format(args.exp_dir))\n\n elif args.mode == 'train' or args.mode == 'train_test':\n cf = utils.prep_exp(args.dataset_name, args.exp_dir, args.server_env, args.use_stored_settings)\n if args.dev:\n folds = [0,1]\n cf.batch_size, cf.num_epochs, cf.min_save_thresh, cf.save_n_models = 3 if cf.dim==2 else 1, 2, 0, 2\n cf.num_train_batches, cf.num_val_batches, cf.max_val_patients = 5, 1, 1\n cf.test_n_epochs, cf.max_test_patients = cf.save_n_models, 2\n torch.backends.cudnn.benchmark = cf.dim==3\n else:\n torch.backends.cudnn.benchmark = cf.cuda_benchmark\n if args.data_dest is not None:\n cf.data_dest = args.data_dest\n \n logger = utils.get_logger(cf.exp_dir, cf.server_env, cf.sysmetrics_interval)\n data_loader = utils.import_module('data_loader', os.path.join(args.dataset_name, 'data_loader.py'))\n model = utils.import_module('model', cf.model_path)\n logger.info(\"loaded model from {}\".format(cf.model_path))\n if folds is None:\n folds = range(cf.n_cv_splits)\n\n for fold in folds:\n \"\"\"k-fold cross-validation: the dataset is split into k equally-sized folds, one used for validation,\n one for testing, the rest for training. This loop iterates k-times over the dataset, cyclically moving the\n splits. 
k==folds, fold in [0,folds) says which split is used for testing.\n \"\"\"\n cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(fold)); cf.fold = fold\n logger.set_logfile(fold=fold)\n cf.resume = resume\n if not os.path.exists(cf.fold_dir):\n os.mkdir(cf.fold_dir)\n train(cf, logger)\n cf.resume = None\n if args.mode == 'train_test':\n test(cf, logger)\n\n elif args.mode == 'test':\n cf = utils.prep_exp(args.dataset_name, args.exp_dir, args.server_env, use_stored_settings=True, is_training=False)\n if args.data_dest is not None:\n cf.data_dest = args.data_dest\n logger = utils.get_logger(cf.exp_dir, cf.server_env, cf.sysmetrics_interval)\n data_loader = utils.import_module('data_loader', os.path.join(args.dataset_name, 'data_loader.py'))\n model = utils.import_module('model', cf.model_path)\n logger.info(\"loaded model from {}\".format(cf.model_path))\n\n fold_dirs = sorted([os.path.join(cf.exp_dir, f) for f in os.listdir(cf.exp_dir) if\n os.path.isdir(os.path.join(cf.exp_dir, f)) and f.startswith(\"fold\")])\n if folds is None:\n folds = range(cf.n_cv_splits)\n if args.dev:\n folds = folds[:2]\n cf.max_test_patients, cf.test_n_epochs = 2, 2\n else:\n torch.backends.cudnn.benchmark = cf.cuda_benchmark\n for fold in folds:\n cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(fold)); cf.fold = fold\n logger.set_logfile(fold=fold)\n if cf.fold_dir in fold_dirs:\n test(cf, logger, max_fold=max([int(f[-1]) for f in fold_dirs]))\n else:\n logger.info(\"Skipping fold {} since no model parameters found.\".format(fold))\n # load raw predictions saved by predictor during testing, run aggregation algorithms and evaluation.\n elif args.mode == 'analysis':\n \"\"\" analyse already saved predictions.\n \"\"\"\n cf = utils.prep_exp(args.dataset_name, args.exp_dir, args.server_env, use_stored_settings=True, is_training=False)\n logger = utils.get_logger(cf.exp_dir, cf.server_env, cf.sysmetrics_interval)\n\n if cf.hold_out_test_set and cf.ensemble_folds:\n predictor = Predictor(cf, net=None, logger=logger, mode='analysis')\n results_list = predictor.load_saved_predictions()\n logger.info('starting evaluation...')\n cf.fold = \"overall\"\n evaluator = Evaluator(cf, logger, mode='test')\n evaluator.evaluate_predictions(results_list)\n evaluator.score_test_df(max_fold=cf.fold)\n else:\n fold_dirs = sorted([os.path.join(cf.exp_dir, f) for f in os.listdir(cf.exp_dir) if\n os.path.isdir(os.path.join(cf.exp_dir, f)) and f.startswith(\"fold\")])\n if args.dev:\n cf.test_n_epochs = 2\n fold_dirs = fold_dirs[:1]\n if folds is None:\n folds = range(cf.n_cv_splits)\n for fold in folds:\n cf.fold = fold; cf.fold_dir = os.path.join(cf.exp_dir, 'fold_{}'.format(cf.fold))\n logger.set_logfile(fold=fold)\n if cf.fold_dir in fold_dirs:\n predictor = Predictor(cf, net=None, logger=logger, mode='analysis')\n results_list = predictor.load_saved_predictions()\n # results_list[x][1] is pid, results_list[x][0] is list of len samples-per-patient, each entry hlds\n # list of boxes per that sample, i.e., len(results_list[x][y][0]) would be nr of boxes in sample y of patient x\n logger.info('starting evaluation...')\n evaluator = Evaluator(cf, logger, mode='test')\n evaluator.evaluate_predictions(results_list)\n max_fold = max([int(f[-1]) for f in fold_dirs])\n evaluator.score_test_df(max_fold=max_fold)\n else:\n logger.info(\"Skipping fold {} since no model parameters found.\".format(fold))\n else:\n raise ValueError('mode \"{}\" specified in args is not implemented.'.format(args.mode))\n \n mins, secs = 
divmod((time.time() - stime), 60)\n h, mins = divmod(mins, 60)\n t = \"{:d}h:{:02d}m:{:02d}s\".format(int(h), int(mins), int(secs))\n logger.info(\"{} total runtime: {}\".format(os.path.split(__file__)[1], t))\n del logger\n torch.cuda.empty_cache()\n\n" ]
[ [ "torch.cuda.empty_cache", "torch.optim.lr_scheduler.ReduceLROnPlateau", "torch.no_grad" ] ]
superlich7/FasterRcnnTF_ICPR2018
[ "7ab0bad4df1e772bb8cece55a1c83b0bb1804a3b" ]
[ "lib/model/test.py" ]
[ "# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport cv2\nimport numpy as np\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\nimport os\nimport math\n\nfrom utils.timer import Timer\nfrom utils.blob import im_list_to_blob\n\nfrom model.config import cfg, get_output_dir\nfrom model.bbox_transform import clip_boxes, bbox_transform_inv\nfrom model.nms_wrapper import nms\n\ndef _get_image_blob(im):\n \"\"\"Converts an image into a network input.\n Arguments:\n im (ndarray): a color image in BGR order\n Returns:\n blob (ndarray): a data blob holding an image pyramid\n im_scale_factors (list): list of image scales (relative to im) used\n in the image pyramid\n \"\"\"\n im_orig = im.astype(np.float32, copy=True)\n im_orig -= cfg.PIXEL_MEANS\n\n im_shape = im_orig.shape\n im_size_min = np.min(im_shape[0:2])\n im_size_max = np.max(im_shape[0:2])\n\n processed_ims = []\n im_scale_factors = []\n\n for target_size in cfg.TEST.SCALES:\n im_scale = float(target_size) / float(im_size_min)\n # Prevent the biggest axis from being more than MAX_SIZE\n if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:\n im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)\n im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale_factors.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = im_list_to_blob(processed_ims)\n\n return blob, np.array(im_scale_factors)\n\ndef _get_blobs(im):\n \"\"\"Convert an image and RoIs within that image into network inputs.\"\"\"\n blobs = {}\n blobs['data'], im_scale_factors = _get_image_blob(im)\n\n return blobs, im_scale_factors\n\ndef _clip_boxes(boxes, im_shape):\n \"\"\"Clip boxes to image boundaries.\"\"\"\n # x1 >= 0\n boxes[:, 0::4] = np.maximum(boxes[:, 0::4], 0)\n # y1 >= 0\n boxes[:, 1::4] = np.maximum(boxes[:, 1::4], 0)\n # x2 < im_shape[1]\n boxes[:, 2::4] = np.minimum(boxes[:, 2::4], im_shape[1] - 1)\n # y2 < im_shape[0]\n boxes[:, 3::4] = np.minimum(boxes[:, 3::4], im_shape[0] - 1)\n return boxes\n\ndef _rescale_boxes(boxes, inds, scales):\n \"\"\"Rescale boxes according to image rescaling.\"\"\"\n for i in range(boxes.shape[0]):\n boxes[i,:] = boxes[i,:] / scales[int(inds[i])]\n\n return boxes\n\ndef im_detect(sess, net, im):\n blobs, im_scales = _get_blobs(im)\n assert len(im_scales) == 1, \"Only single-image batch implemented\"\n\n im_blob = blobs['data']\n blobs['im_info'] = np.array([im_blob.shape[1], im_blob.shape[2], im_scales[0]], dtype=np.float32)\n\n _, scores, bbox_pred, rois = net.test_image(sess, blobs['data'], blobs['im_info'])\n \n boxes = rois[:, 1:5] / im_scales[0]\n scores = np.reshape(scores, [scores.shape[0], -1])\n bbox_pred = np.reshape(bbox_pred, [bbox_pred.shape[0], -1])\n if cfg.TEST.BBOX_REG:\n # Apply bounding-box regression deltas\n box_deltas = bbox_pred\n pred_boxes = bbox_transform_inv(boxes, box_deltas)\n pred_boxes = _clip_boxes(pred_boxes, im.shape)\n else:\n # Simply repeat the boxes, once for each class\n pred_boxes = np.tile(boxes, (1, scores.shape[1]))\n\n return scores, pred_boxes\n\ndef apply_nms(all_boxes, thresh):\n \"\"\"Apply non-maximum suppression to all predicted boxes output by the\n test_net method.\n 
\"\"\"\n  num_classes = len(all_boxes)\n  num_images = len(all_boxes[0])\n  nms_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]\n  for cls_ind in range(num_classes):\n    for im_ind in range(num_images):\n      dets = all_boxes[cls_ind][im_ind]\n      if len(dets) == 0:\n        continue\n\n      x1 = dets[:, 0]\n      y1 = dets[:, 1]\n      x2 = dets[:, 2]\n      y2 = dets[:, 3]\n      scores = dets[:, 4]\n      inds = np.where((x2 > x1) & (y2 > y1))[0]\n      dets = dets[inds, :]\n      if len(dets) == 0:\n        continue\n\n      keep = nms(dets, thresh)\n      if len(keep) == 0:\n        continue\n      nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()\n  return nms_boxes\n\ndef test_net(sess, net, imdb, weights_filename, max_per_image=100, thresh=0.65):\n  \"\"\"Test a Fast R-CNN network on an image database.\"\"\"\n  np.random.seed(cfg.RNG_SEED)\n  num_images = len(imdb.image_index)\n  # all detections are collected into:\n  #  all_boxes[cls][image] = N x 5 array of detections in\n  #  (x1, y1, x2, y2, score)\n  all_boxes = [[[] for _ in range(num_images)]\n               for _ in range(imdb.num_classes)]\n\n  output_dir = get_output_dir(imdb, weights_filename)\n  # timers\n  _t = {'im_detect' : Timer(), 'misc' : Timer()}\n\n  for i in range(num_images):\n    im = cv2.imread(imdb.image_path_at(i))\n\n    _t['im_detect'].tic()\n    scores, boxes = im_detect(sess, net, im)\n    _t['im_detect'].toc()\n\n    _t['misc'].tic()\n\n    # skip j = 0, because it's the background class\n    for j in range(1, imdb.num_classes):\n      inds = np.where(scores[:, j] > thresh)[0]\n      cls_scores = scores[inds, j]\n      cls_boxes = boxes[inds, j*4:(j+1)*4]\n      cls_dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\n        .astype(np.float32, copy=False)\n      keep = nms(cls_dets, cfg.TEST.NMS)\n      cls_dets = cls_dets[keep, :]\n      all_boxes[j][i] = cls_dets\n\n    # Limit to max_per_image detections *over all classes*\n    if max_per_image > 0:\n      image_scores = np.hstack([all_boxes[j][i][:, -1]\n                                for j in range(1, imdb.num_classes)])\n      if len(image_scores) > max_per_image:\n        image_thresh = np.sort(image_scores)[-max_per_image]\n        for j in range(1, imdb.num_classes):\n          keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]\n          all_boxes[j][i] = all_boxes[j][i][keep, :]\n\n    # for image display\n    #print(imdb.image_path_at(i))\n    #for k in range(len(all_boxes[1][i])):\n    #  cv2.rectangle(im,(all_boxes[1][i][k,0],all_boxes[1][i][k,1]),(all_boxes[1][i][k,2],all_boxes[1][i][k,3]),(0,0,255),2)\n    #cv2.imwrite(os.path.join(output_dir,str(i)+'.jpg'),im)\n\n    # write one result file per image: four corner coordinates per detection\n    if i % 100 == 0:\n      print('{}/{} done'.format(i, num_images))\n    result_file = os.path.join(output_dir, imdb._image_index[i] + '.txt')\n    fout = open(result_file, 'w')\n    for k in range(len(all_boxes[1][i])):\n      x1 = all_boxes[1][i][k, 0]\n      y1 = all_boxes[1][i][k, 1]\n      x2 = all_boxes[1][i][k, 0]\n      y2 = all_boxes[1][i][k, 3]\n      x3 = all_boxes[1][i][k, 2]\n      y3 = all_boxes[1][i][k, 3]\n      x4 = all_boxes[1][i][k, 2]\n      y4 = all_boxes[1][i][k, 1]\n      fout.write(str(x1)+','+str(y1)+','+str(x2)+','+str(y2)+','+str(x3)+','+str(y3)+','+str(x4)+','+str(y4)+'\\n')\n    fout.close()\n\n  det_file = os.path.join(output_dir, 'detections.pkl')\n  with open(det_file, 'wb') as f:\n    pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)\n" ]
[ [ "numpy.max", "numpy.array", "numpy.reshape", "numpy.minimum", "numpy.random.seed", "numpy.round", "numpy.tile", "numpy.min", "numpy.where", "numpy.sort", "numpy.hstack", "numpy.maximum" ] ]
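The nms(dets, thresh) calls in the entry above go through model.nms_wrapper, which dispatches to compiled CPU/GPU kernels. As a minimal pure-NumPy sketch of the greedy IoU suppression those kernels implement (nms_reference is a name introduced here for illustration, not part of the repo):

import numpy as np

def nms_reference(dets, thresh):
    # dets: (N, 5) array of (x1, y1, x2, y2, score) rows
    x1, y1, x2, y2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]  # highest score first
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(int(i))
        # intersection of the kept box with every remaining box
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # drop boxes that overlap the kept box by more than thresh
        order = order[1:][iou <= thresh]
    return keep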
bala-office/fdic-data-warehouse
[ "b8d34ef9ffa0123d7c42f1b27e2f683211a6cd55" ]
[ "data_extraction.py" ]
[ "import pandas as pd\nimport numpy as np\n\npd.set_option(\"max_rows\", 25)\npd.set_option(\"max_columns\", 80)\n\n# reading in the 5 FDIC csv files and assigning them to dictionary keys. \ncolumns = [\n \"ADDRESBR\", \n \"ADDRESS\", \n \"ASSET\",\n \"BKCLASS\", \n \"BKMO\", \n \"BRCENM\",\n \"BRNUM\", \n \"BRSERTYP\", \n \"CBSA_DIV_NAMB\", \n \"CHARTER\", \n \"CHRTAGNN\",\n \"CHRTAGNT\", \n \"CITY\", \n \"CITY2BR\", \n \"CITYBR\", \n \"CITYHCR\",\n \"CLCODE\", \n \"CNTRYNA\", \n \"CNTRYNAB\", \n \"CNTYNAMB\", \n \"CNTYNUMB\",\n \"DEPDOM\", \n \"DEPSUM\", \n \"DEPSUMBR\", \n \"FDICDBS\", \n \"FDICNAME\", \n \"FED\",\n \"FEDNAME\", \n \"HCTMULT\", \n \"METROBR\",\n \"MICROBR\", \n \"NAMEBR\", \n \"NAMEFULL\",\n \"NAMEHCR\", \n \"OCCDIST\", \n \"OCCNAME\", \n \"REGAGNT\", \n \"RSSDHCR\",\n \"RSSDID\", \n \"SIMS_ACQUIRED_DATE\", \n \"SIMS_ESTABLISHED_DATE\",\n \"SIMS_LATITUDE\", \n \"SIMS_LONGITUDE\", \n \"SPECDESC\", \n \"SPECGRP\",\n \"STALP\", \n \"STALPBR\", \n \"STALPHCR\", \n \"STNAME\", \n \"STNAMEBR\", \n \"UNINUMBR\",\n \"UNIT\", \n \"USA\", \n \"YEAR\", \n \"ZIP\", \n \"ZIPBR\",\n]\n\ndf_dictionary = {}\nfor item in range(2016, 2021):\n df_dictionary[\"df_\" + str(item)] = pd.read_csv(\n \"data/All_\" + str(item) + \".csv\", \n encoding=\"Latin-1\", \n usecols= lambda x: x.upper() in columns\n )\n\ncombined_df = pd.concat([df for df in df_dictionary.values()], ignore_index=True)\ncombined_df.columns = combined_df.columns.str.lower()\n\ncombined_df.rename(\n columns={\n \"addresbr\": \"branch_address\", \n \"asset\":\"total_assets\", \n \"bkclass\":\"bank_class\", \n \"bkmo\": \"bank_office_identifier\",\n \"brcenm\":\"deposit_code\",\n \"brnum\" :\"unique_bank_branch_id\",\n \"brsertyp\":\"branch_service_type\",\n \"cbsa_div_namb\" : \"county_2.5_million+_name\",\n \"chrtagnn\":\"charter_agency_name\",\n \"charter\":\"charter_type\",\n \"chrtagnt\":\"charter_abbreviation\", \n \"city\":\"hq_city\", \n \"city2br\": \"branch_city\",\n \"citybr\": \"branch_city_name\",\n \"clcode\":\"bank_category\", \n \"cntryna\":\"country_hq\",\n \"cntrynab\" : \"branch_country_name\",\n \"cntynamb\":\"branch_county_name\", \n \"cntynumb\": \"branch_county_number\",\n \"depdom\":\"total_domestic_deposits\", \n \"depsum\": \"total_deposits\",\n \"depsumbr\": \"branch_office_deposits\",\n \"fdicdbs\": \"fdic_regional_office_id\", \n \"fdicname\": \"fdic_regional_office_name\",\n \"fed\": \"fed_district_id\", \n \"fedname\": \"fed_district_name\",\n \"hctmult\": \"holding_company_type\",\n \"insagnt1\": \"insurance_status\",\n \"insured\" : \"insured_category\", \n \"metrobr\" : \"urban_area_50000+\", \n \"microbr\" : \"urban_area_10000_under_50000\",\n \"namebr\" : \"branch_name\", \n \"namefull\" : \"bank_name\",\n \"namehcr\" : \"bank_hq_name\", \n \"occdist\" : \"currency_district_id\", \n \"occname\" : \"currency_district_name\", \n \"regagnt\" : \"regulatory_agency_name\", \n \"rssdhcr\" : \"unique_bank_id_fed_board\", \n \"rssdid\" : \"unique_bank_id_number\", \n \"sims_acquired_date\" : \"acquisition_date\", \n \"sims_established_date\" : \"branch_location_established_date\",\n \"sims_latitude\" : \"branch_latitude\",\n \"sims_longitude\" : \"branch_longitude\", \n \"specdesc\" : \"primary_industry_classification\", \n \"specgrp\" : \"bank_asset_classification_category\", \n \"stalp\" : \"hq_state\", \n \"stalpbr\" : \"branch_state\",\n \"stalphcr\" : \"bank_hq_state\",\n \"stname\" : \"hq_state_name\", \n \"stnamebr\": \"branch_state_name\", \n \"uninumbr\" : \"branch_unique_number\", \n 
\"unit\" : \"only_main_office_no_branches\", \n        \"usa\" : \"in_usa\", \n        \"zip\" : \"hq_zipcode\", \n        \"zipbr\" : \"branch_zipcode\",\n    }, \n    inplace=True,\n)\n\nreplace_columns = [\n    \"total_assets\", \n    \"total_domestic_deposits\", \n    \"branch_office_deposits\", \n    \"total_deposits\"\n]\nfor column in replace_columns:\n    combined_df[column] = combined_df[column].str.replace(\",\", \"\")\n\ncategory_columns = [\n    \"bank_class\", \n    \"branch_service_type\", \n    \"county_2.5_million+_name\", \n    \"charter_type\", \n    \"bank_category\", \n    \"fdic_regional_office_id\", \n    \"fdic_regional_office_name\", \n    \"fed_district_id\", \n    \"fed_district_name\",\n    \"holding_company_type\", \n    \"urban_area_50000+\", \n    \"urban_area_10000_under_50000\", \n    \"currency_district_id\", \n    \"currency_district_name\",\n    \"primary_industry_classification\", \n    \"bank_asset_classification_category\", \n    \"bank_hq_state\", \n    \"charter_agency_name\", \n    \"charter_abbreviation\", \n    \"regulatory_agency_name\", \n    \"deposit_code\"\n]\n\nfor column in category_columns:\n    combined_df[column] = combined_df[column].astype(\"category\")\n\ncombined_df = combined_df.astype(\n    {\n        \"acquisition_date\": \"datetime64\",\n        \"branch_location_established_date\": \"datetime64\",\n        \"branch_county_number\": \"int32\",\n        \"branch_zipcode\": \"int32\",\n        \"total_deposits\": \"int64\",\n        \"bank_office_identifier\": \"int8\",\n        \"only_main_office_no_branches\": \"float64\",\n        \"in_usa\": \"int8\",\n        \"total_assets\": \"int64\",\n        \"total_domestic_deposits\": \"int64\",\n        \"branch_office_deposits\": \"int64\",\n    }\n)\n" ]
[ [ "pandas.set_option" ] ]
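A toy sketch of the two cleanup steps the script above depends on, using a hypothetical two-row frame rather than the FDIC csv files: thousands separators are stripped before an integer cast, and low-cardinality code columns are stored as the pandas category dtype (integer codes plus a small lookup table):

import pandas as pd

df = pd.DataFrame({
    "total_assets": ["1,234", "56,789"],  # strings with comma separators
    "bank_class": ["N", "SM"],            # low-cardinality code column
})

df["total_assets"] = df["total_assets"].str.replace(",", "").astype("int64")
df["bank_class"] = df["bank_class"].astype("category")

print(df.dtypes)                    # total_assets int64, bank_class category
print(df.memory_usage(deep=True))   # category stores codes + one small table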
YANGZ001/OrganicChem-LabMate-AI
[ "fb826d85dd852aab987b9bef6856d8da6a4bd9be" ]
[ "continuous-variables/literature-code-in-python/Manuscript/LabMate-AI-balanced.py" ]
[ "import numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import KFold\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.model_selection import GridSearchCV\nfrom joblib import dump  # joblib is a standalone package; sklearn.externals.joblib was removed\n\n#load data\nfilename = 'train_data.txt'\ntrain = pd.read_csv(filename, sep= '\\t')\narray = train.values\nX = array[:,1:-1] \nY = array[:,-1] \n\n#General stuff\nseed = 1234 \nkfold = KFold(n_splits = 10, shuffle = True, random_state = seed)  # shuffle so random_state takes effect\nscoring = 'neg_mean_absolute_error'\nmodel = RandomForestRegressor(random_state=seed)\n\n#Parameters to tune\nestimators = np.arange(100, 1050, 50)\nestimators_int = np.ndarray.tolist(estimators)\nparam_grid = {'n_estimators':estimators_int, 'max_features':('auto', 'sqrt'), 'max_depth':[None, 2, 4]}\n\n#search best parameters and train\ngrid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold, n_jobs=6)\ngrid_result = grid.fit(X, Y)\n\n#print the best data cranked out from the grid search\nnp.savetxt('best_score.txt', [\"best_score: %s\" % grid.best_score_], fmt ='%s')\nbest_params = pd.DataFrame([grid.best_params_], columns=grid.best_params_.keys())\n\n#Predict the future\nfilename2 = 'all_combos.txt'\ndf_all_combos = pd.read_csv(filename2, sep= '\\t')\ndf_train_corrected = train.iloc[:,:-1]\nunseen = pd.concat([df_all_combos, df_train_corrected]).drop_duplicates(keep=False)\narray2 = unseen.values\nX2 = array2[:,1:]\n\nmodel2 = RandomForestRegressor(n_estimators = grid.best_params_['n_estimators'], max_features = grid.best_params_['max_features'], max_depth = grid.best_params_['max_depth'], random_state = seed)\nRF_fit = model2.fit(X, Y)\npredictions = model2.predict(X2)\npredictions_df = pd.DataFrame(data=predictions, columns=['Prediction'])\nfeat_imp = pd.DataFrame(model2.feature_importances_, index=['Pyridine', 'Aldehyde', 'Isocyanide', 'Temperature', 'Solvent', 'Catalyst', 'Time'], columns=['Feature_importances'])\n\n#get individual tree preds\nall_predictions = []\nfor e in model2.estimators_:\n    all_predictions += [e.predict(X2)]\n\n#get variance and dataframe\nvariance = np.var(all_predictions, axis=0)\nvariance_df = pd.DataFrame(data=variance, columns=['Variance'])\n\nassert len(variance) == len(predictions)\n\n#concatenate tables\ninitial_data = pd.DataFrame(data=array2, columns = ['Iteration', 'Pyridine', 'Aldehyde', 'Isocyanide', 'Temperature', 'Solvent', 'Catalyst', 'Time'])\ndf = pd.concat([initial_data, predictions_df, variance_df], axis=1)\n\nif len(Y) < 19:\n\tdf_sorted = df.sort_values(by=['Variance', 'Catalyst'], ascending=[False, True])\n\ttoPerform = df_sorted.iloc[0]\n\nelif len(Y) >= 19 and np.max(Y[10:]) >= 4 * np.max(Y[:10]):  # compare later runs against the first ten (Y[:9] left index 9 out of both slices)\n\tdf_sorted = df.sort_values(by=['Prediction', 'Catalyst'], ascending=[False, True])\n\tpreliminary = df_sorted.iloc[0:5]\n\tdf_sorted2 = preliminary.sort_values(by=['Variance', 'Catalyst'], ascending=[True, True])\n\ttoPerform = df_sorted2.iloc[0]\n\nelse:\n\tdf_sorted = df.sort_values(by=['Prediction', 'Catalyst'], ascending=[False, True])\n\tpreliminary = df_sorted.iloc[0:10]\n\tdf_sorted2 = preliminary.sort_values(by=['Variance', 'Catalyst'], ascending=[False, True])\n\ttoPerform = df_sorted2.iloc[0]\n\n#save data\nfeat_imp.to_csv('feature_importances.txt', sep= '\\t') \nbest_params.to_csv('best_parameters.txt', sep= '\\t')\ntoPerform.to_csv('selected_reaction.txt', sep = '\\t')\ndf_sorted.to_csv('predictions.txt', sep = '\\t')\nfilename3 = 'random_forest_model_grid.sav'\ndump(grid, filename3)\n\nprint('Have a good one, mate!')\n" ]
[ [ "numpy.max", "numpy.savetxt", "sklearn.model_selection.GridSearchCV", "pandas.DataFrame", "sklearn.externals.joblib.dump", "numpy.ndarray.tolist", "numpy.arange", "sklearn.ensemble.RandomForestRegressor", "pandas.concat", "sklearn.model_selection.KFold", "pandas.read_csv", "numpy.var" ] ]
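A self-contained sketch, on synthetic data, of the exploration signal the script above derives from model2.estimators_: the variance of the individual tree predictions acts as a cheap per-candidate uncertainty estimate, and the most uncertain unseen candidate is a natural next experiment. All names and data below are illustrative:

import numpy as np
from sklearn.ensemble import RandomForestRegressor

rng = np.random.RandomState(0)
X_train = rng.rand(50, 3)                       # 50 past "reactions", 3 variables
y_train = X_train.sum(axis=1) + 0.1 * rng.randn(50)
X_pool = rng.rand(10, 3)                        # unseen candidate conditions

model = RandomForestRegressor(n_estimators=200, random_state=0).fit(X_train, y_train)

# stack per-tree predictions: shape (n_trees, n_candidates)
tree_preds = np.stack([tree.predict(X_pool) for tree in model.estimators_])
uncertainty = tree_preds.var(axis=0)            # high variance = trees disagree
print('most uncertain candidate:', int(np.argmax(uncertainty)))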
cornhundred/clustergrammer-glidget
[ "14b622ea91e236bee08c47b91caf12178f97aa8b" ]
[ "examples/himc_helper_functions_v0_17_1.py" ]
[ "# Version: 0.17.1\n# This is a set of scripts that are used in processing 10x single cell data\n# improved dehsahing pipeline\n\nimport gzip\nfrom scipy import io\nfrom scipy.sparse import csc_matrix\nfrom ast import literal_eval as make_tuple\nimport pandas as pd\nimport numpy as np\nfrom copy import deepcopy\nimport os\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler\nimport random\nfrom functools import lru_cache\n\ndef get_version():\n print('0.17.1', 'two dimensional clustering bug fix')\n\ndef make_dir(directory):\n if not os.path.exists(directory):\n os.mkdir(directory)\n\ndef load_crv3_feature_matrix(inst_path, to_csc=True, hto_list=None,\n drop_default_lane=True, add_lane_to_barcodes=None, sp={}, df={}):\n\n # Read Barcodes\n ###########################\n filename = inst_path + 'barcodes.tsv.gz'\n f = gzip.open(filename, 'rt')\n lines = f.readlines()\n f.close()\n\n # if we are adding a lane, then we always want to drop the default cr lane\n if add_lane_to_barcodes is not None:\n drop_default_lane = True\n\n barcodes = []\n for inst_bc in lines:\n inst_bc = inst_bc.strip().split('\\t')\n\n # remove dash from barcodes if necessary\n if drop_default_lane:\n if '-' in inst_bc[0]:\n inst_bc[0] = inst_bc[0].split('-')[0]\n\n\n barcodes.append(inst_bc[0])\n\n if add_lane_to_barcodes is not None:\n barcodes = [x + '-' + add_lane_to_barcodes for x in barcodes]\n\n # Load Matrix\n #################\n mat = io.mmread(inst_path + 'matrix.mtx.gz')\n mat_csr = mat.tocsr()\n\n # Get Indexes of Feature Types\n ##################################\n filename = inst_path + 'features.tsv.gz'\n f = gzip.open(filename, 'rt')\n lines = f.readlines()\n f.close()\n\n feature_indexes = {}\n feature_lines = {}\n\n for index in range(len(lines)):\n\n inst_line = lines[index].strip().split('\\t')\n\n if len(inst_line) > 1:\n\n inst_feat = inst_line[2].replace('Gene Expression', 'gex')\n inst_feat = inst_feat.replace('Antibody Capture', 'adt').replace('Custom', 'custom')\n\n if hto_list is not None:\n if inst_feat == 'custom' or inst_feat == 'adt':\n inst_feat = ('hto' if inst_line[0] in hto_list else 'adt')\n\n if inst_feat not in feature_indexes:\n feature_indexes[inst_feat] = []\n\n feature_indexes[inst_feat].append(index)\n\n else:\n inst_feat = 'gex'\n\n if inst_feat not in feature_indexes:\n feature_indexes[inst_feat] = []\n\n feature_indexes[inst_feat].append(index)\n\n inst_sp = {}\n\n for inst_feat in feature_indexes:\n inst_sp[inst_feat] = {}\n\n inst_sp[inst_feat]['barcodes'] = barcodes\n\n inst_indexes = feature_indexes[inst_feat]\n\n # Separate feature lists\n ser_lines = pd.Series(lines)\n ser_lines_found = ser_lines[inst_indexes]\n lines_found = list(ser_lines_found.array)\n\n # save feature lines\n feature_lines[inst_feat] = lines_found\n\n # save as compressed sparse column matrix (for barcode filtering)\n mat_filt = (mat_csr[inst_indexes, :].tocsc() if to_csc else mat_csr[inst_indexes, :])\n\n inst_sp[inst_feat]['mat'] = mat_filt\n\n # Make unique feature names\n for inst_feat in feature_lines:\n feat_lines = feature_lines[inst_feat]\n feat_lines = [x.strip().split('\\t') for x in feat_lines]\n\n if len(feat_lines[0]) > 1:\n # find non-unique initial feature names (add id later if necessary)\n ini_names = [x[1] for x in feat_lines]\n\n ini_name_count = pd.Series(ini_names).value_counts()\n duplicate_names = ini_name_count[ini_name_count > 1].index.tolist()\n\n new_names = [x[1] if x[1] not in duplicate_names else x[1] + '_' + x[0] for x in feat_lines]\n 
else:\n new_names = [x[0] for x in feat_lines]\n\n # quick hack to clean up names\n new_names = [x.replace('_TotalSeqB', '') for x in new_names]\n\n inst_sp[inst_feat]['features'] = new_names\n\n # save feature lines\n if len(feat_lines[0]) > 1:\n cols = [inst_feat + '-ID', inst_feat + '-name', 'feature-type']\n df['meta_' + inst_feat] = pd.DataFrame(feat_lines, index=new_names, columns=cols)\n else:\n df['meta_' + inst_feat] = pd.DataFrame(feat_lines, index=new_names, columns=[inst_feat + '-ID'])\n\n sp['ini'] = inst_sp\n\n return sp, df\n\ndef load_crv2_gene_matrix(inst_path):\n '''\n Loads gene expression data from 10x in sparse matrix format and returns a\n Pandas dataframe\n '''\n\n import pandas as pd\n from scipy import io\n from scipy import sparse\n from ast import literal_eval as make_tuple\n\n # matrix\n mat = io.mmread( inst_path + 'matrix.mtx').tocsc()\n\n\n # genes\n filename = inst_path + 'genes.tsv'\n f = open(filename, 'r')\n lines_genes = f.readlines()\n f.close()\n\n # make unique gene names\n #############################\n gene_list = [x.strip().split('\\t') for x in lines_genes]\n\n # find non-unique initial gene names\n ini_names = [x[1] for x in gene_list]\n\n ini_name_count = pd.Series(ini_names).value_counts()\n duplicate_names = ini_name_count[ini_name_count > 1].index.tolist()\n genes = [x[1] if x[1] not in duplicate_names else x[1] + '_' + x[0] for x in gene_list]\n\n\n # barcodes\n filename = inst_path + 'barcodes.tsv'\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n\n cell_barcodes = []\n for inst_bc in lines:\n inst_bc = inst_bc.strip().split('\\t')\n\n # remove dash from barcodes if necessary\n if '-' in inst_bc[0]:\n inst_bc[0] = inst_bc[0].split('-')[0]\n\n cell_barcodes.append(inst_bc[0])\n\n # parse tuples if necessary\n try:\n cell_barcodes = [make_tuple(x) for x in cell_barcodes]\n except:\n pass\n\n try:\n genes = [make_tuple(x) for x in genes]\n except:\n pass\n\n # generate feature_data\n feature_data = {}\n feature_data['gex'] = {}\n feature_data['gex']['features'] = genes\n feature_data['gex']['barcodes'] = cell_barcodes\n feature_data['gex']['mat'] = mat\n\n return feature_data\n\ndef plot_metadata(meta_cell, metadata_type='gex-umi-sum', logy=True, logx=False, min_umi=0, max_umi=1e9, figsize=(10,5)):\n\n ser_meta = meta_cell[metadata_type]\n\n # filter\n ser_meta = ser_meta[ser_meta >= min_umi]\n ser_meta = ser_meta[ser_meta <= max_umi]\n ser_meta = ser_meta.sort_values(ascending=False)\n\n ser_meta.plot(logy=logy, logx=logx, figsize=figsize)\n\ndef plot_umi_levels(feature_data, feature_type='gex', logy=True, logx=False,\n figsize=(10,5), min_umi=0, max_umi=1e9, zscore_features=False):\n '''\n This function takes a feature data format or dictionary of DataFrames and plots\n UMI levels\n '''\n\n if 'mat' in feature_data[feature_type]:\n mat_csc = feature_data[feature_type]['mat']\n\n if zscore_features:\n print('z-scoring feature_data')\n inst_df = pd.DataFrame(data=mat_csc.todense(), columns=feature_data[feature_type]['barcodes'])\n\n net.load_df(inst_df)\n net.normalize(axis='row', norm_type='zscore')\n inst_df = net.export_df()\n\n # sort\n ser_sum = inst_df.sum(axis=0).sort_values(ascending=False)\n\n else:\n # drop cells with fewer than threshold events\n ser_sum = mat_csc.sum(axis=0)\n arr_sum = np.asarray(ser_sum[0,:])\n\n # sort\n ser_sum = pd.Series(arr_sum[0], index=feature_data[feature_type]['barcodes']).sort_values(ascending=False)\n\n # filter\n ser_sum = ser_sum[ser_sum >= min_umi]\n ser_sum = ser_sum[ser_sum <= 
max_umi]\n\n else:\n inst_df = feature_data[feature_type]\n\n if zscore_features:\n print('zscore features')\n net.load_df(inst_df)\n net.normalize(axis='row', norm_type='zscore')\n inst_df = net.export_df()\n\n # sort\n ser_sum = inst_df.sum(axis=0).sort_values(ascending=False)\n\n # filter\n ser_sum = ser_sum[ser_sum >= min_umi]\n ser_sum = ser_sum[ser_sum <= max_umi]\n\n ser_sum.plot(logy=logy, logx=logx, figsize=figsize)\n return ser_sum\n\ndef filter_barcodes_by_umi(feature_data, feature_type, min_umi=0, max_umi=1e8,\n make_sparse=True, zscore_features=False):\n\n # feature data format\n ########################\n if 'mat' in feature_data[feature_type]:\n\n # sparse matrix\n ##################\n mat_csc = feature_data[feature_type]['mat']\n\n if zscore_features:\n print('*** warning, z-scoring not supported in feature_data format')\n\n # drop barcodes with fewer than threshold UMI\n ser_sum = mat_csc.sum(axis=0)\n arr_sum = np.asarray(ser_sum[0,:])\n ser_sum = pd.Series(arr_sum[0])\n ser_keep = ser_sum[ser_sum >= min_umi]\n ser_keep = ser_keep[ser_keep <= max_umi]\n\n # these indexes will be used to filter all features\n keep_indexes = ser_keep.index.tolist()\n\n # filter barcodes\n barcodes = feature_data[feature_type]['barcodes']\n ser_barcodes = pd.Series(barcodes)\n barcodes_filt = ser_barcodes[keep_indexes].get_values()\n\n # return Dictionary of DataFrames\n filtered_data = {}\n for inst_feat in feature_data:\n\n if 'meta' not in inst_feat:\n\n inst_mat = feature_data[inst_feat]['mat']\n mat_filt = inst_mat[:, keep_indexes]\n feature_names = feature_data[inst_feat]['features']\n\n inst_data = {}\n inst_data['mat'] = mat_filt\n inst_data['barcodes'] = barcodes_filt\n inst_data['features'] = feature_names\n\n filtered_data[inst_feat] = inst_data\n\n else:\n\n # dense matrix\n ###############\n # drop barcodes with fewer than threshold UMI\n inst_df = feature_data[feature_type]\n\n if zscore_features:\n print('z-scoring features')\n net.load_df(inst_df)\n net.normalize(axis='row', norm_type='zscore')\n inst_df = net.export_df()\n\n ser_sum = inst_df.sum(axis=0)\n ser_keep = ser_sum[ser_sum >= min_umi]\n ser_keep = ser_keep[ser_keep <= max_umi]\n keep_cols = ser_keep.index.tolist()\n\n # filter data\n filtered_data = {}\n for inst_feat in feature_data:\n\n filtered_data[inst_feat] = feature_data[inst_feat][keep_cols]\n\n return filtered_data\n\ndef convert_to_dense(feat_data, df=None):\n # initialize df if necessary\n if df is None:\n df = {}\n for inst_feat in feat_data:\n mat = feat_data[inst_feat]['mat']\n rows = feat_data[inst_feat]['features']\n cols = feat_data[inst_feat]['barcodes']\n\n dense_mat = mat.todense()\n df[inst_feat] = pd.DataFrame(dense_mat, index=rows, columns=cols)\n\n return df\n\ndef check_feature_data_size(feature_data):\n for inst_feat in feature_data:\n if 'meta' not in inst_feat:\n print(inst_feat)\n print(len(feature_data[inst_feat]['features']), len(feature_data[inst_feat]['barcodes']))\n print(feature_data[inst_feat]['mat'].shape, '\\n')\n\ndef get_mito_genes(gene_list):\n # Removing Mitochondrial Genes\n ini_mito_list = ['MTRNR2L11', 'MTRF1', 'MTRNR2L12', 'MTRNR2L13', 'MTRF1L',\n 'MTRNR2L6', 'MTRNR2L7','MTRNR2L10', 'MTRNR2L8', 'MTRNR2L5',\n 'MTRNR2L1', 'MTRNR2L3', 'MTRNR2L4']\n\n list_mito_genes = list(map(lambda x:x.lower(), ini_mito_list))\n\n found_mito_genes = [x for x in gene_list if 'mt-' == x[:3].lower() or\n x.split('_')[0].lower() in list_mito_genes]\n\n return found_mito_genes\n\ndef mito_prop_and_suspected_dead(df_gex, meta_cell, 
mito_thresh=0.9,\n plot_mito=True, s=5, alpha=0.2):\n\n print('find mito genes')\n all_genes = df_gex.index.tolist()\n mito_genes = get_mito_genes(all_genes)\n\n print('cacl mito sum')\n mito_sum = df_gex.loc[mito_genes].sum(axis=0)\n\n gex_sum = meta_cell['gex-umi-sum']\n\n mito_proportion = mito_sum/gex_sum\n\n print('assign live-dead')\n\n\n meta_cell['dead-cell-mito'] = mito_proportion.apply(lambda x: 'dead-cell' if x >= mito_thresh else 'live-cell')\n\n meta_cell['gex-mito-proportion-umi'] = mito_proportion\n\n if plot_mito:\n # mito_proportion.sort_values(ascending=False).plot()\n\n all_cells = meta_cell.index.tolist()\n color_list = ['red' if meta_cell.loc[x,'dead-cell-mito'] == 'dead-cell' else 'blue' for x in all_cells]\n\n meta_cell.plot(kind='scatter',\n x='gex-umi-sum-ash',\n y='gex-mito-proportion-umi',\n alpha=alpha,\n s=s,\n figsize=(10,10),\n c=color_list)\n\n ser_dead_counts = meta_cell['dead-cell-mito'].value_counts()\n print('live-cell', ser_dead_counts['live-cell'])\n if 'dead-cell' in ser_dead_counts:\n print('dead-cell', ser_dead_counts['dead-cell'])\n\n\n return meta_cell\n\ndef label_suspected_red_blood_cells(df, heme_genes=['HBA1', 'HBA2', 'HBB', 'HBD', 'HBQ1'],\n thresh=100,\n keep='below',\n x_max=1e4,\n y_max=1500):\n\n # Added HBD and HBQ based on HCA bone marrow signatures\n\n # save heme gene umi levels (no umi or ash normalization)\n heme_genes = [x for x in df['gex'].index.tolist() if x in heme_genes]\n ser_heme = df['gex'].loc[heme_genes].sum(axis=0)\n df['meta_cell']['heme-ab-umi'] = ser_heme\n\n\n keep_barcodes, drop_barcodes = one_dim_filter(df['meta_cell'],\n name='heme-ab-umi',\n thresh=thresh,\n keep=keep,\n x_max=x_max,\n y_max=y_max)\n\n df['meta_cell'].loc[keep_barcodes, 'red-blood-cell'] = 'False'\n df['meta_cell'].loc[drop_barcodes, 'red-blood-cell'] = 'True'\n\ndef get_ig_genes(genes):\n ig_genes = [x for x in genes if x[:2] == 'IG']\n return ig_genes\n\ndef set_hto_thresh(df_hto_ash, meta_hto, hto_name, scale_factor, thresh=1, xlim=7, ylim =100):\n\n ser_hto = deepcopy(df_hto_ash.loc[hto_name]) * (1/scale_factor)\n\n n, bins, patches = plt.hist(ser_hto, bins=100, range=(0, xlim))\n\n colors = []\n for inst_bin in bins:\n if inst_bin <= thresh:\n colors.append('red')\n else:\n colors.append('blue')\n\n # apply the same color for each class to match the map\n for patch, color in zip(patches, colors):\n patch.set_facecolor(color)\n\n # hto threshold after arcsinh, and scale\n meta_hto.loc[hto_name, 'hto-threshold-ash-scale'] = thresh\n # hto threshold after arcinh\n meta_hto.loc[hto_name, 'hto-threshold-ash'] = thresh * scale_factor\n # hto threshold in umi level\n meta_hto.loc[hto_name, 'hto-threshold-umi'] = np.sinh(thresh * scale_factor) * 5\n\n # save scale factor (to align peaks of HTO histograms)\n meta_hto.loc[hto_name, 'scale-factor'] = scale_factor\n\n plt.ylim((0,ylim))\n\ndef ini_meta_cell(df):\n list_ser = []\n\n # look for available data types\n found_types = list(set(['gex', 'adt', 'hto']).intersection(df.keys()))\n for inst_type in found_types:\n\n # calc umi sum\n inst_ser = df[inst_type].sum(axis=0)\n inst_ser.name = inst_type + '-umi-sum'\n\n list_ser.append(inst_ser)\n\n df['meta_cell'] = pd.DataFrame(data=list_ser).transpose()\n if 'gex' in df.keys():\n df_gex = deepcopy(df['gex'])\n df_gex[df_gex >= 1] = 1\n ser_gene_num = df_gex.sum(axis=0)\n df['meta_cell']['num_expressed_genes'] = ser_gene_num\n return df\n\ndef meta_cell_gex_wo_mito_ribo(df_gex_ini, meta_cell):\n\n df_gex = deepcopy(df_gex_ini)\n\n # calc umi sum\n 
ser_umi_sum = df_gex.sum(axis=0)\n\n    meta_cell['gex-umi-sum-no-ribo-mito'] = ser_umi_sum\n\n    # count number of measured genes\n\n    df_gex[df_gex >= 1] = 1\n    ser_gene_num = df_gex.sum(axis=0)\n\n    meta_cell['num_expressed_genes_no-ribo-mito'] = ser_gene_num\n\n    return meta_cell\n\ndef ini_meta_gene(df_gex_ini):\n\n    df_gex = deepcopy(df_gex_ini)\n\n    # Mean UMI\n    ser_gene_mean = df_gex.mean(axis=1)\n    ser_gene_mean.name = 'mean'\n\n    # Variance UMI (was mean(axis=1), which duplicated the mean column)\n    ser_gene_var = df_gex.var(axis=1)\n    ser_gene_var.name = 'variance'\n\n    # fraction of cells measured\n    df_gex[df_gex >= 1] = 1\n    ser_gene_meas = df_gex.sum(axis=1)/df_gex.shape[1]\n    ser_gene_meas.name = 'fraction of cells measured'\n\n    meta_gene = pd.concat([ser_gene_mean, ser_gene_var, ser_gene_meas], axis=1)\n\n    return meta_gene\n\ndef plot_signal_vs_noise(df, alpha=0.25, s=10, hto_range=7, inf_replace=1000):\n\n    fig, axes = plt.subplots(nrows=1, ncols=2)\n\n    list_first = []\n    list_second = []\n    list_cells = []\n    for inst_cell in df.columns.tolist():\n        inst_ser = df[inst_cell].sort_values(ascending=False)\n        inst_first = inst_ser.get_values()[0]\n        inst_second = inst_ser.get_values()[1]\n\n        list_first.append(inst_first)\n        list_second.append(inst_second)\n        list_cells.append(inst_cell)\n\n    ser_first = pd.Series(data=list_first, index=list_cells, name='first highest HTO')\n    ser_second = pd.Series(data=list_second, index=list_cells, name='second highest HTO')\n\n    df_comp = pd.concat([ser_first, ser_second], axis=1).transpose()\n\n    df_comp.transpose().plot(kind='scatter', figsize=(5,5),\n                             x='first highest HTO', y='second highest HTO',\n                             ylim=(0,hto_range), xlim=(0,hto_range), alpha=alpha, s=s, ax=axes[0])\n\n    sn_ratio = np.log2(df_comp.loc['first highest HTO']/df_comp.loc['second highest HTO'])\n\n    # replace positive infinities with set value\n    sn_ratio = sn_ratio.replace(np.Inf, inf_replace)\n    sn_ratio.hist(bins=100, ax=axes[1], figsize=(15,7))\n\n    return df_comp, sn_ratio\n\ndef filter_ribo_mito_from_gex(df):\n\n    # save avg values to meta_cell\n    if 'gex-mito-avg' not in df['meta_cell']:\n\n        df_gex = deepcopy(df['gex'])\n        meta_cell = deepcopy(df['meta_cell'])\n\n        all_genes = df_gex.index.tolist()\n\n        ini_genes = deepcopy(all_genes)\n\n        ribo_rpl = [x for x in all_genes if x.lower().startswith('rpl')]\n        ribo_rps = [x for x in all_genes if x.lower().startswith('rps')]\n        ribo_genes = ribo_rpl + ribo_rps\n\n        # calculate average ribo gene expression\n        ser_ribo = df_gex.loc[ribo_genes].mean(axis=0)\n\n        keep_genes = [x for x in all_genes if x not in ribo_genes]\n\n        df_gex = df_gex.loc[keep_genes]\n\n        all_genes = df_gex.index.tolist()\n\n        mito_genes = get_mito_genes(all_genes)\n\n        # calculate average mito gene expression\n        ser_mito = df_gex.loc[mito_genes].mean(axis=0)\n\n        keep_genes = [x for x in all_genes if x not in mito_genes]\n\n        # save mito and ribo genes\n        mr_genes = sorted(list(set(ini_genes).difference(keep_genes)))\n        df_mr = df['gex'].loc[mr_genes]\n\n        # drop mito and ribo genes\n        df_gex = df['gex'].loc[keep_genes]\n\n        meta_cell['gex-ribo-avg'] = ser_ribo\n        meta_cell['gex-mito-avg'] = ser_mito\n\n        df['gex'] = df_gex\n        df['meta_cell'] = meta_cell\n        df['gex-mr'] = df_mr\n    else:\n        print('already filtered mito and ribo genes')\n\n    return df\n\ndef add_cats_from_meta(barcodes, df_meta, add_cat_list):\n    '''\n    Add categories from df_meta.\n    '''\n\n    # get metadata of interest (add_cat_list) from barcodes of interest\n    df_cats = df_meta.loc[barcodes][add_cat_list]\n\n    # get list of cats\n    list_cat_ini = [list(x) for x in df_cats.values]\n\n    # add titles 
to cats\n list_cat_titles = [ list([str(x) + ': ' + str(y) for x,y in zip(add_cat_list, a)]) for a in list_cat_ini]\n\n # add barcodes to new columns\n new_cols = [tuple([x] + y) for x,y in zip(barcodes, list_cat_titles)]\n\n return new_cols\n\n\ndef make_cyto_export(df, num_var_genes=500, inf_replace=10):\n\n keep_meta_base = ['gex-umi-sum',\n 'gex-num-unique',\n 'gex-mito-proportion-umi',\n 'gex-mito-avg',\n 'gex-ribo-avg',\n 'hto-umi-sum',\n 'hto-sn',\n 'adt-umi-sum'\n ]\n\n df_cyto = None\n\n for inst_type in ['gex', 'adt', 'hto', 'meta_cell']:\n if inst_type in df.keys():\n inst_df = deepcopy(df[inst_type])\n\n # filter for top var genes\n if inst_type == 'gex':\n keep_var_genes = inst_df.var(axis=1).sort_values(ascending=False).index.tolist()[:num_var_genes]\n\n inst_df = inst_df.loc[keep_var_genes]\n\n if 'meta' not in inst_type:\n inst_df.index = [inst_type.upper() + '_' + x for x in inst_df.index.tolist()]\n\n else:\n keep_meta = [metadata for metadata in keep_meta_base if metadata in inst_df.columns]\n inst_df = inst_df[keep_meta].transpose()\n inst_df.index = [ x.split('-')[0].upper() + '_der_' +\n '_'.join( x.split('-')[1:]).replace('num_unique', 'unique_gene_count')\n for x in inst_df.index.tolist()]\n\n print(inst_type, inst_df.shape)\n\n if df_cyto is None:\n df_cyto = inst_df\n else:\n df_cyto = df_cyto.append(inst_df)\n\n df_export = df_cyto.transpose()\n\n cells = df_export.index.tolist()\n index_cells = [str(x/100) for x in range(len(cells))]\n df_export.index = index_cells\n\n # Add noise\n data_columns = [x for x in df_export.columns if '_der_' not in x]\n\n not_derived_dataframe_shape = df_export[data_columns].shape\n\n # center the noise about zero\n rand_mat = np.random.rand(not_derived_dataframe_shape[0], not_derived_dataframe_shape[1]) - 0.5\n\n df_noise = pd.DataFrame(data=rand_mat, index=index_cells, columns=data_columns).round(2)\n df_export[data_columns] += df_noise\n\n ser_index = pd.Series(data=index_cells, index=cells)\n df['meta_cell']['Cytobank-Index'] = ser_index\n\n df_export.index.name = 'cell_index'\n\n # replace inf and nans\n df_export[df_export == np.inf] = inf_replace\n df_export.fillna(0, inplace=True)\n\n\n df['cyto-export'] = df_export\n\n return df\n\n# # alternate lambda function\n# def sum_field(dataframe, field):\n# return dataframe[field].sum(axis=0)\n\n# list_ser_functions = {**{inst_type+'-umi-sum':(lambda y,inst_type=inst_type: sum_field(y,inst_type))\\\n# for inst_type in ['gex', 'adt', 'hto']},\n# }\n\n# for key,value in list_ser_functions.items():\n# list_ser.append(value(df))\n# df['meta_cell'] = pd.DataFrame(data=list_ser).transpose()\n\n\ndef load_prod_vdj(inst_path):\n inst_df = pd.read_csv(inst_path)\n print('all contigs', inst_df.shape)\n ser_prod = inst_df['productive']\n\n keep_contigs = ser_prod[ser_prod == True].index.tolist()\n inst_df = inst_df.loc[keep_contigs]\n print('productive contigs', inst_df.shape)\n return inst_df\n\ndef concat_contig(ser_row):\n inst_v = str(ser_row['v_gene'])\n inst_d = str(ser_row['d_gene'])\n inst_j = str(ser_row['j_gene'])\n inst_c = str(ser_row['c_gene'])\n inst_cdr3 = str(ser_row['cdr3'])\n\n # do not include c gene in clonotype definition (do not include c_gene)\n inst_contig = inst_v + '_' + inst_d + '_' + inst_j + '_' + inst_cdr3\n\n return inst_contig\n\ndef get_unique_contigs(inst_df):\n '''\n Define contigs as the merge of v, d, j, and cdr3 genes\n Then, find all unique contigs.\n '''\n all_contigs = []\n for inst_index in inst_df.index.tolist():\n ser_row = inst_df.loc[inst_index]\n 
inst_contig = concat_contig(ser_row)\n all_contigs.append(inst_contig)\n unique_contigs = sorted(list(set(all_contigs)))\n return unique_contigs\n\ndef assign_ids_to_contigs(unique_contigs):\n '''\n Generate a unique contig id for all contigs\n return dictionary of contig-to-id and vice versa\n '''\n contig_id_dict = {}\n id_contig_dict = {}\n for inst_index in range(len(unique_contigs)):\n inst_id = 'contig-id-' + str(inst_index)\n inst_contig = unique_contigs[inst_index]\n contig_id_dict[inst_contig] = inst_id\n id_contig_dict[inst_id] = inst_contig\n\n return contig_id_dict, id_contig_dict\n\ndef get_bc_contig_combos(inst_df, contig_id_dict):\n '''\n Loop through the merged (across samples) filtered contigs\n which has one row per contig\n\n Define the contig (concat vdj genes) and find its unique id\n using contig_id_dict\n\n Assemble list of contigs associated with each barcode (dict\n with barcode keys)\n '''\n bc_contig_combos = {}\n for inst_row in inst_df.index.tolist():\n ser_row = inst_df.loc[inst_row]\n inst_bc = ser_row['barcode']\n inst_contig = concat_contig(ser_row)\n inst_id = contig_id_dict[inst_contig]\n\n if inst_bc not in bc_contig_combos:\n bc_contig_combos[inst_bc] = []\n\n bc_contig_combos[inst_bc].append(inst_id)\n\n return bc_contig_combos\n\ndef generate_new_clonotypes(bc_contig_combos):\n '''\n Define contig combinations as a new set of clones\n Number the new clones (rank by abundance)\n\n Look up contig combo for each barcode (e.g. clone)\n Look up new clone name for contig comb\n '''\n\n # find most abundant contig combos (clones)\n contig_id_combos = []\n for inst_bc in bc_contig_combos:\n inst_combo = '_'.join(sorted(bc_contig_combos[inst_bc]))\n contig_id_combos.append(inst_combo)\n ser_combos = pd.Series(contig_id_combos).value_counts()\n\n # number new clones (contig combos) based on abundance\n inst_id = 1\n combo_clone_dict = {}\n for inst_combo in ser_combos.index.tolist():\n new_clone_name = 'custom-clone-' + str(inst_id)\n combo_clone_dict[inst_combo] = new_clone_name\n inst_id = inst_id + 1\n\n # make dictionary of new clones for each barcode\n cell_new_clone = {}\n for inst_bc in bc_contig_combos:\n inst_combo = '_'.join(sorted(bc_contig_combos[inst_bc]))\n new_clone = combo_clone_dict[inst_combo]\n cell_new_clone[inst_bc] = new_clone\n\n return cell_new_clone\n\ndef add_uniform_noise(df_ini):\n df = deepcopy(df_ini)\n rows = df.index.tolist()\n cols = df.columns.tolist()\n\n # generate random matrix\n np.random.seed(99)\n num_rows = df.shape[0]\n num_cols = df.shape[1]\n mat = np.random.rand(num_rows, num_cols)\n\n # make random noise dataframe centered about zero\n df_noise = pd.DataFrame(data=mat, columns=cols, index=rows).round(2) - 0.5\n\n df_new = df + df_noise\n\n return df_new\n\ndef filter_sparse_matrix_by_list(feat, feature_type='gex', keep_rows='all', keep_cols='all'):\n '''\n This function filters sparse data by lists of rows/cols.\n '''\n\n feat_filt = deepcopy(feat)\n\n # get all cols from any feature\n tmp_feat = list(feat_filt.keys())[0]\n cols = feat_filt[tmp_feat]['barcodes']\n\n # Feature (row) Level Filtering\n #################################\n # apply to single feature\n if isinstance(keep_rows, list):\n\n # get initial feature list\n rows_orig = feat_filt[feature_type]['features']\n\n index_dict = dict((value, idx) for idx,value in enumerate(rows_orig))\n rows_idx = [index_dict[x] for x in keep_rows]\n\n # copy feature data of interest\n inst_mat = deepcopy(feat_filt[feature_type]['mat'])\n inst_mat = 
inst_mat[rows_idx,:]\n\n # filter rows for single feature\n feat_filt[feature_type]['barcodes'] = cols\n feat_filt[feature_type]['features'] = keep_rows\n feat_filt[feature_type]['mat'] = inst_mat\n\n # Cell (col) Level Filtering\n #################################\n # apply to all features\n if isinstance(keep_cols, list):\n\n index_dict = dict((value, idx) for idx,value in enumerate(cols))\n cols_idx = [index_dict[x] for x in keep_cols]\n\n # filter all features by columns\n for inst_feat in feat:\n\n # get initial feature list\n rows_orig = feat_filt[inst_feat]['features']\n\n inst_mat = deepcopy(feat_filt[inst_feat]['mat'])\n inst_mat = inst_mat[:,cols_idx]\n\n # filter single feature by columns\n feat_filt[inst_feat]['barcodes'] = keep_cols\n feat_filt[inst_feat]['features'] = rows_orig\n feat_filt[inst_feat]['mat'] = inst_mat\n\n return feat_filt\n\ndef preserve_genes_most_variant(input_df, genes_most_variant=500):\n gene_variance = (input_df['gex']['mat'].power(2)).mean(1) - (\n np.power(input_df['gex']['mat'].mean(1), 2))\n gene_variance_sorted = sorted([(index, variance) for index, variance in enumerate(gene_variance)],\n key=(lambda x: x[1]), reverse=True)\n feature_data_gene_variance_filtered = filter_sparse_matrix_by_list(input_df,\n feature_type='gex',\n keep_rows=[input_df['gex'][\n 'features'][\n each_gene_variance[0]] for\n each_gene_variance in\n gene_variance_sorted[\n :genes_most_variant]])\n\n return feature_data_gene_variance_filtered\n\ndef filter_ribo_mito_from_list(all_genes):\n\n # find ribosomal genes\n ribo_rpl = [x for x in all_genes if 'RPL' in x]\n ribo_rps = [x for x in all_genes if 'RPS' in x]\n ribo_genes = ribo_rpl + ribo_rps\n\n\n # Find mitochondrial genes\n list_mito_genes = ['MTRNR2L11', 'MTRF1', 'MTRNR2L12', 'MTRNR2L13', 'MTRF1L', 'MTRNR2L6', 'MTRNR2L7',\n 'MTRNR2L10', 'MTRNR2L8', 'MTRNR2L5', 'MTRNR2L1', 'MTRNR2L3', 'MTRNR2L4']\n\n mito_genes = [x for x in all_genes if 'MT-' == x[:3] or\n x.split('_')[0] in list_mito_genes]\n\n\n # filter genes\n keep_genes = [x for x in all_genes if x not in ribo_genes]\n keep_genes = [x for x in keep_genes if x not in mito_genes]\n\n return keep_genes\n\n\ndef calc_feat_sum_and_unique_count_across_cells(feat_data, inst_feat):\n # barcodes = (feat_data[inst_feat]['barcodes'] if 'barcodes' in feat_data[inst_feat].keys() else feat_data[inst_feat].columns)\n barcodes = feat_data[inst_feat]['barcodes']\n mat = deepcopy(feat_data[inst_feat]['mat'])\n\n # sum umi of measured features\n arr_sum = np.asarray(mat.sum(axis=0))[0]\n ser_sum = pd.Series(arr_sum, index=barcodes, name=inst_feat + '-umi-sum')\n\n # save ash version of umi sum\n ser_sum_ash = np.arcsinh(ser_sum/5)\n ser_sum_ash.name = inst_feat + '-umi-sum-ash'\n\n # count number of measured features\n mat[mat > 1] = 1\n arr_count = np.asarray(mat.sum(axis=0))[0]\n ser_count = pd.Series(arr_count, index=barcodes, name=inst_feat + '-num-unique')\n\n inst_df = pd.concat([ser_sum, ser_sum_ash, ser_count], axis=1)\n\n return inst_df\n\n\ndef sample_meta(df_meta_ini, sample_name):\n list_index = []\n list_data = []\n\n df_meta = deepcopy(df_meta_ini)\n\n # proprtion of singlets\n #########################\n ser_cell_per = df_meta['cell-per-bead'].value_counts()\n\n num_singlets = ser_cell_per.loc['singlet']\n num_total = ser_cell_per.sum()\n\n # number of singlets\n list_index.append('number-singlets')\n list_data.append(num_singlets)\n\n # get singlets only\n df_meta = df_meta[df_meta['cell-per-bead'] == 'singlet']\n\n # proportion of dead cells\n 
##############################\n ser_dead = df_meta['dead-cell-mito'].value_counts()\n prop_dead = 1 - ser_dead['live-cell'] / ser_dead.sum()\n\n list_index.append('proportion-dead')\n list_data.append(prop_dead)\n\n # assemble initial metadata series\n ser_meta_ini = pd.Series(list_data, index=list_index)\n\n # Calculate average metadata\n meta_list = ['gex-umi-sum', 'gex-num-unique', 'gex-mito-proportion-umi', 'gex-ribo-avg', 'gex-mito-avg']\n ser_meta_mean = df_meta[meta_list].mean()\n\n ser_meta_mean.index = [x + '-mean' for x in ser_meta_mean.index.tolist()]\n\n ser_meta = pd.concat([ser_meta_ini, ser_meta_mean])\n ser_meta.name = sample_name\n\n return ser_meta\n\ndef merge_lanes(lane_dirs, merge_dir, data_types=['gex', 'adt', 'hto', 'meta_cell'],\n return_df=True):\n\n lane_dirs = sorted(lane_dirs)\n\n if return_df:\n df = {}\n\n for inst_type in data_types:\n\n df_merge = None\n\n print('\\n' + inst_type + '\\n----------------')\n\n # collect data\n for inst_dir in lane_dirs:\n\n inst_lane = inst_dir.split('/')[-1]\n inst_file = inst_dir + '/' + inst_type + '.parquet'\n if os.path.exists(inst_file):\n\n inst_df = pd.read_parquet(inst_file)\n\n # if meta add lane category\n if 'meta' in inst_type:\n ser_lane = pd.Series(data=inst_lane, index=inst_df.index.tolist())\n inst_df['Lane_10x'] = ser_lane\n\n print(inst_lane, inst_df.shape)\n\n # merge on the fly\n if df_merge is None:\n df_merge = deepcopy(inst_df)\n else:\n if 'meta' in inst_type:\n df_merge = pd.concat([df_merge, inst_df], axis=0, sort=True)\n else:\n df_merge = pd.concat([df_merge, inst_df], axis=1)\n print('df_merge', df_merge.shape)\n\n print('merged', inst_type, df_merge.shape)\n\n # sort columns and rows\n cols = sorted(df_merge.columns.tolist())\n rows = sorted(df_merge.index.tolist())\n df_merge = df_merge.loc[rows, cols]\n\n df_merge.to_parquet(merge_dir + '/' + inst_type + '.parquet')\n\n if return_df:\n # save to dictionary\n df[inst_type] = df_merge\n\n if return_df:\n return df\n\ndef load_kb_vel_feature_matrix(inst_path, inst_sample, to_csc=True):\n\n # Load barcodes\n #################\n filename = inst_path + inst_sample + '.barcodes.txt'\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n\n barcodes = []\n for inst_bc in lines:\n inst_bc = inst_bc.strip().split('\\t')\n\n barcodes.append(inst_bc[0])\n\n\n # Load genes\n #################\n filename = inst_path + inst_sample + '.genes.txt'\n f = open(filename, 'r')\n lines = f.readlines()\n f.close()\n\n genes = []\n for inst_gene in lines:\n inst_gene = inst_gene.strip().split('\\t')\n\n genes.append(inst_gene[0])\n\n # Load Matrix\n #################\n mat = io.mmread(inst_path + inst_sample +'.mtx').transpose()\n mat = mat.tocsc()\n\n print(len(genes), len(barcodes), mat.shape)\n\n feature_data = {}\n feature_data['gex'] = {}\n feature_data['gex']['mat'] = mat\n feature_data['gex']['features'] = genes\n feature_data['gex']['barcodes'] = barcodes\n\n return feature_data\n\ndef find_suspected_adt_multiplets(adt_name, df, thresh=0.75, num_pos_marker_multiplet=30, find_debris=False,\n x_max=25, y_max=3000):\n '''\n This function identifies susspected ADT multiplets based on the ditribution of the\n number of markers a cell is 'positive' for. Positive is defined as above a certain\n threshold. adt_name defines what adt dataset should be used for this function and\n is usually a z-scored filtered ADT dataset. 
We can toggle whether we will identify debris.\n '''\n\n df_adt = df[adt_name]\n\n # binarize\n df_bin = deepcopy(df_adt)\n df_bin[df_bin < thresh] = 0\n df_bin[df_bin >= thresh] = 1\n\n sum_bin = df_bin.sum(axis=0)\n\n found_debris = sum_bin[sum_bin == 0].index.tolist()\n found_multi = sum_bin[sum_bin >= num_pos_marker_multiplet].index.tolist()\n drop_cells = found_debris + found_multi\n if find_debris:\n print('debris', len(found_debris))\n print('multiplet', len(found_multi))\n\n drop_dict = {}\n list_cell_per_event = []\n for inst_cell in df_bin.columns.tolist():\n\n if inst_cell in found_multi:\n inst_type = 'multiplet'\n else:\n inst_type = 'singlet'\n\n if find_debris:\n if inst_cell in found_debris:\n inst_type = 'debris'\n\n list_cell_per_event.append(inst_type)\n\n ser_cell_per_bead = pd.Series(list_cell_per_event, index=df_bin.columns.tolist())\n\n\n df['meta_cell']['adt-cell-per-bead'] = ser_cell_per_bead\n df['meta_cell']['adt-num-positive'] = sum_bin\n\n # plot distribution\n keep_adt_positive = one_dim_filter(df['meta_cell'],\n name='adt-num-positive',\n thresh=num_pos_marker_multiplet,\n x_max=x_max, y_max=y_max, keep='below')\n\n return df\n\ndef two_dim_filter(df_data_ini, x_name, y_name, x_thresh=None, y_thresh=None, x_keep='above', y_keep='above', plot_num_cells='all', x_max=None, y_max=None, is_meta=True,\n title=None):\n\n '''\n Working on generalizing function for two dimensional gating from metadata and any other data source.\n '''\n\n if title is None:\n title = ''\n\n\n if is_meta == True:\n df_data = df_data_ini\n else:\n df_data = deepcopy(df_data_ini.transpose())\n\n # auto set x and y thresholds\n if x_max is None:\n if 'ash' in x_name:\n x_max = 10\n else:\n x_max = 100\n\n if y_max is None:\n if 'ash' in y_name:\n y_max = 10\n else:\n y_max = 100\n\n\n # convert to non-ash values for interpretation\n if 'ash' in x_name:\n x_thresh_umi = np.sinh(x_thresh) * 5\n else:\n x_thresh_umi = None\n\n if 'ash' in y_name:\n y_thresh_umi = np.sinh(y_thresh) * 5\n else:\n y_thresh_umi = None\n\n\n # initialize everything to red (what will be dropped, debris)\n ser_colors = pd.Series('red', index=df_data.index.tolist())\n\n color_list = []\n ser_x = df_data[x_name]\n if x_keep == 'above':\n x_pass = ser_x[ser_x >= x_thresh].index.tolist()\n elif x_keep == 'below':\n x_pass = ser_x[ser_x < x_thresh].index.tolist()\n\n ser_y = df_data[y_name]\n if y_keep == 'above':\n y_pass = ser_y[ser_y >= y_thresh].index.tolist()\n elif y_keep == 'below':\n y_pass = ser_y[ser_y < y_thresh].index.tolist()\n\n if 'ash' in x_name:\n print(x_name, ' thresh ash: ', x_thresh, ' UMI: ', x_thresh_umi.round(0))\n else:\n print(x_name, ' thresh: ', x_thresh)\n\n if 'ash' in y_name:\n print(y_name, ' thresh ash: ', y_thresh, ' UMI: ', y_thresh_umi.round(0))\n else:\n print(y_name, ' thresh: ', y_thresh)\n\n\n print(x_name, ' keep: ', len(x_pass))\n print(y_name, ' keep: ', len(y_pass))\n\n keep_barcodes = list(set(x_pass + y_pass))\n print('keep_barcodes: ', len(keep_barcodes))\n\n ser_colors[keep_barcodes] = 'blue'\n\n # only include top 100K cells\n if plot_num_cells == 'all':\n plot_cells = df_data.index.tolist()\n else:\n plot_cells = df_data[x_name].sort_values(ascending=False).index.tolist()[:int(plot_num_cells)]\n\n ser_colors = ser_colors[plot_cells]\n\n color_list = ser_colors.get_values()\n\n df_data.loc[plot_cells].plot(kind='scatter', x=x_name, y=y_name,\n s=1, alpha=0.5, figsize=(10,10),\n ylim=(0,y_max), xlim=(0,x_max), c=color_list, title=title)\n\n drop_barcodes = 
sorted(list(set(df_data.index.tolist()).difference(keep_barcodes)))\n\n return keep_barcodes, drop_barcodes\n\ndef make_ct_list(df_hto, meta_hto):\n '''assign cells to debris/singlet/multiplet based on threshold only (hto-ash-scale)'''\n\n ser_list = []\n\n for inst_row in df_hto.index.tolist():\n\n # get data for a HTO\n inst_ser = deepcopy(df_hto.loc[inst_row])\n\n # load threshold level for this HTO\n inst_thresh = meta_hto.loc[inst_row, 'hto-threshold-ash-scale']\n\n # binarize HTO values about threshold\n inst_ser[inst_ser < inst_thresh] = 0\n inst_ser[inst_ser >= inst_thresh] = 1\n\n # assemble list of series to make dataframe later\n ser_list.append(inst_ser)\n\n # find cells that are positive for this HTO\n pos_hto = inst_ser[inst_ser==1].index.tolist()\n\n # generate binarized dataframe\n df_binary = pd.concat(ser_list, axis=1).transpose()\n\n # find singlets\n ser_sum = df_binary.sum(axis=0)\n ct_list = {}\n ct_list['debris'] = ser_sum[ser_sum == 0].index.tolist()\n ct_list['singlet'] = ser_sum[ser_sum == 1].index.tolist()\n ct_list['multiplet'] = ser_sum[ser_sum > 1].index.tolist()\n\n return ct_list\n\ndef calc_s2n_and_s2t(df_hto, meta_hto, meta_cell, inf_replace):\n\n # signal-to-noise refers to the highest vs second highest HTO\n #\n # signal-to-threshold refers to the highest HTO vs the\n # manually set threshold for that HTO\n\n # do not use ash data, calc signal to noise on HTO UMI data\n # find the highest hto\n ser_max_hto = df_hto.idxmax(axis=0)\n meta_cell['hto-max-name'] = ser_max_hto\n\n list_first = []\n list_second = []\n list_thresh = []\n list_cells = []\n\n for inst_cell in df_hto.columns.tolist():\n inst_ser = df_hto[inst_cell].sort_values(ascending=False)\n inst_first = inst_ser.array[0]\n inst_second = inst_ser.array[1]\n\n\n inst_max_hto_name = ser_max_hto[inst_cell]\n inst_max_hto_thresh = meta_hto['hto-threshold-ash-scale'][inst_max_hto_name]\n\n list_first.append(inst_first)\n list_second.append(inst_second)\n list_thresh.append(inst_max_hto_thresh)\n list_cells.append(inst_cell)\n\n ser_first = pd.Series(data=list_first, index=list_cells, name='first highest HTO')\n ser_second = pd.Series(data=list_second, index=list_cells, name='second highest HTO')\n ser_thresh = pd.Series(data=list_thresh, index=list_cells, name='threshold HTO')\n\n # df_comp = pd.concat([ser_first, ser_second, ser_thresh], axis=1).transpose()\n\n meta_cell['hto-max-umi-ash-scale'] = ser_first\n\n # calc signal-to-noise\n ###########################\n # sn_ratio = df_comp.loc['first highest HTO']/df_comp.loc['second highest HTO']\n sn_ratio = ser_first/ser_second\n meta_cell['hto-sn'] = sn_ratio\n # replace infinities with large number\n sn_ratio = sn_ratio.replace(np.Inf, inf_replace)\n\n # calc signal-to-threshold\n ###########################\n # sn_ratio = df_comp.loc['first highest HTO']/df_comp.loc['threshold HTO']\n st_ratio = ser_first/ser_thresh\n meta_cell['hto-st'] = st_ratio\n # replace infinities with large number\n st_ratio = st_ratio.replace(np.Inf, inf_replace)\n\n return meta_cell\n\ndef assign_htos(df_hto, meta_hto, meta_cell, sn_thresh, inf_replace=1000, perform_sn_adjustment=True):\n\n # assign cells to debris/singlet/multiplet based on threshold only (ash normalized)\n ######################################################################################\n ct_list = make_ct_list(df_hto, meta_hto)\n\n print('De-hash distributions: HTO Threshold Only')\n print('-----------------------------------------')\n print('debris', len(ct_list['debris']))\n print('singlet', 
len(ct_list['singlet']))\n print('multiplet', len(ct_list['multiplet']))\n\n # initialize dehash-thresh: debris/singlet/multiplet based on ct_list\n if 'dehash-thresh' not in meta_cell.columns.tolist():\n ser_type = pd.Series(np.nan, index=meta_cell.index)\n meta_cell['dehash-thresh'] = ser_type\n\n # save dehash-thresh\n ######################\n for inst_type in ct_list:\n meta_cell.loc[ct_list[inst_type], 'dehash-thresh'] = inst_type\n\n # Calc signal-to-noise and signal-to-threshold ratios\n ######################################################\n meta_cell = calc_s2n_and_s2t(df_hto, meta_hto, meta_cell, inf_replace)\n\n # Assign Cells to Samples based on Threshold Alone\n ####################################################\n list_samples = []\n for inst_cell in meta_cell.index.tolist():\n\n inst_type = meta_cell.loc[inst_cell, 'dehash-thresh']\n\n if inst_type == 'singlet':\n inst_hto = meta_cell.loc[inst_cell, 'hto-max-name']\n inst_sample = meta_hto.loc[inst_hto]['Associated Sample']\n else:\n inst_sample = 'N.A.'\n\n list_samples.append(inst_sample)\n\n ser_sample = pd.Series(list_samples, index=meta_cell.index.tolist())\n meta_cell['Sample-thresh'] = ser_sample\n\n if perform_sn_adjustment:\n # Assign Cells to Samples Based on Signal to Noise Adjustment\n cells = meta_cell.index.tolist()\n for inst_cell in cells:\n inst_type = meta_cell.loc[inst_cell, 'dehash-thresh']\n inst_sn = meta_cell.loc[inst_cell, 'hto-sn']\n inst_max_hto = meta_cell.loc[inst_cell, 'hto-max-name']\n\n # change singlet to multiplet if low sn\n if inst_type == 'singlet':\n if inst_sn < sn_thresh['singlets']:\n # convert to multiplet\n meta_cell.loc[inst_cell, 'dehash-thresh-sn'] = 'multiplet'\n meta_cell.loc[inst_cell, 'Sample-thresh-sn'] = 'N.A.'\n else:\n meta_cell.loc[inst_cell, 'dehash-thresh-sn'] = 'singlet'\n meta_cell.loc[inst_cell, 'Sample-thresh-sn'] = meta_hto.loc[inst_max_hto]['Associated Sample']\n elif inst_type == 'debris':\n if inst_sn >= sn_thresh['debris']:\n meta_cell.loc[inst_cell, 'dehash-thresh-sn'] = 'singlet'\n meta_cell.loc[inst_cell, 'Sample-thresh-sn'] = meta_hto.loc[inst_max_hto]['Associated Sample']\n else:\n meta_cell.loc[inst_cell, 'dehash-thresh-sn'] = 'debris'\n meta_cell.loc[inst_cell, 'Sample-thresh-sn'] = 'N.A.'\n elif inst_type == 'multiplet':\n if inst_sn >= sn_thresh['multiplets']:\n meta_cell.loc[inst_cell, 'dehash-thresh-sn'] = 'singlet'\n meta_cell.loc[inst_cell, 'Sample-thresh-sn'] = meta_hto.loc[inst_max_hto]['Associated Sample']\n else:\n meta_cell.loc[inst_cell, 'dehash-thresh-sn'] = 'multiplet'\n meta_cell.loc[inst_cell, 'Sample-thresh-sn'] = 'N.A.'\n\n\n ser_counts = meta_cell['dehash-thresh-sn'].value_counts()\n\n print('De-hash distributions: HTO Threshold and SN')\n print('--------------------------------------------')\n print('debris', ser_counts['debris'])\n print('singlet', ser_counts['singlet'])\n print('multiplet', ser_counts['multiplet'])\n\n return meta_cell\n\ndef plot_hto_sn_vs_gex_umi(df):\n\n if 'hto-sn-ash' not in df['meta_cell']:\n df['meta_cell']['hto-sn-ash'] = np.arcsinh(df['meta_cell']['hto-sn']/5)\n\n color_dict = {\n \"singlet\":'blue',\n \"debris\":'red',\n \"multiplet\":'yellow',\n }\n\n list_dehash = list(df['meta_cell']['cell-per-bead'].get_values())\n color_list = [color_dict[x] for x in list_dehash]\n\n df['meta_cell'].plot(kind='scatter',\n x='gex-umi-sum-ash',\n y='hto-sn-ash', alpha=0.25, s=10, figsize=(10,10), c=color_list, ylim=(0,5))\n\ndef load_s3_parquet(bucket_path, filename, cols=None):\n\n import s3fs\n import 
pyarrow.parquet as pq\n fs = s3fs.S3FileSystem()\n\n if cols == None:\n df = pq.read_table(bucket_path + filename, use_pandas_metadata=True, filesystem=fs).to_pandas()\n else:\n df = pq.read_table(bucket_path + filename, use_pandas_metadata=True, filesystem=fs, columns=cols).to_pandas()\n\n return df\n\ndef add_filter(cat_title, inst_cat_search, cat_filter_list=None):\n '''\n Add a single filtering step; e.g. filter for barcodes with\n this category of this category type.\n\n To do: support exclusion of category by adding third element to tuple.\n '''\n if cat_filter_list == None:\n cat_filter_list = []\n\n if type(inst_cat_search) is list:\n inst_cat_search = tuple(inst_cat_search)\n\n cat_filter_list.append((cat_title, inst_cat_search))\n return cat_filter_list\n\ndef filter_meta_using_cat_filter_list(df_meta_ini, cat_filter_list):\n df_meta = deepcopy(df_meta_ini)\n\n for cat_tuple in cat_filter_list:\n inst_cat_type = cat_tuple[0]\n inst_cat_search = cat_tuple[1]\n\n if type(inst_cat_search) is not tuple:\n # find indexes of barcodes that match requested caetgory\n inst_cat = inst_cat_search\n cat_ser = df_meta[inst_cat_type]\n found_barcodes = cat_ser[cat_ser == inst_cat].index.tolist()\n else:\n # find indexes of barcodes that match requested categories\n found_barcodes = []\n for inst_cat in inst_cat_search:\n cat_ser = df_meta[inst_cat_type]\n inst_found = cat_ser[cat_ser == inst_cat].index.tolist()\n found_barcodes.extend(inst_found)\n\n # apply progressive filters to metadata\n df_meta = df_meta.loc[found_barcodes]\n\n return df_meta\n\ndef set_gex_debris_thresh(meta_cell, xlim=7, ylim=100, thresh=1):\n\n ser_gex_ash = meta_cell['gex-umi-sum-ash']\n\n n, bins, patches = plt.hist(ser_gex_ash, bins=100, range=(0, xlim))\n\n colors = []\n for inst_bin in bins:\n if inst_bin <= thresh:\n colors.append('red')\n else:\n colors.append('blue')\n\n # apply the same color for each class to match the map\n for patch, color in zip(patches, colors):\n patch.set_facecolor(color)\n\n\n plt.ylim((0,ylim))\n\n keep_barcodes = ser_gex_ash[ser_gex_ash >= thresh].index.tolist()\n\n print('gex-ash-umi thresh', thresh, '; gex-umi thresh', np.sinh(thresh) * 5)\n print('keeping', len(keep_barcodes), 'cells')\n\n return keep_barcodes\n\ndef one_dim_filter(meta_cell, name, thresh, keep, x_max=None, y_max=1500):\n\n if x_max is None:\n if 'ash' in name:\n x_max = 10\n else:\n x_max = 1000\n\n\n ser_data = meta_cell[name]\n\n n, bins, patches = plt.hist(ser_data, bins=100, range=(0, x_max))\n\n colors = []\n for inst_bin in bins:\n if keep == 'above':\n if inst_bin <= thresh:\n colors.append('red')\n else:\n colors.append('blue')\n else:\n if inst_bin > thresh:\n colors.append('red')\n else:\n colors.append('blue')\n\n # apply the same color for each class to match the map\n for patch, color in zip(patches, colors):\n patch.set_facecolor(color)\n\n plt.ylim((0,y_max))\n\n if keep == 'above':\n keep_barcodes = ser_data[ser_data >= thresh].index.tolist()\n elif keep == 'below':\n keep_barcodes = ser_data[ser_data < thresh].index.tolist()\n\n\n if 'ash' in name:\n\n print(name, thresh, '; gex-umi thresh', np.sinh(thresh) * 5)\n else:\n print(name, thresh)\n\n print('keeping', len(keep_barcodes), 'cells')\n\n drop_barcodes = sorted(list(set(meta_cell.index.tolist()).difference(keep_barcodes)))\n\n if len(drop_barcodes) > 0:\n print('dropping', len(drop_barcodes), 'cells')\n else:\n print('dropping no cells')\n\n return keep_barcodes, drop_barcodes\n\ndef sort_all_dataframes(df):\n for inst_type in df:\n 
print('sorting', inst_type)\n inst_df = df[inst_type]\n # sort columns and rows\n cols = sorted(inst_df.columns.tolist())\n rows = sorted(inst_df.index.tolist())\n inst_df = inst_df.loc[rows, cols]\n\n return df\n\n\n\n\ndef calc_meta_features_from_sp(sp, df):\n\n # initialize standard scaler that can calc mean/std from sparse csr/csc matrix\n scaler = StandardScaler(with_mean=False)\n\n for inst_type in ['filtered', 'debris']:\n\n for inst_feat in sp[inst_type].keys():\n\n # get row names\n rows = sp[inst_type][inst_feat]['features']\n\n # number of cells measured\n #################################\n inst_name = inst_feat + '-' + inst_type + '-num-meas'\n mat_bin = deepcopy(sp[inst_type][inst_feat]['mat'])\n mat_bin[mat_bin >= 1] = 1\n mat_sum = mat_bin.sum(axis=1)\n arr_sum = np.asarray(mat_sum[:,0])\n list_sum = [x[0] for x in arr_sum.tolist()]\n ser_num_meas = pd.Series(list_sum, index=rows)\n df['meta_' + inst_feat][inst_name] = ser_num_meas\n\n # Initialize mean and std calc\n mat = sp[inst_type][inst_feat]['mat'].transpose().tocsr()\n scaler.fit(mat)\n\n # mean\n ######################\n inst_name = inst_feat + '-' + inst_type + '-mean'\n ser_mean = pd.Series(scaler.mean_, index=rows)\n df['meta_' + inst_feat][inst_name] = ser_mean\n\n # std\n ######################\n inst_name = inst_feat + '-' + inst_type + '-std'\n ser_std = pd.Series(np.sqrt(scaler.var_), index=rows)\n df['meta_' + inst_feat][inst_name] = ser_std\n\n return df\n\ndef make_df_from_cols(cols):\n inst_col = cols[0]\n\n cat_titles = []\n for inst_info in inst_col[1:]:\n inst_title = inst_info.split(': ')[0]\n cat_titles.append(inst_title)\n\n clean_cols = []\n for inst_col in cols:\n inst_clean = []\n for inst_info in inst_col:\n if ': ' in inst_info:\n inst_clean.append(inst_info.split(': ')[1])\n else:\n inst_clean.append(inst_info)\n clean_cols.append(tuple(inst_clean))\n\n df_ini = pd.DataFrame(data=clean_cols).set_index(0)\n mat = df_ini.values\n rows = df_ini.index.tolist()\n\n df_meta = pd.DataFrame(data=mat, index=rows, columns=cat_titles)\n\n return df_meta\n\ndef subsample_cats(df, cat_index, num_samples, sample_type='subsample', random_state=99):\n\n cols = df.columns.tolist()\n cats = sorted(list(set([x[cat_index] for x in cols])))\n\n all_ds_info = []\n ser_ds_info = None\n orig_cols = []\n\n if sample_type == 'subsample':\n keep_cols = []\n for inst_cat in cats:\n keep_cat_cols = [x for x in cols if x[cat_index] == inst_cat]\n\n if len(keep_cat_cols) > num_samples:\n keep_cat_cols = random.sample(keep_cat_cols, num_samples)\n\n keep_cols.extend(keep_cat_cols)\n\n df_sample = df[keep_cols]\n\n elif sample_type == 'downsample':\n\n df_list = []\n\n for inst_cat in cats:\n keep_cat_cols = [x for x in cols if x[cat_index] == inst_cat]\n df_cat = df[keep_cat_cols]\n orig_cols.extend(df_cat.columns.tolist())\n\n if df_cat.shape[1] > num_samples:\n net.load_df(df_cat)\n ds_info = net.downsample(axis='col', ds_type='kmeans', num_samples=num_samples, random_state=random_state)\n df_ds = net.export_df()\n\n else:\n net.load_df(df_cat)\n ds_info = net.downsample(axis='col', ds_type='kmeans', num_samples=df_cat.shape[1], random_state=random_state)\n df_ds = net.export_df()\n\n ds_info = [ inst_cat.split(': ')[1] + '_cluster-' + str(x) for x in list(ds_info)]\n all_ds_info.extend(ds_info)\n\n # clean df_ds columns\n new_cols = []\n for inst_col in df_ds.columns.tolist():\n new_col = list(inst_col)\n new_col[0] = inst_cat.split(': ')[1] + '_' + new_col[0].split(': ')[1]\n new_cols.append(tuple(new_col))\n 
df_ds.columns = new_cols\n\n df_list.append(df_ds)\n\n # concatenate df_sample\n df_sample = pd.concat(df_list, axis=1)\n\n # drop majority from column name\n df_sample.columns = [tuple(s.replace('Majority-','') for s in tup) for tup in df_sample.columns.tolist()]\n\n ser_ds_info = pd.Series(data=all_ds_info, index=orig_cols)\n\n\n elif sample_type == 'mean':\n\n df_list = []\n for inst_cat in cats:\n keep_cat_cols = [x for x in cols if x[cat_index] == inst_cat]\n df_cat = df[keep_cat_cols]\n\n ser_mean = df_cat.mean(axis=1)\n ser_mean.name = inst_cat.split(': ')[1]\n\n df_list.append(ser_mean)\n df_sample = pd.concat(df_list, axis=1)\n\n\n return df_sample, ser_ds_info" ]
[ [ "pandas.read_parquet", "numpy.random.rand", "numpy.asarray", "sklearn.preprocessing.StandardScaler", "numpy.random.seed", "pandas.DataFrame", "matplotlib.pyplot.ylim", "scipy.io.mmread", "matplotlib.pyplot.subplots", "matplotlib.pyplot.hist", "numpy.sinh", "numpy.sqrt", "pandas.concat", "pandas.read_csv", "pandas.Series", "numpy.arcsinh", "numpy.log2" ] ]
edinburgh-university-OOSA/env_geog
[ "7e442a4ac26e67515ebd48160f99ae97bd61be61" ]
[ "prep_data/week1/makeGround.py" ]
[ "'''\nScript to generate ground data\nProduces field tree data for\nuse in week 1's exercise\n'''\n\n\n#########################################\n\nimport argparse\nimport numpy as np\nfrom math import exp\n\n#########################################\n\nif __name__==\"__main__\":\n def readCommands():\n '''\n Get commandline arguments\n '''\n p = argparse.ArgumentParser(description=(\"An illustration of a command line parser\"))\n p.add_argument(\"--output\",dest=\"outName\",type=str,default='test.csv',help=(\"Output filename\\nDefault=test.csv\"))\n p.add_argument(\"--nPlots\", dest =\"nPlots\", type=int, default=1, help=(\"Number of plots\\nDefault = 1\"))\n p.add_argument(\"--meanMass\", dest =\"meanB\", type=float, default=150, help=(\"Mean biomass in Mg/ha\\nDefault = 150 Mg/ha\"))\n p.add_argument(\"--plotSize\", dest =\"pSize\", type=float, default=20, help=(\"Plot side length in metres\\nDefault = 20 m\"))\n cmdargs = p.parse_args()\n return cmdargs\n\n\n\n#########################################\n\ndef muukonen3(dbh,beta0,beta1,beta2):\n '''Biomass allometric equation 3 from Muukonen et al'''\n\n biomass=exp(beta0+beta1*dbh/(dbh+beta2))\n\n return(biomass)\n\n#########################################\n\nclass plotData():\n '''Class to hold data for 1 plot'''\n\n def __init__(self,biomass,dataStruct):\n '''Class initialiser'''\n\n # probality of alive\n probAlive=0.95\n\n # allocate arrays\n self.sp=[]\n self.dbh=np.array((),dtype=float)\n self.biomass=np.array((),dtype=float)\n self.alive=[]\n\n # loop over adding trees until full\n self.nTrees=0\n totB=0.0\n while(totB<biomass):\n # pick species\n spInd=np.random.randint(size=1,low=0,high=dataStruct.nSp)[0]\n\n # set properties\n self.dbh=np.append(self.dbh,np.random.poisson(lam=dataStruct.meanDBH[spInd]))\n self.sp.append(dataStruct.spList[spInd])\n self.biomass=np.append(self.biomass,muukonen3(self.dbh[self.nTrees],dataStruct.beta0[spInd],dataStruct.beta1[spInd],dataStruct.beta2[spInd]))\n if(np.random.random()<=probAlive):\n self.alive.append(\"alive\")\n else:\n self.alive.append(\"dead\")\n\n # add up living biomass\n if(self.alive[self.nTrees]==\"alive\"):\n totB+=self.biomass[self.nTrees]\n\n self.nTrees+=1\n\n print(\"nTrees\",self.nTrees,\"biomass\",biomass/(1000*20**2)*100**2)\n\n return\n\n\n#########################################\n\nclass generateData():\n '''Class to hold generated plot data'''\n\n ###########################\n\n def __init__(self,biomasses):\n '''Class initialiser'''\n\n # make a species list\n self.makeSpecies()\n\n # allocate space\n self.nPlots=biomasses.shape[0]\n self.plots=np.empty((self.nPlots),plotData)\n\n # loop over plots and populate. Biomass is in kg/ha\n for i in range(0,self.nPlots):\n self.plots[i]=plotData(biomasses[i],self)\n\n return\n\n\n ###########################\n\n def makeSpecies(self,spList=[\"PA\",\"PS\",\"QR\",\"FS\"]):\n '''Make a list of species'''\n self.nSp=4\n\n # species list\n self.spList=spList\n\n # is conifer or not\n self.isConifer=np.empty((self.nSp),dtype=bool)\n self.isConifer[0]=True # Picea abies. Norway spruce\n self.isConifer[1]=True # Pinus sylvestrus. Scots pine\n self.isConifer[2]=False # Quercus robur. English Oak\n self.isConifer[3]=False # Fagus sylvatica. Beech\n\n # mean DBH, in cm\n self.meanDBH=np.empty((self.nSp),dtype=float)\n self.meanDBH[0]=15\n self.meanDBH[1]=25\n self.meanDBH[2]=30\n self.meanDBH[3]=18\n\n # allometric parameters\n # Muukkonen, eg 2. b=B0.DBH**B1\n # Muukkonen, eg 3. 
b=exp(B0+B1*dbh/(dbh+B2))\n self.beta0=np.empty((self.nSp),dtype=float)\n self.beta1=np.empty((self.nSp),dtype=float)\n self.beta2=np.empty((self.nSp),dtype=float)\n self.beta0[0]=-1.694 # Picea abies. Norway spruce\n self.beta1[0]=10.825\n self.beta2[0]=11.816\n self.beta0[1]=-2.688 # Pinus sylvestrus. Scots pine\n self.beta1[1]=10.745\n self.beta2[1]=8.062\n self.beta0[2]=-0.604 # Quercus robur. English Oak\n self.beta1[2]=10.677\n self.beta2[2]=15.9\n self.beta0[3]=-0.006 # Fagus sylvatica. Beech\n self.beta1[3]=10.933\n self.beta2[3]=21.216\n\n return\n\n\n ###########################\n\n def writeCSV(self,outName):\n '''Write plot data to a csv'''\n\n # open\n f=open(outName,'w')\n line=\"plot,treeN,species,dbh,state\\n\"\n f.write(line)\n\n # loop over plots\n for i in range(0,self.nPlots):\n # loop over trees\n for j in range(0,self.plots[i].nTrees):\n line=str(i+1)+\",\"+str(j)+\",\"+str(self.plots[i].sp[j])+\",\"+\\\n str(self.plots[i].dbh[j])+\",\"+str(self.plots[i].alive[j])+\"\\n\" \n f.write(line)\n\n f.close()\n print(\"Written to\",outName)\n return\n\n\n#########################################\n\nif __name__ == '__main__':\n '''Main block'''\n\n # read command line\n cmd=readCommands()\n\n # set biomass values\n biomasses=np.random.uniform(low=0.0,high=350000*cmd.pSize**2/(100**2),size=cmd.nPlots)\n\n # generate plot data\n data=generateData(biomasses)\n\n # write data\n data.writeCSV(cmd.outName)\n\n\n#########################################\n\n" ]
[ [ "numpy.array", "numpy.empty", "numpy.random.poisson", "numpy.random.uniform", "numpy.random.randint", "numpy.random.random" ] ]
kayzhu/keras-tuner
[ "32240940cd5814a905aadf8e646497649cbbb046" ]
[ "keras_tuner/applications/augment.py" ]
[ "# Copyright 2020 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom tensorflow.keras import layers\n\ntry:\n from tensorflow.keras.layers.experimental import ( # isort:skip\n preprocessing,\n ) # pytype: disable=import-error\nexcept ImportError:\n preprocessing = None\n\nfrom keras_tuner.engine import hypermodel\n\n# dict of functions that create layers for transforms.\n# Each function takes a factor (0 to 1) for the strength\n# of the transform.\nif preprocessing is not None:\n TRANSFORMS = {\n \"translate_x\": lambda x: preprocessing.RandomTranslation(x, 0),\n \"translate_y\": lambda y: preprocessing.RandomTranslation(0, y),\n \"rotate\": preprocessing.RandomRotation,\n \"contrast\": preprocessing.RandomContrast,\n }\n\n\nclass HyperImageAugment(hypermodel.HyperModel):\n \"\"\"A image augmentation hypermodel.\n\n The `HyperImageAugment` class searches for the best combination of image\n augmentation operations in Keras preprocessing layers. The input shape of\n the model should be (height, width, channels). The output of the model is\n of the same shape as the input.\n\n Args:\n input_shape: Optional shape tuple, e.g. `(256, 256, 3)`.\n input_tensor: Optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n rotate: A number between [0, 1], a list of two numbers between [0, 1]\n or None. Configures the search space of the factor of random\n rotation transform in the augmentation. A factor is chosen for each\n trial. It sets maximum of clockwise and counterclockwise rotation\n in terms of fraction of pi, among all samples in the trial.\n Default is 0.5. When `rotate` is a single number, the search range is\n [0, `rotate`].\n The transform is off when set to None.\n translate_x: A number between [0, 1], a list of two numbers between [0, 1]\n or None. Configures the search space of the factor of random\n horizontal translation transform in the augmentation. A factor is\n chosen for each trial. It sets maximum of horizontal translation in\n terms of ratio over the width among all samples in the trial.\n Default is 0.4. When `translate_x` is a single number, the search range\n is [0, `translate_x`].\n The transform is off when set to None.\n translate_y: A number between [0, 1], a list of two numbers between [0, 1]\n or None. Configures the search space of the factor of random vertical\n translation transform in the augmentation. A factor is chosen for each\n trial. It sets maximum of vertical translation in terms of ratio over\n the height among all samples in the trial. Default is 0.4. When\n `translate_y` is a single number ,the search range is [0, `translate_y`].\n The transform is off when set to None.\n contrast: A number between [0, 1], a list of two numbers between [0, 1]\n or None. 
Configures the search space of the factor of random contrast\n transform in the augmentation. A factor is chosen for each trial. It\n sets maximum ratio of contrast change among all samples in the trial.\n Default is 0.3. When `contrast` is a single number, the search rnage is\n [0, `contrast`].\n The transform is off when set to None.\n augment_layers: None, int or list of two ints, controlling the number\n of augment applied. Default is 3.\n When `augment_layers` is 0, all transform are applied sequentially.\n When `augment_layers` is nonzero, or a list of two ints, a simple\n version of RandAugment(https://arxiv.org/abs/1909.13719) is used.\n A search space for 'augment_layers' is created to search [0,\n `augment_layers`], or between the two ints if a `augment_layers` is\n a list. For each trial, the hyperparameter 'augment_layers'\n determines number of layers of augment transforms are applied,\n each randomly picked from all available transform types with equal\n probability on each sample.\n **kwargs: Additional keyword arguments that apply to all hypermodels.\n See `keras_tuner.HyperModel`.\n\n Example:\n\n ```python\n hm_aug = HyperImageAugment(input_shape=(32, 32, 3),\n augment_layers=0,\n rotate=[0.2, 0.3],\n translate_x=0.1,\n translate_y=None,\n contrast=None)\n ```\n\n Then the hypermodel `hm_aug` will search 'factor_rotate' between [0.2, 0.3]\n and 'factor_translate_x' between [0, 0.1]. These two augments are applied\n on all samples with factor picked per each trial.\n\n ```python\n hm_aug = HyperImageAugment(input_shape=(32, 32, 3),\n translate_x=0.5,\n translate_y=[0.2, 0.4]\n contrast=None)\n ```\n\n Then the hypermodel `hm_aug` will search 'factor_rotate' between [0, 0.2],\n 'factor_translate_x' between [0, 0.5], 'factor_translate_y' between\n [0.2, 0.4]. It will use RandAugment, searching 'augment_layers'\n between [0, 3]. Each layer on each sample will be chosen from rotate,\n translate_x and translate_y.\n \"\"\"\n\n def __init__(\n self,\n input_shape=None,\n input_tensor=None,\n rotate=0.5,\n translate_x=0.4,\n translate_y=0.4,\n contrast=0.3,\n augment_layers=3,\n **kwargs,\n ):\n if preprocessing is None:\n raise ImportError(\n \"HyperImageAugment requires tensorflow>=2.3.0, \"\n f\"but the current version is {tf.__version__}.\"\n )\n\n if input_shape is None and input_tensor is None:\n raise ValueError(\n \"You must specify either `input_shape` or `input_tensor`.\"\n )\n\n self.transforms = []\n self._register_transform(\"rotate\", rotate)\n self._register_transform(\"translate_x\", translate_x)\n self._register_transform(\"translate_y\", translate_y)\n self._register_transform(\"contrast\", contrast)\n\n self.input_shape = input_shape\n self.input_tensor = input_tensor\n\n if augment_layers:\n self.model_name = \"image_rand_augment\"\n try:\n augment_layers_min = augment_layers[0]\n augment_layers_max = augment_layers[1]\n except TypeError:\n augment_layers_min = 0\n augment_layers_max = augment_layers\n if not (\n isinstance(augment_layers_min, int)\n and isinstance(augment_layers_max, int)\n ):\n raise ValueError(\n \"Keyword argument `augment_layers` must be int,\"\n \"but received {}. 
\".format(augment_layers)\n )\n\n self.augment_layers_min = augment_layers_min\n self.augment_layers_max = augment_layers_max\n else:\n # Separatedly tune and apply all augment transforms if\n # `randaug_count` is set to 0.\n self.model_name = \"image_augment\"\n\n super(HyperImageAugment, self).__init__(**kwargs)\n\n def build(self, hp):\n if self.input_tensor is not None:\n inputs = keras.utils.get_source_inputs(self.input_tensor)\n x = self.input_tensor\n else:\n inputs = layers.Input(shape=self.input_shape)\n x = inputs\n\n if self.model_name == \"image_rand_augment\":\n x = self._build_randaug_layers(x, hp)\n else:\n x = self._build_fixedaug_layers(x, hp)\n\n model = keras.Model(inputs, x, name=self.model_name)\n return model\n\n def _build_randaug_layers(self, inputs, hp):\n augment_layers = hp.Int(\n \"augment_layers\",\n self.augment_layers_min,\n self.augment_layers_max,\n default=self.augment_layers_min,\n )\n x = inputs\n for _ in range(augment_layers):\n # selection tensor determines operation for each sample.\n batch_size = tf.shape(x)[0]\n selection = tf.random.uniform(\n [batch_size, 1, 1, 1], maxval=len(self.transforms), dtype=\"int32\"\n )\n\n for i, (transform, (f_min, f_max)) in enumerate(self.transforms):\n # Factor for each transform is determined per each trial.\n factor = hp.Float(f\"factor_{transform}\", f_min, f_max, default=f_min)\n if factor == 0:\n continue\n transform_layer = TRANSFORMS[transform](factor)\n x_trans = transform_layer(x)\n\n # For each sample, apply the transform if and only if\n # selection matches the transform index `i`\n x = tf.where(tf.equal(i, selection), x_trans, x)\n return x\n\n def _build_fixedaug_layers(self, inputs, hp):\n x = inputs\n for transform, (factor_min, factor_max) in self.transforms:\n transform_factor = hp.Float(\n f\"factor_{transform}\",\n factor_min,\n factor_max,\n step=0.05,\n default=factor_min,\n )\n if transform_factor == 0:\n continue\n transform_layer = TRANSFORMS[transform](transform_factor)\n x = transform_layer(x)\n return x\n\n def _register_transform(self, transform_name, transform_params):\n \"\"\"Register a transform and format parameters for tuning the transform.\n\n Args:\n transform_name: A string, the name of the transform.\n trnasform_params: A number between [0, 1], a list of two numbers\n between [0, 1] or None. If set to a single number x, the\n corresponding transform factor will be between [0, x].\n If set to a list of 2 numbers [x, y], the factor will be\n between [x, y]. If set to None, the transform will be excluded.\n \"\"\"\n if not transform_params:\n return\n\n try:\n transform_factor_min = transform_params[0]\n transform_factor_max = transform_params[1]\n if len(transform_params) > 2:\n raise ValueError(\n \"Length of keyword argument {} must not exceed 2.\".format(\n transform_name\n )\n )\n except TypeError:\n transform_factor_min = 0\n transform_factor_max = transform_params\n\n if not (\n isinstance(transform_factor_max, (int, float))\n and isinstance(transform_factor_min, (int, float))\n ):\n raise ValueError(\n \"Keyword argument {} must be int or float, \"\n \"but received {}. \".format(transform_name, transform_params)\n )\n\n self.transforms.append(\n (transform_name, (transform_factor_min, transform_factor_max))\n )\n" ]
[ [ "tensorflow.shape", "tensorflow.keras.layers.Input", "tensorflow.equal", "tensorflow.keras.layers.experimental.preprocessing.RandomTranslation", "tensorflow.keras.Model", "tensorflow.keras.utils.get_source_inputs" ] ]
HarshCasper/FinMind
[ "7b7571e443525edcd52c7f53e7fb0daca42b1f60" ]
[ "tests/BackTestSystem/test_utils.py" ]
[ "import datetime\n\nimport pandas as pd\nimport pytest\n\nfrom FinMind.BackTestSystem.utils import (\n get_asset_underlying_type,\n get_underlying_trading_tax,\n calculate_Datenbr,\n calculate_sharp_ratio,\n convert_Return2Annual,\n convert_period_days2years,\n)\n\ntestdata_get_asset_underlying_type = [\n (\n \"2330\",\n pd.DataFrame(\n [\n {\n \"industry_category\": \"半導體業\",\n \"stock_id\": \"2330\",\n \"stock_name\": \"台積電\",\n \"type\": \"twse\",\n \"date\": \"2020-05-31\",\n }\n ]\n ),\n )\n]\n\n\[email protected](\n \"stock_id, return_value\",\n testdata_get_asset_underlying_type,\n)\ndef test_get_asset_underlying_type(stock_id, return_value, mocker):\n mock_load = mocker.patch(\"FinMind.Data.Load.FinData\")\n mock_load.return_value = return_value\n underlying_type = get_asset_underlying_type(stock_id)\n assert underlying_type == \"半導體業\"\n\n\ntestdata_get_underlying_trading_tax = [(\"半導體\", 0.003), (\"ETF\", 0.001)]\n\n\[email protected](\n \"underlying_type, expected\",\n testdata_get_underlying_trading_tax,\n)\ndef test_get_underlying_trading_tax(underlying_type, expected):\n resp = get_underlying_trading_tax(underlying_type)\n assert resp == expected\n\n\ntestdata_calculate_Datenbr = [\n (\"2020-01-01\", \"2020-01-05\", 4),\n (\"2019-12-29\", \"2020-01-03\", 5),\n]\n\n\[email protected](\n \"day1, day2, expected\",\n testdata_calculate_Datenbr,\n)\ndef test_calculate_Datenbr(day1, day2, expected):\n resp = calculate_Datenbr(day1, day2)\n assert resp == expected\n\n\ntestdata_calculate_sharp_ratio = [(0.05, 0.01, 79.37), (0.1, 0.21, 7.56)]\n\n\[email protected](\n \"retrun, std, expected\",\n testdata_calculate_sharp_ratio,\n)\ndef test_calculate_sharp_ratio(retrun, std, expected):\n resp = calculate_sharp_ratio(retrun, std)\n assert resp == expected\n\n\ntestdata_convert_Return2Annual = [(0.2, 2, 0.0954), (0.5, 5, 0.0845)]\[email protected](\n \"period_return, period_years, expected\",\n testdata_convert_Return2Annual,\n)\ndef test_convert_Return2Annual(period_return, period_years, expected):\n resp = convert_Return2Annual(period_return, period_years)\n assert resp == expected\n\n\ntestdata_convert_period_days2years = [(180, 0.4931506849315068), (30, 0.0821917808219178)]\[email protected](\n \"days, expected\",\n testdata_convert_period_days2years,\n)\ndef test_convert_period_days2years(days, expected):\n resp = convert_period_days2years(days)\n assert resp == expected\n\n" ]
[ [ "pandas.DataFrame" ] ]
vladchimescu/bioimg-py3
[ "f40b4747157fb4203ebb9ddebc29f742bc128689" ]
[ "bioimg/base/plot.py" ]
[ "#!/usr/env/bin python3\n\"\"\"\nFunctions and classes for static plots\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom skimage import color\nimport numpy as np\nimport matplotlib.colors as mcolors\n\ncolor_dict = {'red': 0, 'orange': 0.1,\n 'yellow': 0.16, 'green': 0.3,\n 'cyan': 0.5, 'blue': 0.6,\n 'purple': 0.8, 'magenta': 0.9,\n 'white': None}\n\ndef rescale_array(a):\n '''Rescale float numpy array to (0,1)-range\n '''\n if (a.dtype == np.float64) or (a.dtype == np.float32):\n return (a - a.min()) / (a.max() - a.min())\n return a\n\ndef plot_channels(images, nrow, ncol, titles=None,\n scale_x=4, scale_y=4, cmap=None,\n hspace=0.2, wspace=0.2, bottom=0,\n top=0.7):\n '''Plot images as a grid of subplots\n ---------------------------------\n A list of image arrays is plotted in a matrix layout\n\n Parameters\n ----------\n images : list\n List of np.array (image arrays). Ararys can be\n either greyscale or color 2D images\n nrow : int\n Number of rows\n ncol : int\n Numbr of columns\n titles : list or array\n List-like, plot subtitles\n scale_x : int\n Figure width parameter: w = scale_x * ncol\n scale_y : int\n Figure height parameter: h = scale_y * nrow\n cmap : string\n Name of the matplotlib colormap. Default to viridis\n hspace : float (optional)\n proportion of height reserved for spacing between subplots\n wspace : float (optional)\n proportion of width reserved for spacing between subplots\n bottom : float (optional)\n bottom of the subplots of the figure\n top : float (optional)\n top of the subplots of the figure\n '''\n plt.figure(figsize=(scale_x * ncol, scale_y * nrow))\n plt.subplots_adjust(hspace=hspace, wspace=wspace, top=top, bottom=bottom)\n for i in range(len(images)):\n plt.subplot(nrow, ncol, i + 1)\n plt.imshow(images[i], cmap=cmap)\n if titles is not None:\n plt.title(titles[i], size=12)\n plt.xticks(())\n plt.yticks(())\n\ndef colorize(image, hue, saturation=1):\n \"\"\" Add color of the given hue to an RGB image.\n\n By default, set the saturation to 1 so that the colors pop!\n \"\"\"\n if hue is None:\n return image\n hsv = color.rgb2hsv(image)\n hsv[:, :, 1] = saturation\n hsv[:, :, 0] = hue\n return color.hsv2rgb(hsv)\n\n\ndef combine_channels(images, colors, blend=None, gamma=None):\n '''Plot images as an RGB overlay\n ------------------------------\n A list of image arrays is combined into a single\n color image.\n\n Parameters\n ----------\n images : list\n List of np.array (image arrays). 
List elements are\n interpreted as individual color channels\n colors : list of strings\n List of color names: one of red, orange, yellow,\n green, cyan, blue, purple, magenta, white\n blend : list of floats (optional)\n Controls color blending in the image overlay\n gamma : list or array (optional)\n Gamma correction factor for individual images\n '''\n # rescale each channel to be in the range (0,1)\n images = [rescale_array(img) for img in images]\n if blend is None:\n blend = [0.5] * len(images)\n if gamma is not None:\n images = [img**g for img, g in zip(images, gamma)]\n\n \n images = [color.gray2rgb(img) for img in images]\n # color the images\n images = [colorize(img, hue=color_dict[c])\n for img, c in zip(images, colors)]\n images = [b * img for img, b in zip(images, blend)]\n # make sure that the images are in (0,1) range if dtype='float' \n return rescale_array(sum(images))\n\n\ndef show_bbox(img, bbox, color='white', lw=2, size=12):\n '''Display bounding boxes of the segmentation\n ------------------------------------------\n Show the original intensity image or RGB overlay\n together with the bounding boxes of labelled regions\n\n Parameters\n ----------\n img : array\n Intensity or RGB image\n bbox: list / array of tuples\n Each tuple represents the bounding box image\n coordinates (xmin, xmax, ymin, ymax)\n color : string\n Color of the bounding boxes\n lw : float\n Linewidth of the bounding boxes\n size : int\n Figure size\n '''\n fig, ax = plt.subplots(1, 1, figsize=(size, size))\n ax.imshow(img)\n for bb in bbox:\n start = (bb[0], bb[2])\n extent = (bb[1] - bb[0],\n bb[3] - bb[2])\n rec = plt.Rectangle(xy=start,\n width=extent[1],\n height=extent[0], color=color,\n linewidth=lw, fill=False)\n ax.add_patch(rec)\n ax.axis('off')\n\n\ndef make_colormap(seq):\n \"\"\"Return a LinearSegmentedColormap\n seq: a sequence of floats and RGB-tuples. The floats should be increasing\n and in the interval (0,1).\n \"\"\"\n seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]\n cdict = {'red': [], 'green': [], 'blue': []}\n for i, item in enumerate(seq):\n if isinstance(item, float):\n r1, g1, b1 = seq[i - 1]\n r2, g2, b2 = seq[i + 1]\n cdict['red'].append([item, r1, r2])\n cdict['green'].append([item, g1, g2])\n cdict['blue'].append([item, b1, b2])\n return mcolors.LinearSegmentedColormap('CustomMap', cdict)\n\ndef diverge_map(high=(0.565, 0.392, 0.173), low=(0.094, 0.310, 0.635)):\n '''\n low and high are colors that will be used for the two\n ends of the spectrum. they can be either color strings\n or rgb color tuples\n '''\n c = mcolors.ColorConverter().to_rgb\n if isinstance(low, str): low = c(low)\n if isinstance(high, str): high = c(high)\n return make_colormap([low, c('white'), 0.5, c('white'), high])\n\n" ]
[ [ "matplotlib.colors.ColorConverter", "matplotlib.pyplot.title", "matplotlib.pyplot.subplots", "matplotlib.pyplot.figure", "matplotlib.pyplot.yticks", "matplotlib.pyplot.Rectangle", "matplotlib.colors.LinearSegmentedColormap", "matplotlib.pyplot.imshow", "matplotlib.pyplot.subplots_adjust", "matplotlib.pyplot.xticks", "matplotlib.pyplot.subplot" ] ]
viadee/xair
[ "557534b022a6ff90b3fb5d50d0592bf73187644a" ]
[ "xai_xps/src/Utils.py" ]
[ "import json\nimport logging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport skfuzzy as fuzz\nimport sys\nfrom cerberus import Validator\nfrom types import SimpleNamespace\n\nCONFIG_FILE = \"./resources/config/config.json\"\nANTE_CONFIG_FILE = \"./resources/config/antecedent_config.json\"\nCONFIG_VALIDATION = \"./../resources/validation/config_schema.json\"\n\n__config = None\n__ante_config = None\n\n\ndef get_config(config_file=CONFIG_FILE, reload=False):\n \"\"\"\n @param config_file: config file path (default: \"./resources/config/config.json\")\n @param reload: whether or not to reload config file (default: False)\n\n @returns config as namespace\n \"\"\"\n global __config\n config_file = config_file if config_file is not None else CONFIG_FILE\n if __config is None or config_file != CONFIG_FILE or reload:\n with open(config_file) as f:\n __config = json.load(f, object_hook=lambda d: SimpleNamespace(**d))\n return __config\n\n\ndef get_ante_config(config_file=ANTE_CONFIG_FILE, reload=False):\n \"\"\"\n Loads antecedent config. If no config file is available yet,\n initial config file gets recreated from python config file.\n\n @param config_file: antecedent config file path (default: \"./resources/config/antecedent_config.json\")\n @param reload: whether or not to reload config file (default: False)\n\n @returns config as namespace\n \"\"\"\n global __ante_config\n config_file = config_file if config_file is not None else ANTE_CONFIG_FILE\n if __ante_config is None or config_file != ANTE_CONFIG_FILE or reload:\n try:\n with open(config_file) as f:\n __ante_config = json.load(f)\n except FileNotFoundError:\n # if file is deleted, generate new one from convenience file (default antecedents!!)\n from resources.config.antecedent_config import antecedents as ac\n # if no json file of ante_config is available, generate from antecedent_config.py\n with open(config_file, 'w') as fp:\n json.dump(ac, fp)\n __ante_config = ac\n return __ante_config\n\n\ndef plot_membership(var_name, x, mfx, save=False):\n \"\"\"\n Plot membership function.\n Defuzzification of a membership function, returning a defuzzified value of the function at x, using various \n defuzzification methods (COG,MOM,SOM,LOM)\n\n @param var_name: Name of variable\n @param x: Independent variable (1d array or iterable, length N)\n @param mfx: Fuzzy membership function (1d array of iterable, length N)\n \n \"\"\"\n # Defuzzify this membership function five ways\n print(\"--- \", var_name, \", x: \", x, \" mfx: \", mfx)\n defuzz_centroid = fuzz.defuzz(x, mfx, 'centroid') # Same as skfuzzy.centroid\n # defuzz_bisector = fuzz.defuzz(x, mfx, 'bisector')\n defuzz_mom = fuzz.defuzz(x, mfx, 'mom')\n defuzz_som = fuzz.defuzz(x, mfx, 'som')\n defuzz_lom = fuzz.defuzz(x, mfx, 'lom')\n\n # Collect info for vertical lines\n # labels = ['centroid', 'bisector', 'mean of maximum', 'min of maximum', 'max of maximum']\n labels = ['COG', 'Mean of Maximum', 'Min of Maximum', 'Max of Maximum']\n xvals = [defuzz_centroid,\n # defuzz_bisector,\n defuzz_mom,\n defuzz_som,\n defuzz_lom]\n colors = [\"b\", \"g\", \"r\", \"y\", \"m\"]\n ymax = [fuzz.interp_membership(x, mfx, i) for i in xvals]\n\n # Display and compare defuzzification results against membership function\n plt.figure(figsize=(8, 4))\n\n plt.plot(x, mfx, 'k')\n for xv, y, label, color in zip(xvals, ymax, labels, colors):\n plt.vlines(xv, 0, y, label=label, color=color)\n plt.ylabel('Zugehörigkeitswert')\n plt.xlabel('Diskursuniversum ({})'.format(var_name))\n 
#plt.ylabel('Fuzzy membership')\n #plt.xlabel('Universe variable ({})'.format(var_name))\n #plt.ylim(-0.1, 1.1)\n plt.legend(loc=2)\n\n if save:\n plt.savefig(f\"./../defuzz_{var_name}.png\",dpi=300)\n\n plt.show()\n\n\ndef get_logger(name, activate_console_logs=True, log_file=get_config().resource_files.log_file, log_level=logging.INFO):\n '''\n Returns logger which logs to console on a specified level\n\n @param activate_console_logs: If true, logs are also printed in the console. Default: True\n @param log_file: File to write logs to. Default: config.resource_files.log_file\n @param log_level: Level of which to log events\n\n @returns Logger\n '''\n logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n rootLogger = logging.getLogger(name)\n\n fileHandler = logging.FileHandler(log_file)\n fileHandler.setFormatter(logFormatter)\n fileHandler.setLevel(log_level)\n rootLogger.addHandler(fileHandler)\n\n if activate_console_logs:\n consoleHandler = logging.StreamHandler() # (sys.stdout)\n consoleHandler.setFormatter(logFormatter)\n consoleHandler.setLevel(log_level)\n rootLogger.addHandler(consoleHandler)\n rootLogger.setLevel(logging.DEBUG)\n\n return rootLogger\n\n\n############################ KNOWLEDGE AQUISITION ############################\n\ndef create_exclusion_criteria(name, label, help_txt, section, method_rating):\n \"\"\"\n Adds rating of criteria to dataframe and returns antecedent configuration\n to be added to ante_config\n Attention: Formulate Boolean criteria, that application of the method is\n possible when input variable >= Rating of method in dataframe\n\n @param name: name of criterion\n @param label: label of criterion\n @param help_txt: help text\n @param section: section to display criterion in\n @param method_rating: Rating for every available XAI-method (1d array, length of available methods)\n \"\"\"\n excl_criteria_df = pd.read_csv(get_config().resource_files.rating_bool, index_col=[0])\n assert len(method_rating) == len(\n excl_criteria_df.columns), f\"Length of given method ratings ({len(method_rating)}) doesn't match length of \" \\\n f\"rating dataframe ({len(excl_criteria_df.columns)})\"\n\n # add to boolean rating dataframe\n excl_criteria_df.loc[name] = method_rating\n # save dataframe back\n excl_criteria_df.to_csv(get_config().resource_files.rating_bool)\n\n return {\n \"label\": label,\n \"type\": \"exclusion_criteria\",\n \"dtypes\": {\n \"fuzzy\": \"bool\",\n \"crisp\": \"bool\"\n },\n \"frontend\": {\n \"type\": \"checkbox\",\n \"section\": section,\n \"help\": help_txt,\n \"initialValue\": True,\n \"rating\": \"bool\"\n }\n }\n\n\ndef add_method(name, label, visualization, exclusion_ratings, fuzzy_ratings):\n \"\"\"\n Add methods to backend application of XAIR\n\n @param name: name of method\n @param label: label to be displayed\n @param visualization: 1 if visualization method, 0 otherwise\n @param exclusion_ratings: 1d array of method rating regarding exclusion criteria (length: number of exclusion criteria)\n @param fuzzy_ratings: 1d array of method rating regarding fuzzy criteria (length: number of all possible fuzzy criteria levels)\n\n \"\"\"\n # add exclusion criteria rating\n excl_criteria_df = pd.read_csv(get_config().resource_files.rating_bool, index_col=[0])\n excl_criteria_df[label] = exclusion_ratings\n excl_criteria_df.to_csv(get_config().resource_files.rating_bool)\n\n # add fuzzy rating\n fuzzy_df = pd.read_csv(get_config().resource_files.rating_fuzzy, index_col=[0, 1])\n 
fuzzy_df[label] = fuzzy_ratings\n fuzzy_df.to_csv(get_config().resource_files.rating_fuzzy)\n\n # add to alternatives in config\n with open(get_config().resource_files.consequent_config) as f:\n c = json.load(f)\n c[name] = {'label': label,\n 'visualization': visualization}\n with open(get_config().resource_files.consequent_config, 'w') as fp:\n json.dump(c, fp)\n\n return True\n\n\ndef create_criteria(label,\n help_txt,\n crit_type,\n standalone_impact,\n disable_processing,\n section,\n input_type,\n rating,\n universe,\n mem_funcs,\n dtypes,\n init_value=None,\n max_value=None,\n min_value=None\n ):\n \"\"\"\n Create criterion JSON format from input values\n\n @retuns criterion JSON structure\n \"\"\"\n\n crit = {\"label\": label,\n \"universe\": universe,\n \"mem_funcs\": mem_funcs,\n \"rating\": list(rating.keys()),\n \"type\": crit_type,\n \"dtypes\": dtypes,\n \"rules\": {\n \"standalone_impact\": standalone_impact,\n \"disable_processing\": disable_processing\n },\n \"frontend\": {\n \"type\": input_type,\n \"section\": section,\n \"help\": help_txt,\n \"rating\": rating\n }}\n if input_type not in [\"list\", \"text\"]:\n assert init_value is not None, \"Initial value for frontend must be given for number/range inputs.\"\n assert max_value is not None, \"Max value for frontend must be given for number/range inputs.\"\n assert min_value is not None, f\"Min value for frontend must be given for number/range inputs. ({min_value})\"\n\n crit[\"frontend\"][\"initialValue\"] = init_value\n crit[\"frontend\"][\"max\"] = max_value\n crit[\"frontend\"][\"min\"] = min_value\n crit[\"frontend\"][\"range_min\"] = list(rating.values())[0]\n crit[\"frontend\"][\"range_max\"] = list(rating.values())[-1]\n\n return crit\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.legend", "matplotlib.pyplot.vlines", "matplotlib.pyplot.figure", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.show" ] ]
BrianPugh/pugh_torch
[ "d620a518d78ec03556c5089bfc76e4cf7bd0cd70" ]
[ "pugh_torch/tests/mappings/test_color.py" ]
[ "import pytest\nimport numpy as np\nimport pugh_torch as pt\n\n\ndef test_turbo_auto_range():\n x = np.arange(20).reshape(4, 5)\n\n actual = pt.mappings.turbo(x)\n\n assert actual.shape == (4, 5, 3)\n\n expected = np.array(\n [\n [\n [0.18995, 0.07176, 0.23217],\n [0.24234, 0.21941, 0.56942],\n [0.27103, 0.35926, 0.81156],\n [0.27543, 0.50115, 0.96594],\n [0.23288, 0.62923, 0.99202],\n ],\n [\n [0.13886, 0.76279, 0.8955],\n [0.09267, 0.86554, 0.7623],\n [0.17377, 0.94053, 0.61938],\n [0.35043, 0.98477, 0.45002],\n [0.56026, 0.99873, 0.28623],\n ],\n [\n [0.70553, 0.97255, 0.21032],\n [0.84133, 0.89986, 0.20926],\n [0.93909, 0.80439, 0.22744],\n [0.99163, 0.68408, 0.20706],\n [0.99153, 0.54036, 0.1491],\n ],\n [\n [0.94977, 0.37729, 0.07905],\n [0.88066, 0.25334, 0.03521],\n [0.77377, 0.15028, 0.01148],\n [0.64223, 0.0738, 0.00401],\n [0.4796, 0.01583, 0.01055],\n ],\n ]\n )\n\n assert np.allclose(actual, expected)\n\n\ndef test_turbo_many_dim():\n shape = (5, 5, 5, 5, 5)\n x = np.random.rand(*shape)\n actual = pt.mappings.turbo(x)\n assert actual.shape == (*shape, 3)\n" ]
[ [ "numpy.allclose", "numpy.array", "numpy.random.rand", "numpy.arange" ] ]
adams314/health-equity-tracker
[ "2c6b63381a79227009376a255325d43300dda7cf" ]
[ "python/tests/test_gcs_to_bq.py" ]
[ "import json\nfrom datetime import datetime, timezone\nfrom textwrap import dedent\nfrom unittest import TestCase\nfrom unittest.mock import MagicMock, Mock, patch\n\nimport numpy as np\nfrom freezegun import freeze_time\nfrom pandas import DataFrame\nfrom pandas.testing import assert_frame_equal\n\nfrom ingestion import gcs_to_bq_util # pylint: disable=no-name-in-module\n\n\nclass GcsToBqTest(TestCase):\n\n _test_data = [[\"label1\", \"label2\", \"label3\"],\n [\"valuea\", \"valueb\", \"valuec\"],\n [\"valued\", \"valuee\", \"valuef\"]]\n\n def testLoadValuesBlobAsDataframe(self):\n \"\"\"Tests that data in json list format is loaded into a\n pandas.DataFrame object using the first row as a header.\"\"\"\n mock_attrs = {\n 'download_as_string.return_value': json.dumps(self._test_data)}\n mock_blob = Mock(**mock_attrs)\n frame = gcs_to_bq_util.load_values_blob_as_dataframe(mock_blob)\n\n self.assertListEqual(list(frame.columns.array),\n [\"label1\", \"label2\", \"label3\"])\n self.assertEqual(frame.size, 6)\n test_frame = DataFrame(\n data=self._test_data[1:], columns=self._test_data[0], index=[1, 2])\n assert_frame_equal(frame, test_frame)\n\n @freeze_time(\"2020-01-01\")\n def testAppendDataframeToBq_AutoSchema(self):\n \"\"\"Tests that autodetect is used when no column_types are provided to\n append_dataframe_to_bq.\"\"\"\n test_frame = DataFrame(\n data=self._test_data[1:], columns=self._test_data[0], index=[1, 2])\n\n with patch('ingestion.gcs_to_bq_util.bigquery.Client') as mock_client:\n # Set up mock calls\n mock_instance = mock_client.return_value\n mock_table = Mock()\n mock_instance.dataset.return_value = mock_table\n mock_table.table.return_value = 'test-project.test-dataset.table'\n\n gcs_to_bq_util.append_dataframe_to_bq(\n test_frame.copy(deep=True), \"test-dataset\", \"table\")\n\n mock_instance.load_table_from_json.assert_called()\n call_args = mock_instance.load_table_from_json.call_args\n test_frame['ingestion_ts'] = datetime(\n 2020, 1, 1, tzinfo=timezone.utc).strftime(\n \"%Y-%m-%d %H:%M:%S.%f %Z\")\n self.assertEqual(call_args.args[0],\n json.loads(test_frame.to_json(orient='records')))\n job_config = call_args.kwargs['job_config']\n self.assertTrue(job_config.autodetect)\n\n @freeze_time(\"2020-01-01\")\n def testAppendDataframeToBq_IgnoreColModes(self):\n \"\"\"Tests that col_modes is ignored when no column_types are provided\n to append_dataframe_to_bq.\"\"\"\n test_frame = DataFrame(\n data=self._test_data[1:], columns=self._test_data[0], index=[1, 2])\n\n with patch('ingestion.gcs_to_bq_util.bigquery.Client') as mock_client:\n # Set up mock calls\n mock_instance = mock_client.return_value\n mock_table = Mock()\n mock_instance.dataset.return_value = mock_table\n mock_table.table.return_value = 'test-project.test-dataset.table'\n\n gcs_to_bq_util.append_dataframe_to_bq(\n test_frame.copy(deep=True), \"test-dataset\", \"table\",\n col_modes={'label1': 'REPEATED', 'label2': 'REQUIRED'})\n\n mock_instance.load_table_from_json.assert_called()\n call_args = mock_instance.load_table_from_json.call_args\n test_frame['ingestion_ts'] = datetime(\n 2020, 1, 1, tzinfo=timezone.utc).strftime(\n \"%Y-%m-%d %H:%M:%S.%f %Z\")\n self.assertEqual(call_args.args[0],\n json.loads(test_frame.to_json(orient='records')))\n job_config = call_args.kwargs['job_config']\n self.assertTrue(job_config.autodetect)\n\n @freeze_time(\"2020-01-01\")\n def testAppendDataframeToBq_SpecifySchema(self):\n \"\"\"Tests that the BigQuery schema is properly defined when column_types\n are provided to 
append_dataframe_to_bq.\"\"\"\n test_frame = DataFrame(\n data=self._test_data[1:], columns=self._test_data[0], index=[1, 2])\n\n with patch('ingestion.gcs_to_bq_util.bigquery.Client') as mock_client:\n # Set up mock calls\n mock_instance = mock_client.return_value\n mock_table = Mock()\n mock_instance.dataset.return_value = mock_table\n mock_table.table.return_value = 'test-project.test-dataset.table'\n\n column_types = {label: 'STRING' for label in test_frame.columns}\n col_modes = {'label1': 'REPEATED',\n 'label2': 'REQUIRED'}\n gcs_to_bq_util.append_dataframe_to_bq(\n test_frame.copy(deep=True), 'test-dataset', 'table',\n column_types=column_types, col_modes=col_modes)\n\n mock_instance.load_table_from_json.assert_called()\n call_args = mock_instance.load_table_from_json.call_args\n test_frame['ingestion_ts'] = datetime(\n 2020, 1, 1, tzinfo=timezone.utc).strftime(\n '%Y-%m-%d %H:%M:%S.%f %Z')\n self.assertEqual(call_args.args[0],\n json.loads(test_frame.to_json(orient='records')))\n job_config = call_args.kwargs['job_config']\n self.assertFalse(job_config.autodetect)\n\n expected_cols = ['label1', 'label2', 'label3', 'ingestion_ts']\n expected_types = ['STRING', 'STRING', 'STRING', 'TIMESTAMP']\n expected_modes = ['REPEATED', 'REQUIRED', 'NULLABLE', 'NULLABLE']\n self.assertListEqual([field.name for field in job_config.schema],\n expected_cols)\n self.assertListEqual(\n [field.field_type for field in job_config.schema],\n expected_types)\n self.assertListEqual([field.mode for field in job_config.schema],\n expected_modes)\n\n @patch('ingestion.gcs_to_bq_util.storage.Client')\n def testLoadCsvAsDataFrame_ParseTypes(self, mock_bq: MagicMock):\n # Write data to an temporary file\n test_file_path = '/tmp/test_file.csv'\n test_data = dedent(\n \"\"\"\n col1,col2,col3,col4\n 20201209,13,text,\"2,937\"\n 20210105,\"1,400\",string,\n \"\"\")\n with open(test_file_path, 'w') as f:\n f.write(test_data)\n\n df = gcs_to_bq_util.load_csv_as_dataframe(\n 'gcs_bucket', 'test_file.csv', parse_dates=['col1'], thousands=',')\n # With parse_dates, col1 should be interpreted as numpy datetime. With\n # thousands=',', numeric columns should be interpreted correctly even if\n # they are written as strings with commas. Numeric cols with null values\n # are inferred as floats.\n expected_types = {'col1': np.dtype('datetime64[ns]'), 'col2': np.int64,\n 'col3': np.object, 'col4': np.float64}\n for col in df.columns:\n self.assertEqual(df[col].dtype, expected_types[col])\n\n # Re-write the test data since load_csv_as_dataframe removes the file.\n with open(test_file_path, 'w') as f:\n f.write(test_data)\n df = gcs_to_bq_util.load_csv_as_dataframe('gcs_bucket', 'test_file.csv')\n # Without the additional read_csv args, the data are inferred to the\n # default np.object type.\n expected_types = {'col1': np.int64, 'col2': np.object,\n 'col3': np.object, 'col4': np.object}\n for col in df.columns:\n self.assertEqual(df[col].dtype, expected_types[col])\n" ]
[ [ "pandas.DataFrame", "pandas.testing.assert_frame_equal", "numpy.dtype" ] ]
akashsengupta1997/HierachicalProbalistic3DHuman
[ "9893133313afa0cc22323263b1df16871c36ae74" ]
[ "utils/label_conversions.py" ]
[ "import numpy as np\nimport torch\n\n\nCOCO_JOINTS = {\n 'Right Ankle': 16, 'Right Knee': 14, 'Right Hip': 12,\n 'Left Hip': 11, 'Left Knee': 13, 'Left Ankle': 15,\n 'Right Wrist': 10, 'Right Elbow': 8, 'Right Shoulder': 6,\n 'Left Shoulder': 5, 'Left Elbow': 7, 'Left Wrist': 9,\n 'Right Ear': 4, 'Left Ear': 3, 'Right Eye': 2, 'Left Eye': 1,\n 'Nose': 0\n}\n\n# The SMPL model (im smpl_official.py) returns a large superset of joints.\n# Different subsets are used during training - e.g. H36M 3D joints convention and COCO 2D joints convention.\n# Joint label conversions from SMPL to H36M/COCO/LSP\nALL_JOINTS_TO_COCO_MAP = [24, 26, 25, 28, 27, 16, 17, 18, 19, 20, 21, 1, 2, 4, 5, 7, 8] # Using OP Hips\nALL_JOINTS_TO_H36M_MAP = list(range(73, 90))\nH36M_TO_J17 = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10, 0, 7, 9]\nH36M_TO_J14 = H36M_TO_J17[:14]\n\n# Joint label and body part seg label matching\n# 24 part seg: COCO Joints\nTWENTYFOUR_PART_SEG_TO_COCO_JOINTS_MAP = {19: 7,\n 21: 7,\n 20: 8,\n 22: 8,\n 4: 9,\n 3: 10,\n 12: 13,\n 14: 13,\n 11: 14,\n 13: 14,\n 5: 15,\n 6: 16}\n\n\ndef convert_densepose_seg_to_14part_labels(densepose_seg):\n \"\"\"\n Convert 24 body-part labels (DensePose convention) to 14 body-part labels.\n \"\"\"\n if isinstance(densepose_seg, torch.Tensor):\n fourteen_part_seg = torch.zeros_like(densepose_seg)\n elif isinstance(densepose_seg, np.ndarray):\n fourteen_part_seg = np.zeros_like(densepose_seg)\n\n fourteen_part_seg[densepose_seg == 1] = 1\n fourteen_part_seg[densepose_seg == 2] = 1\n fourteen_part_seg[densepose_seg == 3] = 11\n fourteen_part_seg[densepose_seg == 4] = 12\n fourteen_part_seg[densepose_seg == 5] = 14\n fourteen_part_seg[densepose_seg == 6] = 13\n fourteen_part_seg[densepose_seg == 7] = 8\n fourteen_part_seg[densepose_seg == 8] = 6\n fourteen_part_seg[densepose_seg == 9] = 8\n fourteen_part_seg[densepose_seg == 10] = 6\n fourteen_part_seg[densepose_seg == 11] = 9\n fourteen_part_seg[densepose_seg == 12] = 7\n fourteen_part_seg[densepose_seg == 13] = 9\n fourteen_part_seg[densepose_seg == 14] = 7\n fourteen_part_seg[densepose_seg == 15] = 2\n fourteen_part_seg[densepose_seg == 16] = 4\n fourteen_part_seg[densepose_seg == 17] = 2\n fourteen_part_seg[densepose_seg == 18] = 4\n fourteen_part_seg[densepose_seg == 19] = 3\n fourteen_part_seg[densepose_seg == 20] = 5\n fourteen_part_seg[densepose_seg == 21] = 3\n fourteen_part_seg[densepose_seg == 22] = 5\n fourteen_part_seg[densepose_seg == 23] = 10\n fourteen_part_seg[densepose_seg == 24] = 10\n\n return fourteen_part_seg\n\n\ndef convert_multiclass_to_binary_labels(multiclass_labels):\n \"\"\"\n Converts multiclass segmentation labels into a binary mask.\n \"\"\"\n if isinstance(multiclass_labels, torch.Tensor):\n binary_labels = torch.zeros_like(multiclass_labels)\n elif isinstance(multiclass_labels, np.ndarray):\n binary_labels = np.zeros_like(multiclass_labels)\n\n binary_labels[multiclass_labels != 0] = 1\n\n return binary_labels\n\n\ndef convert_2Djoints_to_gaussian_heatmaps(joints2D, img_wh, std=4):\n \"\"\"\n :param joints2D: (N, 2) array, 2D joint locations.\n :return heatmaps: (img_wh, img_wh, N) array, 2D joint heatmaps (channels last).\n \"\"\"\n xx, yy = np.meshgrid(np.arange(img_wh),\n np.arange(img_wh))\n xx = xx[None, :, :].astype(np.float32)\n yy = yy[None, :, :].astype(np.float32)\n\n j2d_u = joints2D[:, 0, None, None]\n j2d_v = joints2D[:, 1, None, None]\n heatmap = np.exp(-(((xx - j2d_u) / std) ** 2) / 2 - (((yy - j2d_v) / std) ** 2) / 2).transpose(1, 2, 0)\n return heatmap\n\n\ndef 
convert_2Djoints_to_gaussian_heatmaps_torch(joints2D,\n img_wh,\n std=4):\n \"\"\"\n :param joints2D: (B, N, 2) tensor - batch of 2D joints.\n :param img_wh: int, dimensions of square heatmaps\n :param std: standard deviation of gaussian blobs\n :return heatmaps: (B, N, img_wh, img_wh) - batch of 2D joint heatmaps (channels first).\n \"\"\"\n device = joints2D.device\n\n xx, yy = torch.meshgrid(torch.arange(img_wh, device=device),\n torch.arange(img_wh, device=device))\n xx = xx[None, None, :, :].float()\n yy = yy[None, None, :, :].float()\n\n j2d_u = joints2D[:, :, 0, None, None] # Horizontal coord (columns)\n j2d_v = joints2D[:, :, 1, None, None] # Vertical coord (rows)\n heatmap = torch.exp(-(((xx - j2d_v) / std) ** 2) / 2 - (((yy - j2d_u) / std) ** 2) / 2)\n return heatmap\n\n\ndef convert_heatmaps_to_2Djoints_coordinates_torch(joints2D_heatmaps,\n eps=1e-6):\n \"\"\"\n Convert 2D joint heatmaps into coordinates using argmax.\n :param joints2D_heatmaps: (N, K, H, W) array of 2D joint heatmaps.\n :param eps: heatmap max threshold to count as detected joint.\n :return: joints2D: (N, K, 2) array of 2D joint coordinates.\n joints2D_vis: (N, K) bool array of 2D joint visibilties.\n \"\"\"\n batch_size = joints2D_heatmaps.shape[0]\n num_joints = joints2D_heatmaps.shape[1]\n width = joints2D_heatmaps.shape[3]\n\n # Joints 2D given by max heatmap indices.\n # Since max and argmax are over batched 2D arrays, first flatten to 1D.\n max_vals_flat, max_indices_flat = torch.max(joints2D_heatmaps.view(batch_size, num_joints, -1),\n dim=-1) # (N, K)\n # Convert 1D max indices to 2D max indices i.e. (x, y) coordinates.\n joints2D = torch.zeros(batch_size, num_joints, 2, device=joints2D_heatmaps.device) # (N, K, 2)\n joints2D[:, :, 0] = max_indices_flat % width # x-coordinate\n joints2D[:, :, 1] = torch.floor(max_indices_flat / float(width)) # y-coordinate\n\n # If heatmap is 0 everywhere (i.e. max value = 0), then no 2D coordinates\n # should be returned for that heatmap (i.e. joints2D not visible).\n # Following returns 1 for heatmaps with visible 2D coordinates (max val > eps) and -1 for heatmaps without.\n joints2D_vis = max_vals_flat > eps\n joints2D[torch.logical_not(joints2D_vis)] = -1\n\n return joints2D, joints2D_vis\n\n" ]
[ [ "torch.zeros", "numpy.zeros_like", "torch.arange", "numpy.exp", "torch.logical_not", "numpy.arange", "torch.zeros_like", "torch.exp" ] ]
sorami/transformers
[ "a1cecf55c1cd622acc929671c67dfc43ea943df4" ]
[ "src/transformers/trainer.py" ]
[ "# coding=utf-8\n# Copyright 2020-present the HuggingFace Inc. team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThe Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task.\n\"\"\"\n\nimport collections\nimport inspect\nimport math\nimport os\nimport re\nimport shutil\nimport time\nimport warnings\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union\n\n\n# Integrations must be imported before ML frameworks:\nfrom .integrations import ( # isort: split\n default_hp_search_backend,\n get_reporting_integration_callbacks,\n hp_params,\n is_fairscale_available,\n is_optuna_available,\n is_ray_tune_available,\n run_hp_search_optuna,\n run_hp_search_ray,\n init_deepspeed,\n)\n\nimport numpy as np\nimport torch\nfrom packaging import version\nfrom torch import nn\nfrom torch.utils.data.dataloader import DataLoader\nfrom torch.utils.data.dataset import Dataset\nfrom torch.utils.data.distributed import DistributedSampler\nfrom torch.utils.data.sampler import RandomSampler, SequentialSampler\n\nfrom .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator\nfrom .file_utils import WEIGHTS_NAME, is_apex_available, is_datasets_available, is_in_notebook, is_torch_tpu_available\nfrom .modeling_utils import PreTrainedModel\nfrom .models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING\nfrom .optimization import Adafactor, AdamW, get_scheduler\nfrom .tokenization_utils_base import PreTrainedTokenizerBase\nfrom .trainer_callback import (\n CallbackHandler,\n DefaultFlowCallback,\n PrinterCallback,\n ProgressCallback,\n TrainerCallback,\n TrainerControl,\n TrainerState,\n)\nfrom .trainer_pt_utils import (\n DistributedLengthGroupedSampler,\n DistributedTensorGatherer,\n LabelSmoother,\n LengthGroupedSampler,\n SequentialDistributedSampler,\n distributed_broadcast_scalars,\n distributed_concat,\n nested_concat,\n nested_detach,\n nested_numpify,\n nested_xla_mesh_reduce,\n reissue_pt_warnings,\n)\nfrom .trainer_utils import (\n PREFIX_CHECKPOINT_DIR,\n BestRun,\n EvalPrediction,\n HPSearchBackend,\n PredictionOutput,\n TrainOutput,\n default_compute_objective,\n default_hp_space,\n set_seed,\n speed_metrics,\n)\nfrom .training_args import ParallelMode, TrainingArguments\nfrom .utils import logging\n\n\n_is_native_amp_available = False\n\nDEFAULT_CALLBACKS = [DefaultFlowCallback]\nDEFAULT_PROGRESS_CALLBACK = ProgressCallback\n\nif is_in_notebook():\n from .utils.notebook import NotebookProgressCallback\n\n DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback\n\nif is_apex_available():\n from apex import amp\n\nif version.parse(torch.__version__) >= version.parse(\"1.6\"):\n _is_native_amp_available = True\n from torch.cuda.amp import autocast\n\nif is_datasets_available():\n import datasets\n\nif is_torch_tpu_available():\n import torch_xla.core.xla_model as xm\n import torch_xla.debug.metrics as met\n import torch_xla.distributed.parallel_loader as pl\n\nif 
is_fairscale_available():\n from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP\n from fairscale.optim import OSS\n from fairscale.optim.grad_scaler import ShardedGradScaler\n\n\nif TYPE_CHECKING:\n import optuna\n\nlogger = logging.get_logger(__name__)\n\n\ndef _model_unwrap(model: nn.Module) -> nn.Module:\n # since there could be multiple levels of wrapping, unwrap recursively\n if hasattr(model, \"module\"):\n return _model_unwrap(model.module)\n else:\n return model\n\n\nclass Trainer:\n \"\"\"\n Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers.\n\n Args:\n model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`):\n The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed.\n\n .. note::\n\n :class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel`\n provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as\n they work the same way as the 🤗 Transformers models.\n args (:class:`~transformers.TrainingArguments`, `optional`):\n The arguments to tweak for training. Will default to a basic instance of\n :class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in\n the current directory if not provided.\n data_collator (:obj:`DataCollator`, `optional`):\n The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`.\n Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of\n :func:`~transformers.DataCollatorWithPadding` otherwise.\n train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed.\n eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed.\n tokenizer (:class:`PreTrainedTokenizerBase`, `optional`):\n The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs the\n maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an\n interrupted training or reuse the fine-tuned model.\n model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`):\n A function that instantiates the model to be used. If provided, each call to\n :meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function.\n\n The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be\n able to choose different architectures according to hyper parameters (such as layer count, sizes of inner\n layers, dropout probabilities etc).\n compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):\n The function that will be used to compute metrics at evaluation. Must take a\n :class:`~transformers.EvalPrediction` and return a dictionary string to metric values.\n callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):\n A list of callbacks to customize the training loop. 
Will add those to the list of default callbacks\n detailed in :doc:`here <callback>`.\n\n If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method.\n optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, `optional`): A tuple\n containing the optimizer and the scheduler to use. Will default to an instance of\n :class:`~transformers.AdamW` on your model and a scheduler given by\n :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.\n\n Important attributes:\n\n - **model** -- Always points to the core model. If using a transformers model, it will be a\n :class:`~transformers.PreTrainedModel` subclass.\n - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the\n original model. This is the model that should be used for the forward pass. For example, under ``DeepSpeed``,\n the inner model is wrapped in ``DeepSpeed`` and then again in ``torch.nn.DistributedDataParallel``. If the\n inner model hasn't been wrapped, then ``self.model_wrapped`` is the same as ``self.model``.\n - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from\n data parallelism, this means some of the model layers are split on different GPUs).\n
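\n Example (a minimal, illustrative sketch; assumes ``model``, ``training_args``, ``train_ds`` and\n ``eval_ds`` have already been built)::\n\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_ds,\n eval_dataset=eval_ds,\n )\n trainer.train()\n metrics = trainer.evaluate()\n \"\"\"\n\n def __init__(\n self,\n model: Union[PreTrainedModel, torch.nn.Module] = None,\n args: TrainingArguments = None,\n data_collator: Optional[DataCollator] = None,\n train_dataset: Optional[Dataset] = None,\n eval_dataset: Optional[Dataset] = None,\n tokenizer: Optional[\"PreTrainedTokenizerBase\"] = None,\n model_init: Callable[[], PreTrainedModel] = None,\n compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,\n callbacks: Optional[List[TrainerCallback]] = None,\n optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),\n ):\n if args is None:\n output_dir = \"tmp_trainer\"\n logger.info(f\"No `TrainingArguments` passed, using `output_dir={output_dir}`.\")\n args = TrainingArguments(output_dir=output_dir)\n self.args = args\n # Seed must be set before instantiating the model when using model_init.\n set_seed(self.args.seed)\n self.hp_name = None\n self.deepspeed = None\n\n if model is None:\n if model_init is not None:\n self.model_init = model_init\n model = self.call_model_init()\n else:\n raise RuntimeError(\"`Trainer` requires either a `model` or `model_init` argument\")\n else:\n if model_init is not None:\n warnings.warn(\n \"`Trainer` requires either a `model` or `model_init` argument, but not both. \"\n \"`model_init` will overwrite your model when calling the `train` method. 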
This will become a fatal error in the next release.\",\n FutureWarning,\n )\n self.model_init = model_init\n\n if hasattr(model, \"is_parallelizable\") and model.is_parallelizable and model.model_parallel:\n self.is_model_parallel = True\n else:\n self.is_model_parallel = False\n\n default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer)\n self.data_collator = data_collator if data_collator is not None else default_collator\n self.train_dataset = train_dataset\n self.eval_dataset = eval_dataset\n self.tokenizer = tokenizer\n\n # Model parallel\n if not self.is_model_parallel:\n model = model.to(args.device)\n else:\n # Force n_gpu to 1 to avoid DataParallel.\n self.args._n_gpu = 1\n\n # later use `self.model is self.model_wrapped` to check if it's wrapped or not\n self.model_wrapped = model\n self.model = model\n\n self.compute_metrics = compute_metrics\n self.optimizer, self.lr_scheduler = optimizers\n if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None):\n raise RuntimeError(\n \"Passing a `model_init` is incompatible with providing the `optimizers` argument. \"\n \"You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method.\"\n )\n default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to)\n callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks\n self.callback_handler = CallbackHandler(\n callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler\n )\n self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK)\n\n # Will be set to True by `self._setup_loggers()` on first call to `self.log()`.\n self._loggers_initialized = False\n\n # Create output directory if needed\n if self.is_world_process_zero():\n os.makedirs(self.args.output_dir, exist_ok=True)\n if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel):\n # Set an xla_device flag on the model's config.\n # We'll find a more elegant solution that does not need this flag in the future.\n self.model.config.xla_device = True\n if not callable(self.data_collator) and callable(getattr(self.data_collator, \"collate_batch\", None)):\n raise ValueError(\"The `data_collator` should be a simple callable (function, class with `__call__`).\")\n\n if args.max_steps > 0:\n logger.info(\"max_steps is given, it will override any value given in num_train_epochs\")\n\n # Enforce rules on using datasets with no __len__\n if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0:\n raise ValueError(\"train_dataset does not implement __len__, max_steps has to be specified\")\n if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n\n if is_datasets_available():\n if isinstance(train_dataset, datasets.Dataset):\n self._remove_unused_columns(self.train_dataset, description=\"training\")\n if isinstance(eval_dataset, datasets.Dataset):\n self._remove_unused_columns(self.eval_dataset, description=\"evaluation\")\n\n
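 # Setup Sharded DDP training\n # (fairscale's ShardedDDP shards optimizer state across data-parallel workers, a ZeRO-style\n # optimization that reduces per-GPU optimizer memory at the cost of some extra communication.)\n self.sharded_dpp = False\n if args.sharded_ddp:\n if args.deepspeed:\n raise ValueError(\n \"Using --sharded_ddp together with --deepspeed is not possible, deactivate one of those flags.\"\n )\n\n if args.local_rank == -1:\n raise ValueError(\"Using sharded DDP only works in distributed training.\")\n elif not is_fairscale_available():\n raise 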
ImportError(\"Sharded DDP training requires fairscale: `pip install fairscale`.\")\n else:\n self.sharded_dpp = True\n\n # Mixed precision setup\n self.use_apex = False\n self.use_amp = False\n self.fp16_backend = None\n\n if args.fp16:\n if args.fp16_backend == \"auto\":\n self.fp16_backend = \"amp\" if _is_native_amp_available else \"apex\"\n else:\n self.fp16_backend = args.fp16_backend\n logger.info(f\"Using {self.fp16_backend} fp16 backend\")\n\n if args.fp16 and not args.deepspeed: # deepspeed manages its own fp16\n if self.fp16_backend == \"amp\":\n self.use_amp = True\n self.scaler = ShardedGradScaler() if self.sharded_dpp else torch.cuda.amp.GradScaler()\n else:\n if not is_apex_available():\n raise ImportError(\n \"Using FP16 with APEX but APEX is not installed, please refer to https://www.github.com/nvidia/apex.\"\n )\n self.use_apex = True\n\n # Label smoothing\n if self.args.label_smoothing_factor != 0:\n self.label_smoother = LabelSmoother(epsilon=self.args.label_smoothing_factor)\n else:\n self.label_smoother = None\n\n self.state = TrainerState()\n self.control = TrainerControl()\n # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the\n # state at each call to self.log.\n self._total_flos = None\n self.hp_search_backend = None\n self.use_tune_checkpoints = False\n default_label_names = (\n [\"start_positions\", \"end_positions\"]\n if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values()\n else [\"labels\"]\n )\n self.label_names = default_label_names if self.args.label_names is None else self.args.label_names\n self.control = self.callback_handler.on_init_end(self.args, self.state, self.control)\n\n def add_callback(self, callback):\n \"\"\"\n Add a callback to the current list of :class:`~transformer.TrainerCallback`.\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will instantiate a member of that class.\n \"\"\"\n self.callback_handler.add_callback(callback)\n\n def pop_callback(self, callback):\n \"\"\"\n Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it.\n\n If the callback is not found, returns :obj:`None` (and no error is raised).\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will pop the first member of that class found in the list of callbacks.\n\n Returns:\n :class:`~transformer.TrainerCallback`: The callback removed, if found.\n \"\"\"\n return self.callback_handler.pop_callback(callback)\n\n def remove_callback(self, callback):\n \"\"\"\n Remove a callback from the current list of :class:`~transformer.TrainerCallback`.\n\n Args:\n callback (:obj:`type` or :class:`~transformer.TrainerCallback`):\n A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`.\n In the first case, will remove the first member of that class found in the list of callbacks.\n \"\"\"\n self.callback_handler.remove_callback(callback)\n\n def _remove_unused_columns(self, dataset: \"datasets.Dataset\", description: Optional[str] = None):\n if not self.args.remove_unused_columns:\n return\n # Inspect model forward signature to keep only the arguments it accepts.\n signature = inspect.signature(self.model.forward)\n 
signature_columns = list(signature.parameters.keys())\n # Labels may be named label or label_ids, the default data collator handles that.\n signature_columns += [\"label\", \"label_ids\"]\n columns = [k for k in signature_columns if k in dataset.column_names]\n ignored_columns = list(set(dataset.column_names) - set(signature_columns))\n dset_description = \"\" if description is None else f\"in the {description} set \"\n logger.info(\n f\"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}.\"\n )\n dataset.set_format(type=dataset.format[\"type\"], columns=columns)\n\n def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]:\n if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance(\n self.train_dataset, collections.abc.Sized\n ):\n return None\n\n # Gather the number of processes and this process index.\n if self.args.parallel_mode == ParallelMode.TPU:\n num_processes = xm.xrt_world_size()\n process_index = xm.get_ordinal()\n elif self.args.parallel_mode == ParallelMode.DISTRIBUTED:\n num_processes = torch.distributed.get_world_size()\n process_index = torch.distributed.get_rank()\n else:\n num_processes = 1\n process_index = 0\n\n # Build the sampler.\n if self.args.group_by_length:\n if num_processes <= 1:\n return LengthGroupedSampler(self.train_dataset, self.args.train_batch_size)\n else:\n return DistributedLengthGroupedSampler(\n self.train_dataset, self.args.train_batch_size, num_replicas=num_processes, rank=process_index\n )\n\n else:\n if num_processes <= 1:\n return RandomSampler(self.train_dataset)\n else:\n return DistributedSampler(self.train_dataset, num_replicas=num_processes, rank=process_index)\n\n def get_train_dataloader(self) -> DataLoader:\n \"\"\"\n Returns the training :class:`~torch.utils.data.DataLoader`.\n\n Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted\n to distributed training if necessary) otherwise.\n\n Subclass and override this method if you want to inject some custom behavior.\n \"\"\"\n if self.train_dataset is None:\n raise ValueError(\"Trainer: training requires a train_dataset.\")\n train_sampler = self._get_train_sampler()\n\n return DataLoader(\n self.train_dataset,\n batch_size=self.args.train_batch_size,\n sampler=train_sampler,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n num_workers=self.args.dataloader_num_workers,\n )\n\n def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]:\n if is_torch_tpu_available():\n return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())\n elif self.args.local_rank != -1:\n return SequentialDistributedSampler(eval_dataset)\n else:\n return SequentialSampler(eval_dataset)\n\n def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:\n \"\"\"\n Returns the evaluation :class:`~torch.utils.data.DataLoader`.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n If provided, will override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not\n accepted by the ``model.forward()`` method are automatically removed. 
It must implement :obj:`__len__`.\n \"\"\"\n if eval_dataset is None and self.eval_dataset is None:\n raise ValueError(\"Trainer: evaluation requires an eval_dataset.\")\n elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):\n self._remove_unused_columns(eval_dataset, description=\"evaluation\")\n eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset\n eval_sampler = self._get_eval_sampler(eval_dataset)\n\n return DataLoader(\n eval_dataset,\n sampler=eval_sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n num_workers=self.args.dataloader_num_workers,\n )\n\n def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:\n \"\"\"\n Returns the test :class:`~torch.utils.data.DataLoader`.\n\n Subclass and override this method if you want to inject some custom behavior.\n\n Args:\n test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`.\n \"\"\"\n if not isinstance(test_dataset, collections.abc.Sized):\n raise ValueError(\"test_dataset must implement __len__\")\n elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset):\n self._remove_unused_columns(test_dataset, description=\"test\")\n test_sampler = self._get_eval_sampler(test_dataset)\n\n # We use the same batch_size as for eval.\n return DataLoader(\n test_dataset,\n sampler=test_sampler,\n batch_size=self.args.eval_batch_size,\n collate_fn=self.data_collator,\n drop_last=self.args.dataloader_drop_last,\n )\n\n def create_optimizer_and_scheduler(self, num_training_steps: int):\n \"\"\"\n Setup the optimizer and the learning rate scheduler.\n\n We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the\n Trainer's init through :obj:`optimizers`, or subclass and override this method.\n \"\"\"\n if self.optimizer is None:\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0,\n },\n ]\n if self.args.adafactor:\n optimizer_cls = Adafactor\n optimizer_kwargs = {\"scale_parameter\": False, \"relative_step\": False}\n else:\n optimizer_cls = AdamW\n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n }\n optimizer_kwargs[\"lr\"] = self.args.learning_rate\n if self.sharded_dpp:\n self.optimizer = OSS(\n params=optimizer_grouped_parameters,\n optim=optimizer_cls,\n **optimizer_kwargs,\n )\n else:\n self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)\n\n if self.lr_scheduler is None:\n self.lr_scheduler = get_scheduler(\n self.args.lr_scheduler_type,\n self.optimizer,\n num_warmup_steps=self.args.warmup_steps,\n num_training_steps=num_training_steps,\n )\n\n def num_examples(self, dataloader: DataLoader) -> int:\n \"\"\"\n Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset.\n\n Will raise an exception if the underlying dataset does not implement the method :obj:`__len__`\n \"\"\"\n return len(dataloader.dataset)\n\n def _hp_search_setup(self, trial: Union[\"optuna.Trial\", Dict[str, Any]]):\n \"\"\" HP search setup code \"\"\"\n self._trial = trial\n\n if self.hp_search_backend is None or trial is None:\n return\n\n params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial\n for key, value in params.items():\n if not hasattr(self.args, key):\n raise AttributeError(\n f\"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`.\"\n )\n old_attr = getattr(self.args, key, None)\n # Casting value to the proper type\n if old_attr is not None:\n value = type(old_attr)(value)\n setattr(self.args, key, value)\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n logger.info(f\"Trial: {trial.params}\")\n\n def _report_to_hp_search(\n self, trial: Union[\"optuna.Trial\", Dict[str, Any]], epoch: int, metrics: Dict[str, float]\n ):\n if self.hp_search_backend is None or trial is None:\n return\n self.objective = self.compute_objective(metrics.copy())\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n import optuna\n\n trial.report(self.objective, epoch)\n if trial.should_prune():\n raise optuna.TrialPruned()\n elif self.hp_search_backend == HPSearchBackend.RAY:\n from ray import tune\n\n if self.state.global_step % self.args.save_steps == 0:\n self._tune_save_checkpoint()\n tune.report(objective=self.objective, **metrics)\n\n def _tune_save_checkpoint(self):\n from ray import tune\n\n if not self.use_tune_checkpoints:\n return\n with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:\n self.args.output_dir = checkpoint_dir\n output_dir = os.path.join(self.args.output_dir, f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\")\n self.save_model(output_dir)\n if self.is_world_process_zero():\n 
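# Also persist the Trainer state and the optimizer/scheduler states so a preempted Ray Tune\n # trial can resume from this checkpoint.\n 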
self.state.save_to_json(os.path.join(output_dir, \"trainer_state.json\"))\n torch.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n\n def call_model_init(self, trial=None):\n model_init_argcount = len(inspect.signature(self.model_init).parameters)\n if model_init_argcount == 0:\n model = self.model_init()\n elif model_init_argcount == 1:\n model = self.model_init(trial)\n else:\n raise RuntimeError(\"model_init should have 0 or 1 argument.\")\n\n if model is None:\n raise RuntimeError(\"model_init should not return None.\")\n\n return model\n\n def train(self, model_path: Optional[str] = None, trial: Union[\"optuna.Trial\", Dict[str, Any]] = None):\n \"\"\"\n Main training entry point.\n\n Args:\n model_path (:obj:`str`, `optional`):\n Local path to the model if the model to train has been instantiated from a local path. If present,\n training will resume from the optimizer/scheduler states loaded here.\n trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`):\n The trial run or the hyperparameter dictionary for hyperparameter search.\n \"\"\"\n # This might change the seed so needs to run first.\n self._hp_search_setup(trial)\n\n # Model re-init\n if self.model_init is not None:\n # Seed must be set before instantiating the model when using model_init.\n set_seed(self.args.seed)\n\n model = self.call_model_init(trial)\n if not self.is_model_parallel:\n model = model.to(self.args.device)\n\n self.model = model\n self.model_wrapped = model\n\n # Reinitializes optimizer and scheduler\n self.optimizer, self.lr_scheduler = None, None\n\n # Keeping track of whether we can call len() on the dataset or not\n train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized)\n\n # Data loader and number of training steps\n train_dataloader = self.get_train_dataloader()\n\n # Setting up training control variables:\n # number of training epochs: num_train_epochs\n # number of training steps per epoch: num_update_steps_per_epoch\n # total number of training steps to execute: max_steps\n if train_dataset_is_sized:\n num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps\n num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)\n if self.args.max_steps > 0:\n max_steps = self.args.max_steps\n num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int(\n self.args.max_steps % num_update_steps_per_epoch > 0\n )\n else:\n max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch)\n num_train_epochs = math.ceil(self.args.num_train_epochs)\n else:\n # see __init__. 
max_steps is set when the dataset has no __len__\n max_steps = self.args.max_steps\n num_train_epochs = 1\n num_update_steps_per_epoch = max_steps\n\n if self.args.deepspeed:\n model, optimizer, lr_scheduler = init_deepspeed(self, num_training_steps=max_steps)\n self.model = model.module\n self.model_wrapped = model # will get further wrapped in DDP\n self.deepspeed = model # DeepSpeedEngine object\n self.optimizer = optimizer\n self.lr_scheduler = lr_scheduler\n else:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n self.state = TrainerState()\n self.state.is_hyper_param_search = trial is not None\n\n # Check if saved optimizer or scheduler states exist\n self._load_optimizer_and_scheduler(model_path)\n\n model = self.model_wrapped\n\n # Mixed precision training with apex (torch < 1.6)\n if self.use_apex:\n model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)\n\n # Multi-gpu training (should be after apex fp16 initialization)\n if self.args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n\n # Distributed training (should be after apex fp16 initialization)\n if self.sharded_dpp:\n model = ShardedDDP(model, self.optimizer)\n elif self.args.local_rank != -1:\n model = torch.nn.parallel.DistributedDataParallel(\n model,\n device_ids=[self.args.local_rank],\n output_device=self.args.local_rank,\n find_unused_parameters=(\n not getattr(model.config, \"gradient_checkpointing\", False)\n if isinstance(model, PreTrainedModel)\n else True\n ),\n )\n # find_unused_parameters breaks checkpointing as per\n # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021\n\n # for the rest of this function `model` is the outside model, whether it was wrapped or not\n if model is not self.model:\n self.model_wrapped = model\n\n # important: at this point:\n # self.model is the Transformers Model\n # self.model_wrapped is DDP(Transformers Model), DDP(Deepspeed(Transformers Model)), etc.\n\n # Train!\n if is_torch_tpu_available():\n total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size()\n else:\n total_train_batch_size = (\n self.args.train_batch_size\n * self.args.gradient_accumulation_steps\n * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1)\n )\n\n num_examples = (\n self.num_examples(train_dataloader)\n if train_dataset_is_sized\n else total_train_batch_size * self.args.max_steps\n )\n\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Num Epochs = {num_train_epochs}\")\n logger.info(f\" Instantaneous batch size per device = {self.args.per_device_train_batch_size}\")\n logger.info(f\" Total train batch size (w. 
parallel, distributed & accumulation) = {total_train_batch_size}\")\n logger.info(f\" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_steps}\")\n\n self.state.epoch = 0\n start_time = time.time()\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n\n # Check if continuing training from a checkpoint\n if model_path and os.path.isfile(os.path.join(model_path, \"trainer_state.json\")):\n self.state = TrainerState.load_from_json(os.path.join(model_path, \"trainer_state.json\"))\n epochs_trained = self.state.global_step // num_update_steps_per_epoch\n if not self.args.ignore_data_skip:\n steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)\n steps_trained_in_current_epoch *= self.args.gradient_accumulation_steps\n else:\n steps_trained_in_current_epoch = 0\n\n logger.info(\" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(f\" Continuing training from global step {self.state.global_step}\")\n if not self.args.ignore_data_skip:\n logger.info(\n f\" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} \"\n \"batches in the first epoch.\"\n )\n\n # Update the references\n self.callback_handler.model = self.model\n self.callback_handler.optimizer = self.optimizer\n self.callback_handler.lr_scheduler = self.lr_scheduler\n self.callback_handler.train_dataloader = train_dataloader\n self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None\n self.state.trial_params = hp_params(trial) if trial is not None else None\n # This should be the same if the state has been saved but in case the training arguments changed, it's safer\n # to set this after the load.\n self.state.max_steps = max_steps\n self.state.num_train_epochs = num_train_epochs\n self.state.is_local_process_zero = self.is_local_process_zero()\n self.state.is_world_process_zero = self.is_world_process_zero()\n\n # tr_loss is a tensor to avoid synchronization of TPUs through .item()\n tr_loss = torch.tensor(0.0).to(self.args.device)\n # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses\n self._total_loss_scalar = 0.0\n self._globalstep_last_logged = 0\n self._total_flos = self.state.total_flos\n model.zero_grad()\n\n self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control)\n\n # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.\n if not self.args.ignore_data_skip:\n for epoch in range(epochs_trained):\n # We just need to begin an iteration to create the randomization of the sampler.\n for _ in train_dataloader:\n break\n\n for epoch in range(epochs_trained, num_train_epochs):\n if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):\n train_dataloader.sampler.set_epoch(epoch)\n\n if is_torch_tpu_available():\n parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(\n self.args.device\n )\n epoch_iterator = parallel_loader\n else:\n epoch_iterator = train_dataloader\n\n # Reset the past mems state at the beginning of each epoch if necessary.\n if self.args.past_index >= 0:\n self._past = None\n\n steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps\n self.control = 
self.callback_handler.on_epoch_begin(self.args, self.state, self.control)\n\n for step, inputs in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n continue\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0:\n self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control)\n\n if ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1:\n # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.\n with model.no_sync():\n tr_loss += self.training_step(model, inputs)\n else:\n tr_loss += self.training_step(model, inputs)\n self._total_flos += self.floating_point_ops(inputs)\n\n if (step + 1) % self.args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n steps_in_epoch <= self.args.gradient_accumulation_steps\n and (step + 1) == steps_in_epoch\n ):\n # Gradient clipping\n if self.args.max_grad_norm is not None and self.args.max_grad_norm > 0 and not self.deepspeed:\n # deepspeed does its own clipping\n\n if self.use_amp:\n # AMP: gradients need unscaling\n self.scaler.unscale_(self.optimizer)\n\n if hasattr(self.optimizer, \"clip_grad_norm\"):\n # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping\n self.optimizer.clip_grad_norm(self.args.max_grad_norm)\n else:\n # Revert to normal clipping otherwise, handling Apex or full precision\n torch.nn.utils.clip_grad_norm_(\n amp.master_params(self.optimizer) if self.use_apex else model.parameters(),\n self.args.max_grad_norm,\n )\n\n # Optimizer step\n if self.deepspeed:\n self.deepspeed.step()\n elif is_torch_tpu_available():\n xm.optimizer_step(self.optimizer)\n elif self.use_amp:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n self.optimizer.step()\n\n self.lr_scheduler.step()\n model.zero_grad()\n self.state.global_step += 1\n self.state.epoch = epoch + (step + 1) / steps_in_epoch\n self.control = self.callback_handler.on_step_end(self.args, self.state, self.control)\n\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)\n\n if self.control.should_epoch_stop or self.control.should_training_stop:\n break\n\n self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control)\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch)\n\n if self.args.tpu_metrics_debug or self.args.debug:\n if is_torch_tpu_available():\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n else:\n logger.warning(\n \"You enabled PyTorch/XLA debug metrics but you don't have a TPU \"\n \"configured. Check your training configuration if this is unexpected.\"\n )\n if self.control.should_training_stop:\n break\n\n if self.args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n logger.info(\"\\n\\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None:\n logger.info(\n f\"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).\"\n )\n if isinstance(self.model, PreTrainedModel):\n self.model = self.model.from_pretrained(self.state.best_model_checkpoint)\n if not self.is_model_parallel:\n self.model = self.model.to(self.args.device)\n else:\n state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME))\n self.model.load_state_dict(state_dict)\n\n if self.deepspeed:\n self.deepspeed.load_checkpoint(\n self.state.best_model_checkpoint, load_optimizer_states=False, load_lr_scheduler_states=False\n )\n\n metrics = speed_metrics(\"train\", start_time, self.state.max_steps)\n if self._total_flos is not None:\n self.store_flos()\n metrics[\"total_flos\"] = self.state.total_flos\n self.log(metrics)\n\n self.control = self.callback_handler.on_train_end(self.args, self.state, self.control)\n # add remaining tr_loss\n self._total_loss_scalar += tr_loss.item()\n\n return TrainOutput(self.state.global_step, self._total_loss_scalar / self.state.global_step, metrics)\n\n def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch):\n if self.control.should_log:\n logs: Dict[str, float] = {}\n tr_loss_scalar = tr_loss.item()\n # reset tr_loss to zero\n tr_loss -= tr_loss\n\n logs[\"loss\"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)\n # backward compatibility for pytorch schedulers\n logs[\"learning_rate\"] = (\n self.lr_scheduler.get_last_lr()[0]\n if version.parse(torch.__version__) >= version.parse(\"1.4\")\n else self.lr_scheduler.get_lr()[0]\n )\n self._total_loss_scalar += tr_loss_scalar\n self._globalstep_last_logged = self.state.global_step\n\n self.log(logs)\n\n metrics = None\n if self.control.should_evaluate:\n metrics = self.evaluate()\n self._report_to_hp_search(trial, epoch, metrics)\n\n if self.control.should_save:\n self._save_checkpoint(model, trial, metrics=metrics)\n self.control = self.callback_handler.on_save(self.args, self.state, self.control)\n\n def _save_checkpoint(self, model, trial, metrics=None):\n # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we\n # want to save.\n assert _model_unwrap(model) is self.model, \"internal model should be a reference to self.model\"\n\n # Save model checkpoint\n checkpoint_folder = f\"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}\"\n\n if self.hp_search_backend is not None and trial is not None:\n if self.hp_search_backend == HPSearchBackend.OPTUNA:\n run_id = trial.number\n else:\n from ray import tune\n\n run_id = tune.get_trial_id()\n run_name = self.hp_name(trial) if self.hp_name is not None else f\"run-{run_id}\"\n output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder)\n else:\n output_dir = os.path.join(self.args.output_dir, checkpoint_folder)\n\n self.store_flos()\n\n self.save_model(output_dir)\n if self.deepspeed:\n self.deepspeed.save_checkpoint(output_dir)\n\n # Save optimizer and scheduler\n if self.sharded_dpp:\n self.optimizer.consolidate_state_dict()\n\n if is_torch_tpu_available():\n xm.rendezvous(\"saving_optimizer_states\")\n xm.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n with warnings.catch_warnings(record=True) as caught_warnings:\n xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, 
\"scheduler.pt\"))\n reissue_pt_warnings(caught_warnings)\n elif self.is_world_process_zero() and not self.deepspeed:\n # deepspeed.save_checkpoint above saves model/optim/sched\n torch.save(self.optimizer.state_dict(), os.path.join(output_dir, \"optimizer.pt\"))\n with warnings.catch_warnings(record=True) as caught_warnings:\n torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, \"scheduler.pt\"))\n reissue_pt_warnings(caught_warnings)\n\n # Determine the new best metric / best model checkpoint\n if metrics is not None and self.args.metric_for_best_model is not None:\n metric_to_check = self.args.metric_for_best_model\n if not metric_to_check.startswith(\"eval_\"):\n metric_to_check = f\"eval_{metric_to_check}\"\n metric_value = metrics[metric_to_check]\n\n operator = np.greater if self.args.greater_is_better else np.less\n if (\n self.state.best_metric is None\n or self.state.best_model_checkpoint is None\n or operator(metric_value, self.state.best_metric)\n ):\n self.state.best_metric = metric_value\n self.state.best_model_checkpoint = output_dir\n\n # Save the Trainer state\n if self.is_world_process_zero():\n self.state.save_to_json(os.path.join(output_dir, \"trainer_state.json\"))\n\n # Maybe delete some older checkpoints.\n if self.is_world_process_zero():\n self._rotate_checkpoints(use_mtime=True)\n\n def _load_optimizer_and_scheduler(self, model_path):\n \"\"\"If optimizer and scheduler states exist, load them.\"\"\"\n if model_path is None:\n return\n\n if os.path.isfile(os.path.join(model_path, \"optimizer.pt\")) and os.path.isfile(\n os.path.join(model_path, \"scheduler.pt\")\n ):\n # Load in optimizer and scheduler states\n if is_torch_tpu_available():\n # On TPU we have to take some extra precautions to properly load the states on the right device.\n optimizer_state = torch.load(os.path.join(model_path, \"optimizer.pt\"), map_location=\"cpu\")\n with warnings.catch_warnings(record=True) as caught_warnings:\n lr_scheduler_state = torch.load(os.path.join(model_path, \"scheduler.pt\"), map_location=\"cpu\")\n reissue_pt_warnings(caught_warnings)\n\n xm.send_cpu_data_to_device(optimizer_state, self.args.device)\n xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device)\n\n self.optimizer.load_state_dict(optimizer_state)\n self.lr_scheduler.load_state_dict(lr_scheduler_state)\n else:\n self.optimizer.load_state_dict(\n torch.load(os.path.join(model_path, \"optimizer.pt\"), map_location=self.args.device)\n )\n with warnings.catch_warnings(record=True) as caught_warnings:\n self.lr_scheduler.load_state_dict(torch.load(os.path.join(model_path, \"scheduler.pt\")))\n reissue_pt_warnings(caught_warnings)\n\n if self.deepspeed:\n # Not sure how to check if there is a saved deepspeed checkpoint, but since it just return None if it fails to find a deepspeed checkpoint this is sort of a check-n-load function\n self.deepspeed.load_checkpoint(model_path, load_optimizer_states=True, load_lr_scheduler_states=True)\n\n def hyperparameter_search(\n self,\n hp_space: Optional[Callable[[\"optuna.Trial\"], Dict[str, float]]] = None,\n compute_objective: Optional[Callable[[Dict[str, float]], float]] = None,\n n_trials: int = 20,\n direction: str = \"minimize\",\n backend: Optional[Union[\"str\", HPSearchBackend]] = None,\n hp_name: Optional[Callable[[\"optuna.Trial\"], str]] = None,\n **kwargs\n ) -> BestRun:\n \"\"\"\n Launch an hyperparameter search using ``optuna`` or ``Ray Tune``. 
The optimized quantity is determined by\n :obj:`compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided,\n the sum of all metrics otherwise.\n\n .. warning::\n\n To use this method, you need to have provided a ``model_init`` when initializing your\n :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible\n with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the\n method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler.\n\n Args:\n hp_space (:obj:`Callable[[\"optuna.Trial\"], Dict[str, float]]`, `optional`):\n A function that defines the hyperparameter search space. Will default to\n :func:`~transformers.trainer_utils.default_hp_space_optuna` or\n :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend.\n compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`):\n A function computing the objective to minimize or maximize from the metrics returned by the\n :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`.\n n_trials (:obj:`int`, `optional`, defaults to 20):\n The number of trial runs to test.\n direction (:obj:`str`, `optional`, defaults to :obj:`\"minimize\"`):\n Whether to optimize for greater or lower objective values. Can be :obj:`\"minimize\"` or :obj:`\"maximize\"`, you should\n pick :obj:`\"minimize\"` when optimizing the validation loss, :obj:`\"maximize\"` when optimizing one or\n several metrics.\n backend (:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`):\n The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which\n one is installed. If both are installed, will default to optuna.\n hp_name (:obj:`Callable[[\"optuna.Trial\"], str]`, `optional`):\n A function that computes the name of the trial run from the trial object.\n kwargs:\n Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. For\n more information see:\n\n - the documentation of `optuna.create_study\n <https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__\n - the documentation of `tune.run\n <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__\n\n Returns:\n :class:`transformers.trainer_utils.BestRun`: All the information about the best run.\n
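\n Example (an illustrative sketch with the optuna backend; assumes this ``trainer`` was built with a\n ``model_init`` and that ``my_hp_space`` is user-defined)::\n\n def my_hp_space(trial):\n return {\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 1e-5, 1e-3, log=True),\n \"num_train_epochs\": trial.suggest_int(\"num_train_epochs\", 1, 5),\n }\n\n best_run = trainer.hyperparameter_search(hp_space=my_hp_space, n_trials=10, direction=\"minimize\")\n \"\"\"\n if backend is None:\n backend = default_hp_search_backend()\n if backend is None:\n raise RuntimeError(\n \"At least one of optuna or ray should be installed. \"\n \"To install optuna run `pip install optuna`. \"\n \"To install ray run `pip install ray[tune]`.\"\n )\n backend = HPSearchBackend(backend)\n if backend == HPSearchBackend.OPTUNA and not is_optuna_available():\n raise RuntimeError(\"You picked the optuna backend, but it is not installed. Use `pip install optuna`.\")\n if backend == HPSearchBackend.RAY and not is_ray_tune_available():\n raise RuntimeError(\n \"You picked the Ray Tune backend, but it is not installed. 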
Use `pip install 'ray[tune]'`.\"\n )\n self.hp_search_backend = backend\n if self.model_init is None:\n raise RuntimeError(\n \"To use hyperparameter search, you need to pass your model through a model_init function.\"\n )\n\n self.hp_space = default_hp_space[backend] if hp_space is None else hp_space\n self.hp_name = hp_name\n self.compute_objective = default_compute_objective if compute_objective is None else compute_objective\n\n run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray\n best_run = run_hp_search(self, n_trials, direction, **kwargs)\n\n self.hp_search_backend = None\n return best_run\n\n def log(self, logs: Dict[str, float]) -> None:\n \"\"\"\n Log :obj:`logs` on the various objects watching training.\n\n Subclass and override this method to inject custom behavior.\n\n Args:\n logs (:obj:`Dict[str, float]`):\n The values to log.\n \"\"\"\n if self.state.epoch is not None:\n logs[\"epoch\"] = round(self.state.epoch, 2)\n\n self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs)\n output = {**logs, **{\"step\": self.state.global_step}}\n self.state.log_history.append(output)\n\n def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:\n \"\"\"\n Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and\n handling potential state.\n \"\"\"\n for k, v in inputs.items():\n if isinstance(v, torch.Tensor):\n inputs[k] = v.to(self.args.device)\n\n if self.args.past_index >= 0 and self._past is not None:\n inputs[\"mems\"] = self._past\n\n return inputs\n\n def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:\n \"\"\"\n Perform a training step on a batch of inputs.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to train.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. Check your model's documentation for all accepted arguments.\n\n Return:\n :obj:`torch.Tensor`: The tensor with training loss on this batch.\n \"\"\"\n\n model.train()\n inputs = self._prepare_inputs(inputs)\n\n if self.use_amp:\n with autocast():\n loss = self.compute_loss(model, inputs)\n else:\n loss = self.compute_loss(model, inputs)\n\n if self.args.n_gpu > 1:\n loss = loss.mean() # mean() to average on multi-gpu parallel training\n\n if self.args.gradient_accumulation_steps > 1:\n loss = loss / self.args.gradient_accumulation_steps\n\n if self.use_amp:\n self.scaler.scale(loss).backward()\n elif self.use_apex:\n with amp.scale_loss(loss, self.optimizer) as scaled_loss:\n scaled_loss.backward()\n elif self.deepspeed:\n self.deepspeed.backward(loss)\n else:\n loss.backward()\n\n return loss.detach()\n\n def compute_loss(self, model, inputs):\n \"\"\"\n How the loss is computed by Trainer. 
By default, all models return the loss in the first element.\n\n Subclass and override for custom behavior.\n \"\"\"\n if self.label_smoother is not None and \"labels\" in inputs:\n labels = inputs.pop(\"labels\")\n else:\n labels = None\n outputs = model(**inputs)\n # Save past state if it exists\n # TODO: this needs to be fixed and made cleaner later.\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index]\n\n if labels is not None:\n return self.label_smoother(outputs, labels)\n else:\n # We don't use .loss here since the model may return tuples instead of ModelOutput.\n return outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]\n\n def is_local_process_zero(self) -> bool:\n \"\"\"\n Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several\n machines) main process.\n \"\"\"\n if is_torch_tpu_available():\n return xm.is_master_ordinal(local=True)\n else:\n return self.args.local_rank in [-1, 0]\n\n def is_world_process_zero(self) -> bool:\n \"\"\"\n Whether or not this process is the global main process (when training in a distributed fashion on several\n machines, this is only going to be :obj:`True` for one process).\n \"\"\"\n if is_torch_tpu_available():\n return xm.is_master_ordinal(local=False)\n else:\n return self.args.local_rank == -1 or torch.distributed.get_rank() == 0\n\n def save_model(self, output_dir: Optional[str] = None):\n \"\"\"\n Will save the model, so you can reload it using :obj:`from_pretrained()`.\n\n Will only save from the world_master process (unless in TPUs).\n \"\"\"\n\n if is_torch_tpu_available():\n self._save_tpu(output_dir)\n elif self.is_world_process_zero():\n self._save(output_dir)\n\n def _save_tpu(self, output_dir: Optional[str] = None):\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n\n if xm.is_master_ordinal():\n os.makedirs(output_dir, exist_ok=True)\n torch.save(self.args, os.path.join(output_dir, \"training_args.bin\"))\n\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n xm.rendezvous(\"saving_checkpoint\")\n if not isinstance(self.model, PreTrainedModel):\n logger.info(\"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n state_dict = self.model.state_dict()\n xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))\n else:\n self.model.save_pretrained(output_dir)\n if self.tokenizer is not None and self.is_world_process_zero():\n self.tokenizer.save_pretrained(output_dir)\n\n def _save(self, output_dir: Optional[str] = None):\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n logger.info(\"Saving model checkpoint to %s\", output_dir)\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n if not isinstance(self.model, PreTrainedModel):\n logger.info(\"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n state_dict = self.model.state_dict()\n torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME))\n else:\n self.model.save_pretrained(output_dir)\n if self.tokenizer is not None and self.is_world_process_zero():\n self.tokenizer.save_pretrained(output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(self.args, os.path.join(output_dir, 
\"training_args.bin\"))\n\n def store_flos(self):\n # Storing the number of floating-point operations that went into the model\n if self._total_flos is not None:\n if self.args.local_rank != -1:\n self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item()\n else:\n self.state.total_flos = self._total_flos\n\n def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:\n ordering_and_checkpoint_path = []\n\n glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f\"{checkpoint_prefix}-*\")]\n\n for path in glob_checkpoints:\n if use_mtime:\n ordering_and_checkpoint_path.append((os.path.getmtime(path), path))\n else:\n regex_match = re.match(f\".*{checkpoint_prefix}-([0-9]+)\", path)\n if regex_match and regex_match.groups():\n ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))\n\n checkpoints_sorted = sorted(ordering_and_checkpoint_path)\n checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]\n # Make sure we don't delete the best model.\n if self.state.best_model_checkpoint is not None:\n best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint)))\n checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = (\n checkpoints_sorted[-1],\n checkpoints_sorted[best_model_index],\n )\n return checkpoints_sorted\n\n def _rotate_checkpoints(self, use_mtime=False) -> None:\n if self.args.save_total_limit is None or self.args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)\n if len(checkpoints_sorted) <= self.args.save_total_limit:\n return\n\n number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\"Deleting older checkpoint [{}] due to args.save_total_limit\".format(checkpoint))\n shutil.rmtree(checkpoint)\n\n def evaluate(\n self,\n eval_dataset: Optional[Dataset] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> Dict[str, float]:\n \"\"\"\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are task-dependent\n (pass it to the init :obj:`compute_metrics` argument).\n\n You can also subclass and override this method to inject custom behavior.\n\n Args:\n eval_dataset (:obj:`Dataset`, `optional`):\n Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,\n columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the\n :obj:`__len__` method.\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`\"eval\"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics \"bleu\" will be named\n \"eval_bleu\" if the prefix is \"eval\" (default)\n\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions. 
The\n dictionary also contains the epoch number which comes from the training state.\n \"\"\"\n if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized):\n raise ValueError(\"eval_dataset must implement __len__\")\n\n eval_dataloader = self.get_eval_dataloader(eval_dataset)\n start_time = time.time()\n\n output = self.prediction_loop(\n eval_dataloader,\n description=\"Evaluation\",\n # No point gathering the predictions if there are no metrics, otherwise we defer to\n # self.args.prediction_loss_only\n prediction_loss_only=True if self.compute_metrics is None else None,\n ignore_keys=ignore_keys,\n metric_key_prefix=metric_key_prefix,\n )\n\n n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset)\n output.metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))\n self.log(output.metrics)\n\n if self.args.tpu_metrics_debug or self.args.debug:\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n\n self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics)\n return output.metrics\n\n def predict(\n self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = \"eval\"\n ) -> PredictionOutput:\n \"\"\"\n Run prediction and return predictions and potential metrics.\n\n Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method\n will also return metrics, like in :obj:`evaluate()`.\n\n Args:\n test_dataset (:obj:`Dataset`):\n Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the\n ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__`.\n ignore_keys (:obj:`List[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`\"eval\"`):\n An optional prefix to be used as the metrics key prefix. For example, the metric \"bleu\" will be named\n \"eval_bleu\" if the prefix is \"eval\" (default).\n\n .. note::\n\n If your predictions or labels have different sequence lengths (for instance because you're doing dynamic\n padding in a token classification task) the predictions will be padded (on the right) to allow for\n concatenation into one array. 
The padding index is -100.\n\n Returns: `NamedTuple` A namedtuple with the following keys:\n\n - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.\n - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).\n - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset\n contained labels).\n \"\"\"\n if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized):\n raise ValueError(\"test_dataset must implement __len__\")\n\n test_dataloader = self.get_test_dataloader(test_dataset)\n start_time = time.time()\n\n output = self.prediction_loop(\n test_dataloader, description=\"Prediction\", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix\n )\n output.metrics.update(speed_metrics(metric_key_prefix, start_time, len(test_dataset)))\n return output\n\n def prediction_loop(\n self,\n dataloader: DataLoader,\n description: str,\n prediction_loss_only: Optional[bool] = None,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> PredictionOutput:\n \"\"\"\n Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.\n\n Works both with or without labels.\n \"\"\"\n if not isinstance(dataloader.dataset, collections.abc.Sized):\n raise ValueError(\"dataset must implement __len__\")\n prediction_loss_only = (\n prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only\n )\n\n model = self.model\n # multi-gpu eval\n if self.args.n_gpu > 1:\n model = torch.nn.DataParallel(model)\n # Note: in torch.distributed mode, there's no point in wrapping the model\n # inside a DistributedDataParallel as we'll be under `no_grad` anyways.\n\n batch_size = dataloader.batch_size\n num_examples = self.num_examples(dataloader)\n logger.info(\"***** Running %s *****\", description)\n logger.info(\" Num examples = %d\", num_examples)\n logger.info(\" Batch size = %d\", batch_size)\n losses_host: torch.Tensor = None\n preds_host: Union[torch.Tensor, List[torch.Tensor]] = None\n labels_host: Union[torch.Tensor, List[torch.Tensor]] = None\n\n world_size = 1\n if is_torch_tpu_available():\n world_size = xm.xrt_world_size()\n elif self.args.local_rank != -1:\n world_size = torch.distributed.get_world_size()\n world_size = max(1, world_size)\n\n eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)\n if not prediction_loss_only:\n preds_gatherer = DistributedTensorGatherer(world_size, num_examples)\n labels_gatherer = DistributedTensorGatherer(world_size, num_examples)\n\n model.eval()\n\n if is_torch_tpu_available():\n dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)\n\n if self.args.past_index >= 0:\n self._past = None\n\n self.callback_handler.eval_dataloader = dataloader\n\n for step, inputs in enumerate(dataloader):\n loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)\n if loss is not None:\n losses = loss.repeat(batch_size)\n losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)\n if logits is not None:\n preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)\n if labels is not None:\n labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)\n self.control = self.callback_handler.on_prediction_step(self.args, 
self.state, self.control)\n\n # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.\n if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0:\n eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, \"eval_losses\"))\n if not prediction_loss_only:\n preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, \"eval_preds\"))\n labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, \"eval_label_ids\"))\n\n # Set back to None to begin a new accumulation\n losses_host, preds_host, labels_host = None, None, None\n\n if self.args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of the evaluation loop\n delattr(self, \"_past\")\n\n # Gather all remaining tensors and put them back on the CPU\n eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, \"eval_losses\"))\n if not prediction_loss_only:\n preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, \"eval_preds\"))\n labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, \"eval_label_ids\"))\n\n eval_loss = eval_losses_gatherer.finalize()\n preds = preds_gatherer.finalize() if not prediction_loss_only else None\n label_ids = labels_gatherer.finalize() if not prediction_loss_only else None\n\n if self.compute_metrics is not None and preds is not None and label_ids is not None:\n metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))\n else:\n metrics = {}\n\n if eval_loss is not None:\n metrics[f\"{metric_key_prefix}_loss\"] = eval_loss.mean().item()\n\n # Prefix all keys with metric_key_prefix + '_'\n for key in list(metrics.keys()):\n if not key.startswith(f\"{metric_key_prefix}_\"):\n metrics[f\"{metric_key_prefix}_{key}\"] = metrics.pop(key)\n\n return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)\n\n def _gather_and_numpify(self, tensors, name):\n \"\"\"\n Gather the values in `tensors` (a tensor or a list/tuple of nested tensors) across processes and\n convert them to numpy arrays.\n \"\"\"\n if tensors is None:\n return\n if is_torch_tpu_available():\n tensors = nested_xla_mesh_reduce(tensors, name)\n elif self.args.local_rank != -1:\n tensors = distributed_concat(tensors)\n\n return nested_numpify(tensors)\n\n def prediction_step(\n self,\n model: nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on :obj:`model` using :obj:`inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (:obj:`nn.Module`):\n The model to evaluate.\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument :obj:`labels`. 
Check your model's documentation for all accepted arguments.\n prediction_loss_only (:obj:`bool`):\n Whether or not to return the loss only.\n ignore_keys (:obj:`List[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n\n Returns:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\n labels (each being optional).\n \"\"\"\n has_labels = all(inputs.get(k) is not None for k in self.label_names)\n inputs = self._prepare_inputs(inputs)\n if ignore_keys is None:\n if hasattr(self.model, \"config\"):\n ignore_keys = getattr(self.model.config, \"keys_to_ignore_at_inference\", [])\n else:\n ignore_keys = []\n\n with torch.no_grad():\n if self.use_amp:\n with autocast():\n outputs = model(**inputs)\n else:\n outputs = model(**inputs)\n if has_labels:\n if self.label_smoother is not None and \"labels\" in inputs:\n loss = self.label_smoother(outputs, inputs[\"labels\"]).mean().detach()\n else:\n loss = (outputs[\"loss\"] if isinstance(outputs, dict) else outputs[0]).mean().detach()\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + [\"loss\"])\n else:\n logits = outputs[1:]\n else:\n loss = None\n if isinstance(outputs, dict):\n logits = tuple(v for k, v in outputs.items() if k not in ignore_keys)\n else:\n logits = outputs\n # TODO: this needs to be fixed and made cleaner later.\n if self.args.past_index >= 0:\n self._past = outputs[self.args.past_index if has_labels else self.args.past_index - 1]\n\n if prediction_loss_only:\n return (loss, None, None)\n\n logits = nested_detach(logits)\n if len(logits) == 1:\n logits = logits[0]\n\n if has_labels:\n labels = nested_detach(tuple(inputs.get(name) for name in self.label_names))\n if len(labels) == 1:\n labels = labels[0]\n else:\n labels = None\n\n return (loss, logits, labels)\n\n def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]):\n \"\"\"\n For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of\n floating point operations for every backward + forward pass. If using another model, either implement such a\n method in the model or subclass and override this method.\n\n Args:\n inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n Returns:\n :obj:`int`: The number of floating-point operations.\n \"\"\"\n if hasattr(self.model, \"floating_point_ops\"):\n return self.model.floating_point_ops(inputs)\n else:\n return 0\n" ]
[ [ "torch.distributed.get_world_size", "torch.cat", "torch.utils.data.dataloader.DataLoader", "torch.utils.data.sampler.RandomSampler", "torch.cuda.amp.autocast", "torch.no_grad", "torch.utils.data.sampler.SequentialSampler", "torch.tensor", "torch.utils.data.distributed.DistributedSampler", "torch.cuda.amp.GradScaler", "torch.distributed.get_rank", "torch.nn.DataParallel" ] ]
marcbadger/tweetynet
[ "048cc26ae3fe74c00a1d8f5a891eca21428c668c" ]
[ "src/tweetynet/curvefit.py" ]
[ "\"\"\"\"code to fit learning curves\r\nadapted from\r\nhttps://github.com/NickleDave/learning-curves/\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy import optimize\r\n\r\n\r\ndef residual_two_functions(params, x, y1, y1err, y2, y2err):\r\n \"\"\"\r\n returns residuals\r\n between two lines, specified by parameters in variable params,\r\n and data y1 and y2\r\n \"\"\"\r\n\r\n b = params[0]\r\n alpha = params[1]\r\n c = params[2]\r\n beta = params[3]\r\n asymptote = params[4]\r\n diff1 = (y1 - (asymptote + b * alpha ** x)) ** 2 / y1err\r\n diff2 = (y2 - (asymptote + c * beta ** x)) ** 2 / y2err\r\n return np.concatenate((diff1, diff2))\r\n\r\n\r\ndef fit_learning_curve(train_set_size, error_test, error_train=None,\r\n pinit=(1.0, -1.0), funcs=1):\r\n \"\"\"\r\n returns parameters to predict learning curve as a power function with the form\r\n y = a + b * x**alpha\r\n where x is the training set size, i.e., the independent variable\r\n\r\n You provide the function with your data: a vector of the training set sizes you used, and arrays of the error\r\n you found when training models with those training sets. The function then returns the fit parameters.\r\n Based on [1]_.\r\n\r\n Parameters\r\n ----------\r\n train_set_size : ndarray\r\n vector of m integers representing number of samples\r\n in training sets, should increase monotonically\r\n error_test : ndarray\r\n m x n array of errors where error_train[m,n] is\r\n the error measured for replicate n of training a model\r\n with train_set_size[m] samples.\r\n Error is measured on on a test set separate from the training set.\r\n error_train : ndarray\r\n same as error_test except the error is measured on the *training* set.\r\n Default is None.\r\n pinint : list\r\n initial guess for parameters b and alpha, default is [1.0, -1.0]\r\n funcs : int\r\n number of functions to fit, default is 1.\r\n If funcs==1 and only test error is passed as an argument,\r\n a power function is fit just to the test error\r\n If funcs==1 and both test error and train error are passed as arguments,\r\n it is assumed the train error and test error can be fit with same\r\n exponent and scaling parameter.\r\n If funcs==2, both test error and train error must be passed\r\n and each is fit with separate exponent and scaling parameters,\r\n but both share an extra parameter which is the asymptote.\r\n\r\n Returns\r\n -------\r\n a: float\r\n asymptotic value of error predicted for infinite training data\r\n b: float\r\n scaling parameter of power function\r\n alpha: float\r\n exponent parameter of power function\r\n\r\n *** if funcs = 2 ***\r\n c: float\r\n scaling parameter of power function fit to train error (b fits test error)\r\n beta: float\r\n exponent parameter of power function fit to train error (alpha fits test error)\r\n\r\n .. [1] Cortes, Corinna, et al.\r\n \"Learning curves: Asymptotic values and rate of convergence.\"\r\n Advances in Neural Information Processing Systems. 
1994.\r\n \"\"\"\r\n\r\n if funcs not in [1, 2]:\r\n raise ValueError('funcs argument should equal 1 or 2')\r\n\r\n if funcs == 2 and error_train is None:\r\n raise ValueError('error_train is a required argument when funcs==2')\r\n\r\n if train_set_size.shape[0] != error_test.shape[0]:\r\n raise ValueError(\r\n 'Number of elements in train_set_size does not match number of rows in error_test')\r\n\r\n fitfunc = lambda p, x: p[0] + p[1] * x\r\n errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err\r\n\r\n logx = np.log10(train_set_size)\r\n\r\n if error_train is None: # if we just have test error, fit with power function\r\n y = np.mean(error_test, axis=1)\r\n logy = np.log10(y)\r\n yerr = np.std(error_test, axis=1)\r\n logyerr = yerr / y\r\n out1 = optimize.leastsq(errfunc, pinit,\r\n args=(logx, logy, logyerr), full_output=True)\r\n pfinal = out1[0]\r\n b = 10.0 ** pfinal[0]\r\n alpha = pfinal[1]\r\n return b, alpha\r\n\r\n elif error_train is not None and funcs == 1: # if we have train error too, then try Cortes et al. 1994 approach\r\n err_diff = error_test - error_train\r\n y = np.mean(err_diff, axis=1)\r\n logy = np.log10(y)\r\n yerr = np.std(err_diff, axis=1)\r\n logyerr = yerr / y\r\n out1 = optimize.leastsq(errfunc, pinit,\r\n args=(logx, logy, logyerr), full_output=True)\r\n pfinal = out1[0]\r\n b = (10.0 ** pfinal[0]) / 2\r\n alpha = pfinal[1]\r\n\r\n err_sum = error_test + error_train\r\n y2 = np.mean(err_sum, axis=1)\r\n logy2 = np.log10(y2)\r\n y2err = np.std(err_sum, axis=1)\r\n logy2err = y2err / y2\r\n # take weighted mean of logy2 as best estimate of horizontal line\r\n estimate = np.average(logy2, weights=logy2err)\r\n a = (10.0 ** estimate) / 2\r\n return a, b, alpha\r\n\r\n elif error_train is not None and funcs == 2:\r\n y1 = np.mean(error_test, axis=1)\r\n y1err = np.std(error_test, axis=1)\r\n logy1 = np.log10(y1)\r\n y2 = np.mean(error_train, axis=1)\r\n y2err = np.std(error_train, axis=1)\r\n logy2 = np.log10(y2)\r\n if len(pinit) < 3: # pinit is still the two-parameter default from the function signature,\r\n # so replace it with a five-parameter default suitable for fitting two functions\r\n pinit = [1.0, -1.0, 1.0, 1.0, 0.05]\r\n best, cov, info, message, ier = optimize.leastsq(residual_two_functions,\r\n pinit,\r\n args=(train_set_size, y1, y1err, y2, y2err),\r\n full_output=True)\r\n return best\r\n\r\n" ]
[ [ "numpy.concatenate", "numpy.mean", "numpy.std", "scipy.optimize.leastsq", "numpy.average", "numpy.log10" ] ]
OhJaeKwang/gaze_estimation
[ "8fefa9ccb353ae5c164251a61221c369c1a825d2" ]
[ "dataset_tools/resize_data_lable.py" ]
[ "import matplotlib.pyplot as plt\nimport cv2\nimport json\nimport numpy as np\nimport math\nimport pandas as pd\nimport csv\n\norigin_x_shape = 192 # 이미지 가로 길이\norigin_y_shape = 192 # 이미지 세로 길이\n\nresize_width = 160\nresize_height = 96\n\nscale_factor_width = float(resize_width / origin_x_shape) \nscale_factor_height = float(resize_height / origin_y_shape)\n\n\ngt_file = pd.read_csv(\"C:/Users/OJK/Task/Gaze_Estimation/input/UnityEyes_Data/640x480/UE_validation_labels.csv\")\n\n# 레이블링 저장할 파일\ncsv_f = open('C:/Users/OJK/Task/Gaze_Estimation/input/UnityEyes_Data/160x96/UE_validation_labels.csv', 'w', newline='') # csv 저장 파일 열기\nwriter = csv.writer(csv_f)\n\nitems = ['image_name', 'scale', 'center_w', 'center_y'] # csv 파일 아이템 설정\nfor i in range(50):\n items.append('original_{}_x'.format(i))\n items.append('original_{}_y'.format(i))\nitems.append('gaze_x')\nitems.append('gaze_y')\nitems.append('gaze_z')\nitems.append('pitch')\nitems.append('yaw')\nwriter.writerow(items)\n\nfor file_index in range(0,10000):\n img = cv2.imread('C:/Users/OJK/Task/Gaze_Estimation/input/UnityEyes_Data/640x480/Validation_data/crop_gray_img/{}.jpg'.format(file_index+1)) # 이미지 파일 읽기\n img = cv2.resize(img, dsize=(resize_width, resize_height), interpolation=cv2.INTER_AREA) # 이미지 crop\n\n gt_data = gt_file.iloc[file_index,4:].values\n\n interior_raw = gt_data[2:34] # Keypoints 접근\n iris_raw = gt_data[36:-3]\n gaze_vector = gt_data[-3:]\n\n interior, iris = [], [] # 실제 gt 자료형을 위한 리스트\n\n for item in range(0,len(interior_raw),2): # Interior keypoints 자료형 변경\n item_2D = [float(interior_raw[item])*scale_factor_width , float(interior_raw[item+1])*scale_factor_height]\n interior.append(item_2D)\n\n for item in range(0,len(iris_raw),2): # Iris keypoints 자료형 변경\n item_2D = [float(iris_raw[item])*scale_factor_width , float(iris_raw[item+1])*scale_factor_height]\n iris.append(item_2D)\n\n cen_x1, cen_y1 = [], [] # Interior center 좌표 계산\n\n for i in interior:\n cen_x1.append(i[0])\n cen_y1.append(i[1])\n cen1 = ((sum(cen_x1) / len(interior)), (sum(cen_y1) / len(interior)))\n\n cen_x2, cen_y2 = [], [] # Iris center 좌표 계산\n for i in iris:\n cen_x2.append(i[0])\n cen_y2.append(i[1])\n cen2 = ((sum(cen_x2) / len(iris)), (sum(cen_y2) / len(iris)))\n\n # scale, center_w, center_h 계산\n x_min = min(np.min([i[0] for i in interior]), np.min([i[0] for i in iris])) # interiror 마진의 왼쪽 홍채 왼쪽 값중 더 왼쪽 인거\n x_max = max(np.max([i[0] for i in interior]), np.max([i[0] for i in iris]))\n y_min = min(np.min([i[1] for i in interior]), np.min([i[1] for i in iris]))\n y_max = max(np.max([i[1] for i in interior]), np.max([i[1] for i in iris]))\n\n scale = max(math.ceil(x_max) - math.floor(x_min), math.ceil(y_max) - math.floor(y_min)) / 200.0 # scale 계산 , 왜 200으로 나누는 거야?????\n center_w = (math.floor(x_min) + math.ceil(x_max)) / 2.0 # center_w 계산\n center_h = (math.floor(y_min) + math.ceil(y_max)) / 2.0 # center_h 계산\n\n yaw = math.atan2(-gaze_vector[0],-gaze_vector[2])\n pitch = math.asin(gaze_vector[1])\n\n\n img_name = '/Validation_data/img/{}.jpg'.format(file_index+1)\n \n data =[img_name, scale, center_w, center_h]\n\n data.append(cen1[0]) # 눈 경계점 중심\n data.append(cen1[1])\n for j in interior: # 눈 경계점 \n data.append(j[0])\n data.append(j[1])\n data.append(cen2[0]) # 홍채 중심\n data.append(cen2[1])\n for j in iris: # 홍채 경계점\n data.append(j[0])\n data.append(j[1])\n for g in gaze_vector: # gaze 벡터 \n data.append(g)\n data.append(pitch) # pitch yaw 벡터\n data.append(yaw)\n\n # 이미지 저장\n 
cv2.imwrite(\"C:/Users/OJK/Task/Gaze_Estimation/input/UnityEyes_Data/160x96/Validation_data/img/{}.jpg\".format(file_index+1),img)\n \n # save the labels\n writer.writerow(data)\n\n\ncsv_f.close() # close the csv output file\n\n" ]
[ [ "numpy.max", "pandas.read_csv", "numpy.min" ] ]
akx/ml-hypersim
[ "2408fbafe580246108585f9c46780dc62f284cfc" ]
[ "code/python/analysis/dataset_generate_scene_labeling_statistics.py" ]
[ "#\n# For licensing see accompanying LICENSE.txt file.\n# Copyright (C) 2020 Apple Inc. All Rights Reserved.\n#\n\nfrom pylab import *\n\nimport argparse\nimport fnmatch\nimport inspect\nimport os\nimport pandas as pd\n\nimport path_utils\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--dataset_dir\", required=True)\nparser.add_argument(\"--analysis_dir\", required=True)\nparser.add_argument(\"--batch_name\", required=True)\nparser.add_argument(\"--scene_names\")\nargs = parser.parse_args()\n\nassert os.path.exists(args.dataset_dir)\n\npath_utils.add_path_to_sys_path(args.dataset_dir, mode=\"relative_to_cwd\", frame=inspect.currentframe())\nimport _dataset_config\n\n\n\nprint(\"[HYPERSIM: DATASET_GENERATE_SCENE_LABELING_STATISTICS] Begin...\")\n\n\n\ndataset_scenes_dir = os.path.join(args.dataset_dir, \"scenes\")\n\nif args.scene_names is not None:\n scenes = [ s for s in _dataset_config.scenes if fnmatch.fnmatch(s[\"name\"], args.scene_names) ]\nelse:\n scenes = _dataset_config.scenes\n\n\n\nmetadata_camera_trajectories_csv_file = os.path.join(args.analysis_dir, \"metadata_camera_trajectories.csv\")\ndf_camera_trajectories = pd.read_csv(metadata_camera_trajectories_csv_file).rename_axis(\"camera_trajectory_id\").reset_index().set_index(\"Animation\")\n\ndf_columns = [\"scene_name\", \"labeling_time_seconds\", \"scene_included_in_dataset\"]\ndf = pd.DataFrame(columns=df_columns)\n\ndef process_scene(s, args):\n\n global df\n\n scene_name = s[\"name\"]\n\n scene_dir = os.path.join(dataset_scenes_dir, scene_name)\n detail_dir = os.path.join(scene_dir, \"_detail\")\n mesh_dir = os.path.join(scene_dir, \"_detail\", \"mesh\")\n\n metadata_cameras_csv_file = os.path.join(detail_dir, \"metadata_cameras.csv\")\n df_cameras = pd.read_csv(metadata_cameras_csv_file)\n cameras = df_cameras.to_records()\n\n # check if scene has been flagged for exclusion\n scene_included_in_dataset = False\n for c in cameras:\n camera_trajectory_name = scene_name + \"_\" + c[\"camera_name\"]\n scene_type = df_camera_trajectories.loc[camera_trajectory_name][\"Scene type\"]\n if scene_type != \"OUTSIDE VIEWING AREA (BAD INITIALIZATION)\" and scene_type != \"OUTSIDE VIEWING AREA (BAD TRAJECTORY)\":\n scene_included_in_dataset = True\n break\n\n if not scene_included_in_dataset:\n print(\"[HYPERSIM: DATASET_GENERATE_SCENE_LABELING_STATISTICS] No good camera trajectories for scene \" + scene_name + \", setting scene_included_in_dataset to False...\")\n\n log_file = os.path.join(mesh_dir, \"metadata_scene_annotation_tool.log\")\n\n if os.path.exists(log_file):\n\n with open(log_file, \"r\") as f:\n lines = f.readlines()\n num_lines = len(lines)\n\n loaded_prefix_str = \"[HYPERSIM: SCENE_ANNOTATION_TOOL] Loaded scene: \"\n unloaded_prefix_str = \"[HYPERSIM: SCENE_ANNOTATION_TOOL] Unloaded scene: \"\n\n labeling_time_seconds = 0.0\n loaded_line = \"\"\n unloaded_line = \"\"\n\n for l in lines:\n\n assert loaded_prefix_str in l or unloaded_prefix_str in l\n\n if loaded_prefix_str in l:\n loaded_line = l\n\n elif unloaded_prefix_str in l:\n\n unloaded_line = l\n \n if loaded_prefix_str in loaded_line:\n\n loaded_time_str = loaded_line[len(loaded_prefix_str):].strip()\n unloaded_time_str = unloaded_line[len(unloaded_prefix_str):].strip()\n \n loaded_time = datetime.datetime.strptime(loaded_time_str, \"%a %b %d %H:%M:%S %Y\")\n unloaded_time = datetime.datetime.strptime(unloaded_time_str, \"%a %b %d %H:%M:%S %Y\")\n\n labeling_time_seconds += (unloaded_time - loaded_time).total_seconds()\n loaded_line = \"\"\n 
unloaded_line = \"\"\n\n else:\n print(\"[HYPERSIM: DATASET_GENERATE_SCENE_LABELING_STATISTICS] WARNING: ENCOUNTERED UNLOAD TIME WITHOUT CORRESPONDING LOAD TIME...\")\n \n else:\n print(\"[HYPERSIM: DATASET_GENERATE_SCENE_LABELING_STATISTICS] WARNING: UNEXPECTED LINE: \" + l)\n\n df_curr = pd.DataFrame(columns=df_columns, data={\"scene_name\":[scene_name], \"labeling_time_seconds\":[labeling_time_seconds], \"scene_included_in_dataset\":[scene_included_in_dataset]})\n df = df.append(df_curr, ignore_index=True)\n\n labeling_time_minutes = labeling_time_seconds/60.0\n print(\"[HYPERSIM: DATASET_GENERATE_SCENE_LABELING_STATISTICS] \" + scene_name + \" (labeling time minutes = \" + str(labeling_time_minutes) + \")\")\n\n else:\n print(\"[HYPERSIM: DATASET_GENERATE_SCENE_LABELING_STATISTICS] WARNING: LOG FILE DOESN'T EXIST FOR SCENE: \" + scene_name)\n\n\n\nfor s in scenes:\n process_scene(s, args)\n\n\n\nbatch_dir = os.path.join(args.analysis_dir, \"scene_labeling_statistics\", args.batch_name)\nif not os.path.exists(batch_dir): os.makedirs(batch_dir)\nmetadata_labeling_time_csv_file = os.path.join(batch_dir, \"metadata_scene_labeling_time.csv\")\n\ndf.to_csv(metadata_labeling_time_csv_file, index=False)\n\n\n\nprint(\"[HYPERSIM: DATASET_GENERATE_SCENE_LABELING_STATISTICS] Finished.\")\n" ]
[ [ "pandas.DataFrame", "pandas.read_csv" ] ]
liuyangzhuan/autotune
[ "bc24177a617025d2a47bc79563538cc6da45cfa9" ]
[ "Benchmarks/3mm/problem.py" ]
[ "import numpy as np\nfrom numpy import abs, cos, exp, mean, pi, prod, sin, sqrt, sum\nfrom autotune import TuningProblem\nfrom autotune.space import *\nimport os\nimport sys\nimport time\nimport json\nimport math\n\nimport ConfigSpace as CS\nimport ConfigSpace.hyperparameters as CSH\nfrom skopt.space import Real, Integer, Categorical\n\nHERE = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(1, os.path.dirname(HERE)+ '/plopper')\nfrom plopper import Plopper\nnparams = 10\n\ncs = CS.ConfigurationSpace(seed=1234)\np0= CSH.CategoricalHyperparameter(name='p0', choices=[\"#pragma clang loop(j2) pack array(A) allocate(malloc)\", \" \"], default_value=' ')\np1= CSH.CategoricalHyperparameter(name='p1', choices=[\"#pragma clang loop(i1) pack array(B) allocate(malloc)\", \" \"], default_value=' ')\np2= CSH.CategoricalHyperparameter(name='p2', choices=[\"#pragma clang loop(i1,j1,k1,i2,j2) interchange permutation(j1,k1,i1,j2,i2)\", \" \"], default_value=' ')\np3= CSH.OrdinalHyperparameter(name='p3', sequence=['4','8','16','20','32','50','64','80','96','100','128'], default_value='96')\np4= CSH.OrdinalHyperparameter(name='p4', sequence=['4','8','16','20','32','50','64','80','100','128','2048'], default_value='2048')\np5= CSH.OrdinalHyperparameter(name='p5', sequence=['4','8','16','20','32','50','64','80','100','128','256'], default_value='256')\np6= CSH.CategoricalHyperparameter(name='p6', choices=[\"#pragma clang loop(j2) pack array(C) allocate(malloc)\", \" \"], default_value=' ')\np7= CSH.CategoricalHyperparameter(name='p7', choices=[\"#pragma clang loop(i1) pack array(D) allocate(malloc)\", \" \"], default_value=' ')\np8= CSH.CategoricalHyperparameter(name='p8', choices=[\"#pragma clang loop(j2) pack array(E) allocate(malloc)\", \" \"], default_value=' ')\np9= CSH.CategoricalHyperparameter(name='p9', choices=[\"#pragma clang loop(i1) pack array(F) allocate(malloc)\", \" \"], default_value=' ')\n\ncs.add_hyperparameters([p0, p1, p2, p3, p4, p5, p6, p7, p8, p9])\n\n#cond1 = CS.InCondition(p1, p0, ['#pragma clang loop(j2) pack array(A) allocate(malloc)'])\n#cs.add_condition(cond1)\n\n# problem space\ntask_space = None\n\ninput_space = cs\n\noutput_space = Space([\n Real(0.0, inf, name=\"time\")\n])\n\ndir_path = os.path.dirname(os.path.realpath(__file__))\nkernel_idx = dir_path.rfind('/')\nkernel = dir_path[kernel_idx+1:]\nobj = Plopper(dir_path+'/mmp.c',dir_path)\n\nx1=['p0','p1','p2','p3','p4','p5','p6','p7','p8','p9']\n\ndef myobj(point: dict):\n\n def plopper_func(x):\n x = np.asarray_chkfinite(x) # ValueError if any NaN or Inf\n value = [point[x1[0]],point[x1[1]],point[x1[2]],point[x1[3]],point[x1[4]],point[x1[5]],point[x1[6]],point[x1[7]],point[x1[8]],point[x1[9]]]\n print('VALUES:',point[x1[0]])\n params = [\"P0\",\"P1\",\"P2\",\"P3\",\"P4\",\"P5\",\"P6\",\"P7\",\"P8\",\"P9\"]\n\n result = obj.findRuntime(value, params)\n return result\n\n x = np.array([point[f'p{i}'] for i in range(len(point))])\n results = plopper_func(x)\n print('OUTPUT:%f',results)\n\n return results\n\nProblem = TuningProblem(\n task_space=None,\n input_space=input_space,\n output_space=output_space,\n objective=myobj,\n constraints=None,\n model=None\n )\n" ]
[ [ "numpy.asarray_chkfinite" ] ]
pollen-robotics/pyrobus
[ "ff90e129159ae0569c0b82a49ee5c0de9914441f" ]
[ "pyluos/device.py" ]
[ "# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport json\nimport time\nimport logging\nimport threading\nimport logging.config\nimport numpy as np\n\nfrom datetime import datetime\nfrom collections import defaultdict\n\nfrom .io import discover_hosts, io_from_host, Ws\nfrom .services import name2mod\n\nfrom anytree import AnyNode, RenderTree, DoubleStyle\n\n\ndef run_from_unittest():\n return 'unittest' in sys.services\n\nclass contList(list):\n def __repr__(self):\n s = '-------------------------------------------------\\n'\n s += '{:<20s}{:<20s}{:<5s}\\n'.format(\"Type\", \"Alias\", \"ID\")\n s += '-------------------------------------------------\\n'\n for elem in self:\n s += '{:<20s}{:<20s}{:<5d}\\n'.format(elem.type, elem.alias, elem.id)\n return s\n\nclass nodeList(list):\n def __repr__(self):\n # Display the topology\n s = ''\n prefill = ''\n prechild = False\n for pre, fill, node in RenderTree(self[0], style=DoubleStyle()):\n child = []\n if (node.parent == None):\n branch = \" ┃ \"\n for i,x in enumerate(node.port_table):\n child.append(i)\n else:\n l_port_id = '?'\n for i,x in enumerate(node.parent.port_table):\n if (x == node.id):\n l_port_id = str(i)\n r_port_id = node.port_table.index(min(node.port_table))\n for i,x in enumerate(node.port_table):\n if ((i != r_port_id) and (x != 65535)):\n child.append(i)\n branch = str(l_port_id) + \">┃\" + str(r_port_id) + \" \"\n prefill = (prefill[:len(fill)]) if len(prefill) > len(fill) else prefill\n s +='{:<{fillsize}s}'.format(prefill, fillsize=len(fill))\n if (prechild == True):\n position = -4\n s = s[:position] + '║' + s[position+1:]\n s += \" ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓\\n\"\n tmpstr = \"%s╭node %s\" % (branch, node.id)\n s += pre + '{:^10s}'.format(tmpstr)\n if (node.certified == True):\n s += '{:^41s}'.format(\"Certified\") + \"┃\\n\"\n else:\n s += '{:^41s}'.format(\"/!\\\\ Not certified\") + \"┃\\n\"\n s += fill + \" ┃ │ \" + '{:<20s}{:<20s}{:<5s}'.format(\"Type\", \"Alias\", \"ID\")+ \"┃\\n\"\n for y,elem in enumerate(node.services):\n if (y == (len(node.services)-1)):\n s += fill + \" ┃ ╰> \" + '{:<20s}{:<20s}{:<5d}'.format(elem.type, elem.alias, elem.id)+ \"┃\\n\"\n else:\n s += fill + \" ┃ ├> \" + '{:<20s}{:<20s}{:<5d}'.format(elem.type, elem.alias, elem.id) + \"┃\\n\"\n if (not child):\n s += fill + \" >┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\\n\"\n prechild = False\n else:\n s += fill + \"╔>┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛\\n\"\n prechild = True\n prefill = fill\n return s\n\nclass Device(object):\n _heartbeat_timeout = 5 # in sec.\n _max_alias_length = 15\n _base_log_conf = os.path.join(os.path.dirname(__file__),\n 'logging_conf.json')\n\n def __init__(self, host,\n IO=None,\n log_conf=_base_log_conf,\n test_mode=False,\n background_task=True,\n *args, **kwargs):\n if IO is not None:\n self._io = IO(host=host, *args, **kwargs)\n else:\n self._io = io_from_host(host=host,\n *args, **kwargs)\n\n if os.path.exists(log_conf):\n with open(log_conf) as f:\n config = json.load(f)\n logging.config.dictConfig(config)\n\n self.logger = logging.getLogger(__name__)\n self.logger.info('Connected to \"{}\".'.format(host))\n\n self._send_lock = threading.Lock()\n self._cmd_lock = threading.Lock()\n\n # We force a first poll to setup our model.\n self._setup()\n self.logger.info('Device setup.')\n\n self._last_update = time.time()\n self._running = True\n self._pause = False\n\n if(background_task == True):\n # Setup both poll/push synchronization loops.\n self._poll_bg = 
threading.Thread(target=self._poll_and_up)\n self._poll_bg.daemon = True\n self._poll_bg.start()\n self._baudrate = 1000000\n\n def close(self):\n self._running = False\n\n if hasattr(self, \"_poll_bg\"):\n self._poll_bg.join(timeout=2.0)\n\n if self._poll_bg.is_alive():\n # _poll_bg didn't terminate within the timeout\n print(\"Warning: device closed on timeout, background thread is still running.\")\n self._io.close()\n\n @property\n def baudrate(self):\n return self._baudrate\n\n @baudrate.setter\n def baudrate(self, baudrate):\n self._send({'baudrate': baudrate})\n self._baudrate = baudrate\n time.sleep(0.01)\n\n def benchmark(self, target_id, data, repetition):\n data = np.array(data, dtype=np.uint8)\n self._bench_settings = {'benchmark': {'target': target_id, 'repetitions': repetition, 'data': [len(data)]}}\n self._bench_Data = data.tobytes()\n self._write( json.dumps(self._bench_settings).encode() + '\\n'.encode() + self._bench_Data)\n\n state = self._poll_once()\n startTime = time.time()\n retry = 0\n while ('benchmark' not in state):\n state = self._poll_once()\n if (time.time()-startTime > 30):\n self._write( json.dumps(self._bench_settings).encode() + '\\n'.encode() + self._bench_Data)\n retry = retry+1\n if (retry == 3):\n return (0, 100)\n startTime = time.time()\n\n #self._pause = False\n return (state['benchmark']['data_rate'], state['benchmark']['fail_rate'])\n\n def pause(self):\n self._pause = True\n time.sleep(1)\n\n def play(self):\n self._pause = False\n\n def _setup(self):\n self.logger.info('Sending detection signal.')\n self._send({})\n time.sleep(0.01)\n self._send({'detection': {}})\n self.logger.info('Waiting for routing table...')\n startTime = time.time()\n state = self._poll_once()\n retry = 0\n while ('routing_table' not in state):\n if ('route_table' in state):\n self.logger.info(\"Watch out the Luos revision you are using on your board is too old to work with this revision on pyluos.\\n Please consider updating Luos on your boards\")\n return\n state = self._poll_once()\n if (time.time()-startTime > 1):\n retry = retry +1\n if retry > 5:\n # detection is not working\n sys.exit(\"Detection failed.\")\n self._send({'detection': {}})\n startTime = time.time()\n # Create nodes\n self._services = []\n self._nodes = []\n for i, node in enumerate(state['routing_table']):\n if ('node_id' not in node):\n self.logger.info(\"Watch out the Luos revision you are using on your board is too old to work with this revision on pyluos.\\n Please consider updating Luos on your boards\")\n parent_elem = None\n # find a parent and create a link\n if (min(node[\"port_table\"]) < node[\"services\"][0][\"id\"]):\n parent_id = min(node[\"port_table\"])\n for elem in self._nodes:\n if (elem.id == parent_id):\n parent_elem = elem\n break;\n # create the node\n self._nodes.append(AnyNode(id=node[\"node_id\"], certified=node[\"certified\"], parent=parent_elem, port_table=node[\"port_table\"]))\n\n filtered_services = contList([mod for mod in node[\"services\"]\n if 'type' in mod and mod['type'] in name2mod.keys()])\n # Create a list of services in the node\n self._nodes[i].services = [\n name2mod[mod['type']](id=mod['id'],\n alias=mod['alias'],\n device=self)\n for mod in filtered_services\n if 'type' in mod and 'id' in mod and 'alias' in mod\n ]\n # Create a list of services of the entire device\n self._services = self._services + self._nodes[i].services\n for mod in self._nodes[i].services:\n setattr(self, mod.alias, mod)\n\n self._cmd = defaultdict(lambda: defaultdict(lambda: 
None))\n self._cmd_data = []\n self._binary = []\n\n # We push our current state to make sure that\n # both our model and the hardware are synced.\n self._push_once()\n\n @property\n def services(self):\n return contList(self._services)\n\n @property\n def nodes(self):\n return nodeList(self._nodes)\n\n # Poll state from hardware.\n def _poll_once(self):\n self._state = self._io.read()\n if self._state != []:\n self._state['timestamp'] = time.time()\n return self._state\n return []\n\n def _poll_and_up(self):\n while self._running:\n if not self._pause :\n state = self._poll_once()\n if self._state != []:\n self._update(state)\n self._push_once()\n else :\n time.sleep(0.1)\n\n # Update our model with the new state.\n def _update(self, new_state):\n if 'dead_service' in new_state :\n #we have lost a service put a flag on this service\n alias = new_state['dead_service']\n if hasattr(self, alias):\n getattr(self, alias)._kill()\n if 'assert' in new_state :\n # A node assert, print assert informations\n if (('node_id' in new_state['assert']) and ('file' in new_state['assert']) and ('line' in new_state['assert'])):\n s = \"************************* ASSERT *************************\\n\"\n s += \"* Node \" + str(new_state['assert']['node_id']) + \" assert in file \" + new_state['assert']['file'] + \" line \" + str(new_state['assert']['line'])\n s += \"\\n**********************************************************\"\n print (s)\n if 'services' not in new_state:\n return\n\n for alias, mod in new_state['services'].items():\n if hasattr(self, alias):\n getattr(self, alias)._update(mod)\n\n self._last_update = time.time()\n\n def update_cmd(self, alias, key, val):\n with self._cmd_lock:\n self._cmd[alias][key] = val\n\n def update_data(self, alias, key, val, data):\n with self._cmd_lock:\n self._cmd_data.append({alias: {key: val}})\n self._binary.append(data.tobytes())\n\n def _push_once(self):\n with self._cmd_lock:\n if self._cmd:\n self._write( json.dumps({'services': self._cmd}).encode())\n self._cmd = defaultdict(lambda: defaultdict(lambda: None))\n for cmd, binary in zip(self._cmd_data, self._binary):\n time.sleep(0.01)\n self._write( json.dumps({'services': cmd}).encode() + '\\n'.encode() + binary)\n\n self._cmd_data = []\n self._binary = []\n\n\n def _send(self, msg):\n with self._send_lock:\n self._io.send(msg)\n\n def _write(self, data):\n with self._send_lock:\n self._io.write(data)\n" ]
[ [ "numpy.array" ] ]
amtagrwl/fvcore
[ "037302acd51b05c6c88f6c3495b5ea340cc4cb94" ]
[ "fvcore/nn/smooth_l1_loss.py" ]
[ "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nimport torch\n\n\ndef smooth_l1_loss(\n input: torch.Tensor, target: torch.Tensor, beta: float, reduction: str = \"none\"\n) -> torch.Tensor:\n \"\"\"\n Smooth L1 loss defined in the Fast R-CNN paper as:\n\n | 0.5 * x ** 2 / beta if abs(x) < beta\n smoothl1(x) = |\n | abs(x) - 0.5 * beta otherwise,\n\n where x = input - target.\n\n Smooth L1 loss is related to Huber loss, which is defined as:\n\n | 0.5 * x ** 2 if abs(x) < beta\n huber(x) = |\n | beta * (abs(x) - 0.5 * beta) otherwise\n\n Smooth L1 loss is equal to huber(x) / beta. This leads to the following\n differences:\n\n - As beta -> 0, Smooth L1 loss converges to L1 loss, while Huber loss\n converges to a constant 0 loss.\n - As beta -> +inf, Smooth L1 converges to a constant 0 loss, while Huber loss\n converges to L2 loss.\n - For Smooth L1 loss, as beta varies, the L1 segment of the loss has a constant\n slope of 1. For Huber loss, the slope of the L1 segment is beta.\n\n Smooth L1 loss can be seen as exactly L1 loss, but with the abs(x) < beta\n portion replaced with a quadratic function such that at abs(x) = beta, its\n slope is 1. The quadratic segment smooths the L1 loss near x = 0.\n\n Args:\n input (Tensor): input tensor of any shape\n target (Tensor): target value tensor with the same shape as input\n beta (float): L1 to L2 change point.\n For beta values < 1e-5, L1 loss is computed.\n reduction: 'none' | 'mean' | 'sum'\n 'none': No reduction will be applied to the output.\n 'mean': The output will be averaged.\n 'sum': The output will be summed.\n\n Returns:\n The loss with the reduction option applied.\n\n Note:\n PyTorch's builtin \"Smooth L1 loss\" implementation does not actually\n implement Smooth L1 loss, nor does it implement Huber loss. It implements\n the special case of both in which they are equal (beta=1).\n See: https://pytorch.org/docs/stable/nn.html#torch.nn.SmoothL1Loss.\n \"\"\"\n if beta < 1e-5:\n # if beta == 0, then torch.where will result in nan gradients when\n # the chain rule is applied due to pytorch implementation details\n # (the False branch \"0.5 * n ** 2 / 0\" has an incoming gradient of\n # zeros, rather than \"no gradient\"). To avoid this issue, we define\n # small values of beta to be exactly l1 loss.\n loss = torch.abs(input - target)\n else:\n n = torch.abs(input - target)\n cond = n < beta\n loss = torch.where(cond, 0.5 * n ** 2 / beta, n - 0.5 * beta)\n\n if reduction == \"mean\":\n loss = loss.mean()\n elif reduction == \"sum\":\n loss = loss.sum()\n return loss\n" ]
[ [ "torch.abs", "torch.where" ] ]
shoguncao/autoMusic
[ "c7b648bc4320d21d89c8c4194dcaf2f835aa1234" ]
[ "magenta/models/music_vae/music_vae_train.py" ]
[ "# Copyright 2017 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"MusicVAE training script.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\n# internal imports\nimport tensorflow as tf\n\nfrom magenta.models.music_vae import configs\nfrom magenta.models.music_vae import data\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'master', 'local',\n 'The TensorFlow master to use.')\nflags.DEFINE_string(\n 'examples_path', None,\n 'Path to a TFRecord file of NoteSequence examples. Overrides the config.')\nflags.DEFINE_string(\n 'run_dir', None,\n 'Path where checkpoints and summary events will be located during '\n 'training and evaluation. Separate subdirectories `train` and `eval` '\n 'will be created within this directory.')\nflags.DEFINE_integer(\n 'num_steps', 200000,\n 'Number of training steps or `None` for infinite.')\nflags.DEFINE_integer(\n 'eval_num_batches', None,\n 'Number of batches to use during evaluation or `None` for all batches '\n 'in the data source.')\nflags.DEFINE_integer(\n 'checkpoints_to_keep', 100,\n 'Maximum number of checkpoints to keep in `train` mode or 0 for infinite.')\nflags.DEFINE_string(\n 'mode', 'train',\n 'Which mode to use (`train` or `eval`).')\nflags.DEFINE_string(\n 'config', '',\n 'The name of the config to use.')\nflags.DEFINE_string(\n 'hparams', '',\n 'A comma-separated list of `name=value` hyperparameter values to merge '\n 'with those in the config.')\nflags.DEFINE_integer(\n 'task', 0,\n 'The task number for this worker.')\nflags.DEFINE_integer(\n 'num_ps_tasks', 0,\n 'The number of parameter server tasks.')\nflags.DEFINE_integer(\n 'num_sync_workers', 0,\n 'The number of synchronized workers.')\nflags.DEFINE_integer(\n 'num_data_threads', 4,\n 'The number of data preprocessing threads.')\nflags.DEFINE_integer(\n 'shuffle_buffer_size', 256,\n 'Size of shuffle buffer.')\nflags.DEFINE_string(\n 'eval_dir_suffix', '',\n 'Suffix to add to eval output directory.')\nflags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged: '\n 'DEBUG, INFO, WARN, ERROR, or FATAL.')\n\n\n# Should not be called from within the graph to avoid redundant summaries.\ndef _trial_summary(hparams, examples_path, output_dir):\n \"\"\"Writes a tensorboard text summary of the trial.\"\"\"\n\n examples_path_summary = tf.summary.text(\n 'examples_path', tf.constant(examples_path, name='examples_path'),\n collections=[])\n\n hparams_dict = hparams.values()\n\n # Create a markdown table from hparams.\n header = '| Key | Value |\\n| :--- | :--- |\\n'\n keys = sorted(hparams_dict.keys())\n lines = ['| %s | %s |' % (key, str(hparams_dict[key])) for key in keys]\n hparams_table = header + '\\n'.join(lines) + '\\n'\n\n hparam_summary = tf.summary.text(\n 'hparams', tf.constant(hparams_table, name='hparams'), collections=[])\n\n with tf.Session() as sess:\n writer = tf.summary.FileWriter(output_dir, 
graph=sess.graph)\n writer.add_summary(examples_path_summary.eval())\n writer.add_summary(hparam_summary.eval())\n writer.close()\n\n\ndef train(train_dir,\n config,\n dataset,\n checkpoints_to_keep=5,\n num_steps=None,\n master='',\n num_sync_workers=0,\n num_ps_tasks=0,\n task=0):\n \"\"\"Train loop.\"\"\"\n tf.gfile.MakeDirs(train_dir)\n is_chief = (task == 0)\n if is_chief:\n _trial_summary(config.hparams, config.train_examples_path, train_dir)\n with tf.Graph().as_default():\n with tf.device(tf.train.replica_device_setter(\n num_ps_tasks, merge_devices=True)):\n config.note_sequence_converter.is_training = True\n train_dataset = (\n dataset\n .repeat()\n .shuffle(buffer_size=FLAGS.shuffle_buffer_size))\n train_dataset = train_dataset.padded_batch(\n config.hparams.batch_size, train_dataset.output_shapes)\n\n iterator = train_dataset.make_one_shot_iterator()\n input_sequence, output_sequence, sequence_length = iterator.get_next()\n input_sequence.set_shape(\n [config.hparams.batch_size, None,\n config.note_sequence_converter.input_depth])\n output_sequence.set_shape(\n [config.hparams.batch_size, None,\n config.note_sequence_converter.output_depth])\n\n model = config.model\n model.build(config.hparams,\n config.note_sequence_converter.output_depth,\n is_training=True)\n\n optimizer = model.train(input_sequence, output_sequence, sequence_length)\n\n hooks = []\n if num_sync_workers:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer,\n num_sync_workers)\n hooks.append(optimizer.make_session_run_hook(is_chief))\n\n gvs = optimizer.compute_gradients(model.loss)\n g = config.hparams.grad_clip\n capped_gvs = [(tf.clip_by_value(grad, -g, g), var) for grad, var in gvs]\n train_op = optimizer.apply_gradients(\n capped_gvs, global_step=model.global_step, name='train_step')\n\n logging_dict = {'global_step': model.global_step,\n 'loss': model.loss}\n\n hooks.append(tf.train.LoggingTensorHook(logging_dict, every_n_iter=100))\n if num_steps:\n hooks.append(tf.train.StopAtStepHook(last_step=num_steps))\n\n scaffold = tf.train.Scaffold(\n saver=tf.train.Saver(max_to_keep=checkpoints_to_keep))\n tf.contrib.training.train(\n train_op=train_op,\n logdir=train_dir,\n scaffold=scaffold,\n hooks=hooks,\n save_checkpoint_secs=60,\n master=master,\n is_chief=is_chief)\n\n\ndef evaluate(train_dir,\n eval_dir,\n config,\n dataset,\n num_batches,\n master=''):\n \"\"\"Evaluate the model repeatedly.\"\"\"\n tf.gfile.MakeDirs(eval_dir)\n\n _trial_summary(config.hparams, config.eval_examples_path, eval_dir)\n with tf.Graph().as_default():\n eval_dataset = (\n dataset\n .padded_batch(config.hparams.batch_size, dataset.output_shapes)\n .take(num_batches))\n iterator = eval_dataset.make_one_shot_iterator()\n input_sequence, output_sequence, sequence_length = iterator.get_next()\n input_sequence.set_shape(\n [config.hparams.batch_size, None,\n config.note_sequence_converter.input_depth])\n output_sequence.set_shape(\n [config.hparams.batch_size, None,\n config.note_sequence_converter.output_depth])\n\n model = config.model\n model.build(config.hparams,\n config.note_sequence_converter.output_depth,\n is_training=False)\n\n eval_op = model.eval(input_sequence, output_sequence, sequence_length)\n\n hooks = [\n tf.contrib.training.StopAfterNEvalsHook(num_batches),\n tf.contrib.training.SummaryAtEndHook(eval_dir)]\n tf.contrib.training.evaluate_repeatedly(\n train_dir,\n eval_ops=eval_op,\n hooks=hooks,\n eval_interval_secs=60,\n master=master)\n\n\ndef run(config_map,\n 
tf_file_reader_class=tf.data.TFRecordDataset,\n file_reader=tf.python_io.tf_record_iterator):\n \"\"\"Load model params, save config file and start trainer.\n\n Args:\n config_map: Dictionary mapping configuration name to Config object.\n tf_file_reader_class: The tf.data.Dataset class to use for reading files.\n file_reader: The Python reader to use for reading files.\n\n Raises:\n ValueError: if required flags are missing or invalid.\n \"\"\"\n if not FLAGS.run_dir:\n raise ValueError('Invalid run directory: %s' % FLAGS.run_dir)\n run_dir = os.path.expanduser(FLAGS.run_dir)\n train_dir = os.path.join(run_dir, 'train')\n\n if FLAGS.mode not in ['train', 'eval']:\n raise ValueError('Invalid mode: %s' % FLAGS.mode)\n\n if FLAGS.config not in config_map:\n raise ValueError('Invalid config: %s' % FLAGS.config)\n config = config_map[FLAGS.config]\n if FLAGS.hparams:\n config.hparams.parse(FLAGS.hparams)\n config_update_map = {}\n if FLAGS.examples_path:\n config_update_map['%s_examples_path' % FLAGS.mode] = FLAGS.examples_path\n config = configs.update_config(config, config_update_map)\n if FLAGS.num_sync_workers:\n config.hparams.batch_size //= FLAGS.num_sync_workers\n\n if FLAGS.mode == 'train':\n is_training = True\n elif FLAGS.mode == 'eval':\n is_training = False\n else:\n raise ValueError('Invalid mode: {}'.format(FLAGS.mode))\n\n dataset = data.get_dataset(\n config,\n tf_file_reader_class=tf_file_reader_class,\n num_threads=FLAGS.num_data_threads,\n is_training=is_training)\n\n if is_training:\n train(\n train_dir,\n config=config,\n dataset=dataset,\n checkpoints_to_keep=FLAGS.checkpoints_to_keep,\n num_steps=FLAGS.num_steps,\n master=FLAGS.master,\n num_sync_workers=FLAGS.num_sync_workers,\n num_ps_tasks=FLAGS.num_ps_tasks,\n task=FLAGS.task)\n else:\n num_batches = FLAGS.eval_num_batches or data.count_examples(\n config.eval_examples_path,\n config.note_sequence_converter,\n file_reader) // config.hparams.batch_size\n eval_dir = os.path.join(run_dir, 'eval' + FLAGS.eval_dir_suffix)\n evaluate(\n train_dir,\n eval_dir,\n config=config,\n dataset=dataset,\n num_batches=num_batches,\n master=FLAGS.master)\n\n\ndef main(unused_argv):\n tf.logging.set_verbosity(FLAGS.log)\n run(configs.CONFIG_MAP)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n" ]
[ [ "tensorflow.contrib.training.train", "tensorflow.logging.set_verbosity", "tensorflow.train.replica_device_setter", "tensorflow.train.LoggingTensorHook", "tensorflow.contrib.training.evaluate_repeatedly", "tensorflow.Session", "tensorflow.Graph", "tensorflow.train.Saver", "tensorflow.contrib.training.SummaryAtEndHook", "tensorflow.constant", "tensorflow.gfile.MakeDirs", "tensorflow.clip_by_value", "tensorflow.train.StopAtStepHook", "tensorflow.summary.FileWriter", "tensorflow.app.run", "tensorflow.train.SyncReplicasOptimizer", "tensorflow.contrib.training.StopAfterNEvalsHook" ] ]