repo_name | hexsha | file_path | code | apis | possible_versions |
---|---|---|---|---|---|
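Each row below bundles one repository's sampled files, with the list-valued columns running in parallel (entry *i* of `hexsha`, `file_path`, `code`, `apis`, and `possible_versions` all describe the *i*-th file). As a minimal sketch of that layout — the dict literal simply mirrors the first row of the table and is illustrative, not a loader shipped with this dump — a record can be read like this:

```python
# Hypothetical record layout, assuming the parallel-list convention
# implied by the column headers above. Values are copied from the
# first row of the table; nothing here is an official schema.
record = {
    "repo_name": "VirtueQuantumCloud/projectqX",
    "hexsha": ["fa484fe037a3a1772127bbd00fe4628ddba34611"],
    "file_path": ["projectq/ops/_qubit_operator_test.py"],
    "code": ["# file contents stored as one escaped string ..."],
    "apis": [["numpy.complex128", "numpy.float64"]],
    "possible_versions": [{"matplotlib": [], "numpy": [], "pandas": [],
                           "scipy": [], "tensorflow": []}],
}

# Walk the parallel lists: each (sha, path, api_list) triple
# identifies one file and the library calls detected in it.
for sha, path, api_list in zip(record["hexsha"],
                               record["file_path"],
                               record["apis"]):
    print(f"{path} @ {sha[:8]}: uses {', '.join(api_list)}")
```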
VirtueQuantumCloud/projectqX
|
[
"fa484fe037a3a1772127bbd00fe4628ddba34611"
] |
[
"projectq/ops/_qubit_operator_test.py"
] |
[
"# Copyright 2017 ProjectQ-Framework (www.projectq.ch)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for _qubit_operator.py.\"\"\"\nimport copy\n\nimport numpy\nimport pytest\n\nfrom projectq.ops import _qubit_operator as qo\n\n\ndef test_pauli_operator_product_unchanged():\n correct = {('I', 'I'): (1., 'I'),\n ('I', 'X'): (1., 'X'),\n ('X', 'I'): (1., 'X'),\n ('I', 'Y'): (1., 'Y'),\n ('Y', 'I'): (1., 'Y'),\n ('I', 'Z'): (1., 'Z'),\n ('Z', 'I'): (1., 'Z'),\n ('X', 'X'): (1., 'I'),\n ('Y', 'Y'): (1., 'I'),\n ('Z', 'Z'): (1., 'I'),\n ('X', 'Y'): (1.j, 'Z'),\n ('X', 'Z'): (-1.j, 'Y'),\n ('Y', 'X'): (-1.j, 'Z'),\n ('Y', 'Z'): (1.j, 'X'),\n ('Z', 'X'): (1.j, 'Y'),\n ('Z', 'Y'): (-1.j, 'X')}\n assert qo._PAULI_OPERATOR_PRODUCTS == correct\n\n\ndef test_init_defaults():\n loc_op = qo.QubitOperator()\n assert len(loc_op.terms) == 0\n\n\[email protected](\"coefficient\", [0.5, 0.6j, numpy.float64(2.303),\n numpy.complex128(-1j)])\ndef test_init_tuple(coefficient):\n loc_op = ((0, 'X'), (5, 'Y'), (6, 'Z'))\n qubit_op = qo.QubitOperator(loc_op, coefficient)\n assert len(qubit_op.terms) == 1\n assert qubit_op.terms[loc_op] == coefficient\n\n\ndef test_init_str():\n qubit_op = qo.QubitOperator('X0 Y5 Z12', -1.)\n correct = ((0, 'X'), (5, 'Y'), (12, 'Z'))\n assert correct in qubit_op.terms\n assert qubit_op.terms[correct] == -1.0\n\n\ndef test_init_str_identity():\n qubit_op = qo.QubitOperator('', 2.)\n assert len(qubit_op.terms) == 1\n assert () in qubit_op.terms\n assert qubit_op.terms[()] == pytest.approx(2.)\n\n\ndef test_init_bad_term():\n with pytest.raises(ValueError):\n qubit_op = qo.QubitOperator(list())\n\n\ndef test_init_bad_coefficient():\n with pytest.raises(ValueError):\n qubit_op = qo.QubitOperator('X0', \"0.5\")\n\n\ndef test_init_bad_action():\n with pytest.raises(ValueError):\n qubit_op = qo.QubitOperator('Q0')\n\n\ndef test_init_bad_action_in_tuple():\n with pytest.raises(ValueError):\n qubit_op = qo.QubitOperator(((1, 'Q'),))\n\n\ndef test_init_bad_qubit_num_in_tuple():\n with pytest.raises(qo.QubitOperatorError):\n qubit_op = qo.QubitOperator(((\"1\", 'X'),))\n\n\ndef test_init_bad_tuple():\n with pytest.raises(ValueError):\n qubit_op = qo.QubitOperator(((0, 1, 'X'),))\n\n\ndef test_init_bad_str():\n with pytest.raises(ValueError):\n qubit_op = qo.QubitOperator('X')\n\n\ndef test_init_bad_qubit_num():\n with pytest.raises(qo.QubitOperatorError):\n qubit_op = qo.QubitOperator('X-1')\n\n\ndef test_isclose_abs_tol():\n a = qo.QubitOperator('X0', -1.)\n b = qo.QubitOperator('X0', -1.05)\n c = qo.QubitOperator('X0', -1.11)\n assert a.isclose(b, rel_tol=1e-14, abs_tol=0.1)\n assert not a.isclose(c, rel_tol=1e-14, abs_tol=0.1)\n a = qo.QubitOperator('X0', -1.0j)\n b = qo.QubitOperator('X0', -1.05j)\n c = qo.QubitOperator('X0', -1.11j)\n assert a.isclose(b, rel_tol=1e-14, abs_tol=0.1)\n assert not a.isclose(c, rel_tol=1e-14, abs_tol=0.1)\n\n\ndef test_compress():\n a = qo.QubitOperator('X0', .9e-12)\n assert len(a.terms) == 1\n a.compress()\n assert len(a.terms) == 0\n a = 
qo.QubitOperator('X0', 1. + 1j)\n a.compress(.5)\n assert len(a.terms) == 1\n for term in a.terms:\n assert a.terms[term] == 1. + 1j\n a = qo.QubitOperator('X0', 1.1 + 1j)\n a.compress(1.)\n assert len(a.terms) == 1\n for term in a.terms:\n assert a.terms[term] == 1.1\n a = qo.QubitOperator('X0', 1.1 + 1j) + qo.QubitOperator('X1', 1.e-6j)\n a.compress()\n assert len(a.terms) == 2\n for term in a.terms:\n assert isinstance(a.terms[term], complex)\n a.compress(1.e-5)\n assert len(a.terms) == 1\n for term in a.terms:\n assert isinstance(a.terms[term], complex)\n a.compress(1.)\n assert len(a.terms) == 1\n for term in a.terms:\n assert isinstance(a.terms[term], float)\n\n\ndef test_isclose_rel_tol():\n a = qo.QubitOperator('X0', 1)\n b = qo.QubitOperator('X0', 2)\n assert a.isclose(b, rel_tol=2.5, abs_tol=0.1)\n # Test symmetry\n assert a.isclose(b, rel_tol=1, abs_tol=0.1)\n assert b.isclose(a, rel_tol=1, abs_tol=0.1)\n\n\ndef test_isclose_zero_terms():\n op = qo.QubitOperator(((1, 'Y'), (0, 'X')), -1j) * 0\n assert op.isclose(qo.QubitOperator((), 0.0), rel_tol=1e-12, abs_tol=1e-12)\n assert qo.QubitOperator((), 0.0).isclose(op, rel_tol=1e-12, abs_tol=1e-12)\n\n\ndef test_isclose_different_terms():\n a = qo.QubitOperator(((1, 'Y'),), -0.1j)\n b = qo.QubitOperator(((1, 'X'),), -0.1j)\n assert a.isclose(b, rel_tol=1e-12, abs_tol=0.2)\n assert not a.isclose(b, rel_tol=1e-12, abs_tol=0.05)\n assert b.isclose(a, rel_tol=1e-12, abs_tol=0.2)\n assert not b.isclose(a, rel_tol=1e-12, abs_tol=0.05)\n\n\ndef test_isclose_different_num_terms():\n a = qo.QubitOperator(((1, 'Y'),), -0.1j)\n a += qo.QubitOperator(((2, 'Y'),), -0.1j)\n b = qo.QubitOperator(((1, 'X'),), -0.1j)\n assert not b.isclose(a, rel_tol=1e-12, abs_tol=0.05)\n assert not a.isclose(b, rel_tol=1e-12, abs_tol=0.05)\n\n\ndef test_imul_inplace():\n qubit_op = qo.QubitOperator(\"X1\")\n prev_id = id(qubit_op)\n qubit_op *= 3.\n assert id(qubit_op) == prev_id\n\n\[email protected](\"multiplier\", [0.5, 0.6j, numpy.float64(2.303),\n numpy.complex128(-1j)])\ndef test_imul_scalar(multiplier):\n loc_op = ((1, 'X'), (2, 'Y'))\n qubit_op = qo.QubitOperator(loc_op)\n qubit_op *= multiplier\n assert qubit_op.terms[loc_op] == pytest.approx(multiplier)\n\n\ndef test_imul_qubit_op():\n op1 = qo.QubitOperator(((0, 'Y'), (3, 'X'), (8, 'Z'), (11, 'X')), 3.j)\n op2 = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n op1 *= op2\n correct_coefficient = 1.j * 3.0j * 0.5\n correct_term = ((0, 'Y'), (1, 'X'), (3, 'Z'), (11, 'X'))\n assert len(op1.terms) == 1\n assert correct_term in op1.terms\n\n\ndef test_imul_qubit_op_2():\n op3 = qo.QubitOperator(((1, 'Y'), (0, 'X')), -1j)\n op4 = qo.QubitOperator(((1, 'Y'), (0, 'X'), (2, 'Z')), -1.5)\n op3 *= op4\n op4 *= op3\n assert ((2, 'Z'),) in op3.terms\n assert op3.terms[((2, 'Z'),)] == 1.5j\n\n\ndef test_imul_bidir():\n op_a = qo.QubitOperator(((1, 'Y'), (0, 'X')), -1j)\n op_b = qo.QubitOperator(((1, 'Y'), (0, 'X'), (2, 'Z')), -1.5)\n op_a *= op_b\n op_b *= op_a\n assert ((2, 'Z'),) in op_a.terms\n assert op_a.terms[((2, 'Z'),)] == 1.5j\n assert ((0, 'X'), (1, 'Y')) in op_b.terms\n assert op_b.terms[((0, 'X'), (1, 'Y'))] == -2.25j\n\n\ndef test_imul_bad_multiplier():\n op = qo.QubitOperator(((1, 'Y'), (0, 'X')), -1j)\n with pytest.raises(TypeError):\n op *= \"1\"\n\n\ndef test_mul_by_scalarzero():\n op = qo.QubitOperator(((1, 'Y'), (0, 'X')), -1j) * 0\n assert ((0, 'X'), (1, 'Y')) in op.terms\n assert op.terms[((0, 'X'), (1, 'Y'))] == pytest.approx(0.0)\n\n\ndef test_mul_bad_multiplier():\n op = 
qo.QubitOperator(((1, 'Y'), (0, 'X')), -1j)\n with pytest.raises(TypeError):\n op = op * \"0.5\"\n\n\ndef test_mul_out_of_place():\n op1 = qo.QubitOperator(((0, 'Y'), (3, 'X'), (8, 'Z'), (11, 'X')), 3.j)\n op2 = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n op3 = op1 * op2\n correct_coefficient = 1.j * 3.0j * 0.5\n correct_term = ((0, 'Y'), (1, 'X'), (3, 'Z'), (11, 'X'))\n assert op1.isclose(qo.QubitOperator(\n ((0, 'Y'), (3, 'X'), (8, 'Z'), (11, 'X')), 3.j))\n assert op2.isclose(qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5))\n assert op3.isclose(qo.QubitOperator(correct_term, correct_coefficient))\n\n\ndef test_mul_npfloat64():\n op = qo.QubitOperator(((1, 'X'), (3, 'Y')), 0.5)\n res = op * numpy.float64(0.5)\n assert res.isclose(qo.QubitOperator(((1, 'X'), (3, 'Y')), 0.5 * 0.5))\n\n\ndef test_mul_multiple_terms():\n op = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n op += qo.QubitOperator(((1, 'Z'), (3, 'X'), (8, 'Z')), 1.2)\n op += qo.QubitOperator(((1, 'Z'), (3, 'Y'), (9, 'Z')), 1.4j)\n res = op * op\n correct = qo.QubitOperator((), 0.5**2 + 1.2**2 + 1.4j**2)\n correct += qo.QubitOperator(((1, 'Y'), (3, 'Z')),\n 2j * 1j * 0.5 * 1.2)\n assert res.isclose(correct)\n\n\[email protected](\"multiplier\", [0.5, 0.6j, numpy.float64(2.303),\n numpy.complex128(-1j)])\ndef test_rmul_scalar(multiplier):\n op = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n res1 = op * multiplier\n res2 = multiplier * op\n assert res1.isclose(res2)\n\n\ndef test_rmul_bad_multiplier():\n op = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n with pytest.raises(TypeError):\n op = \"0.5\" * op\n\n\[email protected](\"divisor\", [0.5, 0.6j, numpy.float64(2.303),\n numpy.complex128(-1j), 2])\ndef test_truediv_and_div(divisor):\n op = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n op2 = copy.deepcopy(op)\n original = copy.deepcopy(op)\n res = op / divisor\n res2 = op2.__div__(divisor) # To test python 2 version as well\n correct = op * (1. / divisor)\n assert res.isclose(correct)\n assert res2.isclose(correct)\n # Test if done out of place\n assert op.isclose(original)\n assert op2.isclose(original)\n\n\ndef test_truediv_bad_divisor():\n op = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n with pytest.raises(TypeError):\n op = op / \"0.5\"\n\n\[email protected](\"divisor\", [0.5, 0.6j, numpy.float64(2.303),\n numpy.complex128(-1j), 2])\ndef test_itruediv_and_idiv(divisor):\n op = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n op2 = copy.deepcopy(op)\n original = copy.deepcopy(op)\n correct = op * (1. 
/ divisor)\n op /= divisor\n op2.__idiv__(divisor) # To test python 2 version as well\n assert op.isclose(correct)\n assert op2.isclose(correct)\n # Test if done in-place\n assert not op.isclose(original)\n assert not op2.isclose(original)\n\n\ndef test_itruediv_bad_divisor():\n op = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n with pytest.raises(TypeError):\n op /= \"0.5\"\n\n\ndef test_iadd_cancellation():\n term_a = ((1, 'X'), (3, 'Y'), (8, 'Z'))\n term_b = ((1, 'X'), (3, 'Y'), (8, 'Z'))\n a = qo.QubitOperator(term_a, 1.0)\n a += qo.QubitOperator(term_b, -1.0)\n assert len(a.terms) == 0\n\n\ndef test_iadd_different_term():\n term_a = ((1, 'X'), (3, 'Y'), (8, 'Z'))\n term_b = ((1, 'Z'), (3, 'Y'), (8, 'Z'))\n a = qo.QubitOperator(term_a, 1.0)\n a += qo.QubitOperator(term_b, 0.5)\n assert len(a.terms) == 2\n assert a.terms[term_a] == pytest.approx(1.0)\n assert a.terms[term_b] == pytest.approx(0.5)\n a += qo.QubitOperator(term_b, 0.5)\n assert len(a.terms) == 2\n assert a.terms[term_a] == pytest.approx(1.0)\n assert a.terms[term_b] == pytest.approx(1.0)\n\n\ndef test_iadd_bad_addend():\n op = qo.QubitOperator((), 1.0)\n with pytest.raises(TypeError):\n op += \"0.5\"\n\n\ndef test_add():\n term_a = ((1, 'X'), (3, 'Y'), (8, 'Z'))\n term_b = ((1, 'Z'), (3, 'Y'), (8, 'Z'))\n a = qo.QubitOperator(term_a, 1.0)\n b = qo.QubitOperator(term_b, 0.5)\n res = a + b + b\n assert len(res.terms) == 2\n assert res.terms[term_a] == pytest.approx(1.0)\n assert res.terms[term_b] == pytest.approx(1.0)\n # Test out of place\n assert a.isclose(qo.QubitOperator(term_a, 1.0))\n assert b.isclose(qo.QubitOperator(term_b, 0.5))\n\n\ndef test_add_bad_addend():\n op = qo.QubitOperator((), 1.0)\n with pytest.raises(TypeError):\n op = op + \"0.5\"\n\n\ndef test_sub():\n term_a = ((1, 'X'), (3, 'Y'), (8, 'Z'))\n term_b = ((1, 'Z'), (3, 'Y'), (8, 'Z'))\n a = qo.QubitOperator(term_a, 1.0)\n b = qo.QubitOperator(term_b, 0.5)\n res = a - b\n assert len(res.terms) == 2\n assert res.terms[term_a] == pytest.approx(1.0)\n assert res.terms[term_b] == pytest.approx(-0.5)\n res2 = b - a\n assert len(res2.terms) == 2\n assert res2.terms[term_a] == pytest.approx(-1.0)\n assert res2.terms[term_b] == pytest.approx(0.5)\n\n\ndef test_sub_bad_subtrahend():\n op = qo.QubitOperator((), 1.0)\n with pytest.raises(TypeError):\n op = op - \"0.5\"\n\n\ndef test_isub_different_term():\n term_a = ((1, 'X'), (3, 'Y'), (8, 'Z'))\n term_b = ((1, 'Z'), (3, 'Y'), (8, 'Z'))\n a = qo.QubitOperator(term_a, 1.0)\n a -= qo.QubitOperator(term_b, 0.5)\n assert len(a.terms) == 2\n assert a.terms[term_a] == pytest.approx(1.0)\n assert a.terms[term_b] == pytest.approx(-0.5)\n a -= qo.QubitOperator(term_b, 0.5)\n assert len(a.terms) == 2\n assert a.terms[term_a] == pytest.approx(1.0)\n assert a.terms[term_b] == pytest.approx(-1.0)\n\n\ndef test_isub_bad_addend():\n op = qo.QubitOperator((), 1.0)\n with pytest.raises(TypeError):\n op -= \"0.5\"\n\n\ndef test_neg():\n op = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n -op\n # out of place\n assert op.isclose(qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5))\n correct = -1.0 * op\n assert correct.isclose(-op)\n\n\ndef test_str():\n op = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n assert str(op) == \"0.5 X1 Y3 Z8\"\n op2 = qo.QubitOperator((), 2)\n assert str(op2) == \"2 I\"\n\n\ndef test_str_empty():\n op = qo.QubitOperator()\n assert str(op) == '0'\n\n\ndef test_str_multiple_terms():\n op = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n op += qo.QubitOperator(((1, 
'Y'), (3, 'Y'), (8, 'Z')), 0.6)\n assert (str(op) == \"0.5 X1 Y3 Z8 +\\n0.6 Y1 Y3 Z8\" or\n str(op) == \"0.6 Y1 Y3 Z8 +\\n0.5 X1 Y3 Z8\")\n op2 = qo.QubitOperator((), 2)\n assert str(op2) == \"2 I\"\n\n\ndef test_rep():\n op = qo.QubitOperator(((1, 'X'), (3, 'Y'), (8, 'Z')), 0.5)\n # Not necessary, repr could do something in addition\n assert repr(op) == str(op)\n"
] |
[
[
"numpy.complex128",
"numpy.float64"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
evarol/ot_tracking
|
[
"cddf27558fa5679ef06aad6a0945c34db0209ee7"
] |
[
"python/otimage/deformations.py"
] |
[
"\"\"\"Deformation models for worm registration\"\"\"\n\nfrom abc import ABC, abstractmethod\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.pipeline import Pipeline\n\n\nclass DeformationModel(ABC):\n\n @abstractmethod\n def fit(self, x, y, weights):\n pass\n \n @abstractmethod\n def predict(self, x):\n pass\n \n\nclass Affine(DeformationModel):\n \n def __init__(self):\n \n self._model = Pipeline([\n ('poly', PolynomialFeatures(degree=1, include_bias=True)),\n ('linear', LinearRegression(fit_intercept=False))\n ])\n \n @property\n def beta(self):\n \n return self._model.named_steps['linear'].coef_\n \n def fit(self, x, y, weights):\n \n self._model.fit(x, y, linear__sample_weight=weights)\n \n return self\n \n def predict(self, x):\n \n return self._model.predict(x)\n \n def det_jac(self, x):\n \n mtx = self.beta[:, 1:4]\n det = np.linalg.det(mtx)\n \n return np.full((x.shape[0], 1), det)\n \n \nclass Quadratic(DeformationModel):\n \n def __init__(self):\n \n self._model = Pipeline([\n ('poly', PolynomialFeatures(degree=2, include_bias=True)),\n ('linear', LinearRegression(fit_intercept=False))\n ])\n \n @property\n def beta(self):\n \n return self._model.named_steps['linear'].coef_\n \n def fit(self, x, y, weights):\n \n self._model.fit(x, y, linear__sample_weight=weights)\n \n return self\n \n def predict(self, x):\n \n return self._model.predict(x)\n \n def _compute_jac(self, x):\n \n x0 = x[0]\n x1 = x[1]\n x2 = x[2]\n\n d_phi = np.array([\n [0, 0, 0 ],\n [1, 0, 0 ],\n [0, 1, 0 ], \n [0, 0, 1 ],\n [2 * x0, 0, 0 ],\n [x1, x0, 0 ],\n [x2, 0, x0 ],\n [0, 2 * x1, 0 ],\n [0, x2, x1 ],\n [0, 0, 2 * x2 ],\n ])\n \n return self.beta @ d_phi\n\n def det_jac(self, x):\n \n det_vals = [np.linalg.det(self._compute_jac(x_i)) for x_i in x]\n \n return np.array(det_vals).reshape(-1, 1)\n\n \nclass Cubic(DeformationModel):\n \n def __init__(self):\n \n self._model = Pipeline([\n ('poly', PolynomialFeatures(degree=3, include_bias=True)),\n ('linear', LinearRegression(fit_intercept=False))\n ])\n \n @property\n def beta(self):\n \n return self._model.named_steps['linear'].coef_\n \n def fit(self, x, y, weights):\n \n self._model.fit(x, y, linear__sample_weight=weights)\n return self\n \n def predict(self, x):\n \n return self._model.predict(x)\n \n def _compute_jac(self, x):\n \n x0 = x[0]\n x1 = x[1]\n x2 = x[2]\n \n x0_2 = x0 ** 2\n x1_2 = x1 ** 2\n x2_2 = x2 ** 2\n \n x0_x1 = x0 * x1\n x1_x2 = x1 * x2\n x0_x2 = x0 * x2\n \n d_phi = np.array([\n [0, 0, 0 ],\n [1, 0, 0 ],\n [0, 1, 0 ], \n [0, 0, 1 ],\n [2 * x0, 0, 0 ],\n [x1, x0, 0 ],\n [x2, 0, x0 ],\n [0, 2 * x1, 0 ],\n [0, x2, x1 ],\n [0, 0, 2 * x2 ],\n [3 * x0_2, 0, 0 ],\n [2 * x0_x1, x0_2, 0 ],\n [2 * x0_x2, 0, x0_2 ],\n [x1_2, 2 * x0_x1, 0 ],\n [x1_x2, x0_x2, x0_x1 ],\n [x2_2, 0, 2 * x0_x2],\n [0, 3 * x1_2, 0 ],\n [0, 2 * x1_x2, x1_2 ],\n [0, x2_2, 2 * x1_x2],\n [0, 0, 3 * x2_2 ],\n ])\n \n return self.beta @ d_phi\n\n def det_jac(self, x):\n \n det_vals = [np.linalg.det(self._compute_jac(x_i)) for x_i in x]\n \n return np.array(det_vals).reshape(-1, 1)\n"
] |
[
[
"sklearn.preprocessing.PolynomialFeatures",
"numpy.full",
"numpy.linalg.det",
"sklearn.linear_model.LinearRegression",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
evazhang612/honygenerator
|
[
"cafcf1736faba978ecaed624b949ebc1498477ee"
] |
[
"language-modeling-master/src/nnlm-mc-drop.py"
] |
[
"import torchtext, random, torch\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nimport numpy as np\nfrom tqdm import tqdm\n\nglobal use_cuda\nuse_cuda = torch.cuda.is_available()\ndevice = 0 if use_cuda else -1\n\nTEXT = torchtext.data.Field()\ntrain, val, test = torchtext.datasets.LanguageModelingDataset.splits(path=\".\", train=\"train.txt\", validation=\"valid.txt\", test=\"valid.txt\", text_field=TEXT)\nTEXT.build_vocab(train, max_size=1000) if False else TEXT.build_vocab(train)\nTEXT.vocab.load_vectors('glove.840B.300d')\ntrain_iter, val_iter, test_iter = torchtext.data.BPTTIterator.splits((train, val, test), batch_size=10, device=device, bptt_len=32, repeat=False)\n\nclass LanguageModel(nn.Module):\n \"\"\" neural network language model with multichannel embeddings and dropout \"\"\" \n def __init__(self, hidden_dim = 100, TEXT = TEXT):\n super(LanguageModel, self).__init__()\n \n vocab_size, embedding_dim = TEXT.vocab.vectors.shape\n \n self.nonstatic_embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.nonstatic_embeddings.weight.data.copy_(TEXT.vocab.vectors)\n \n self.static_embeddings = nn.Embedding(vocab_size, embedding_dim)\n self.static_embeddings.weight.data.copy_(TEXT.vocab.vectors) \n self.static_embeddings.weight.requires_grad = False\n \n self.input2linear = nn.Linear(4*embedding_dim, hidden_dim)\n self.linear2output = nn.Linear(hidden_dim, vocab_size)\n self.dropout = nn.Dropout(p = 0.50)\n \n def forward(self, x):\n nonstatic_embedded, static_embedded = self.nonstatic_embeddings(x), self.static_embeddings(x)\n dropped_nonstatic, dropped_static = self.dropout(nonstatic_embedded), self.dropout(static_embedded)\n x_1 = torch.cat([dropped_nonstatic, dropped_static], dim = 2)\n x_2 = x_1.view(len(x_1), -1)\n x_3 = F.tanh(self.input2linear(x_2))\n x_4 = self.linear2output(x_3)\n x_5 = self.dropout(x_4)\n \n logits = F.log_softmax(x_5, dim = 1) \n return logits\n \n def predict(self, x, TEXT = TEXT):\n embedded = torch.cat([self.nonstatic_embeddings(x), self.static_embeddings(x)], dim = 1)\n embedded = embedded.view(-1, 1).transpose(0,1)\n activated = F.tanh(self.input2linear(embedded))\n output = self.linear2output(activated)\n logits = F.log_softmax(output, dim = 1)\n out_ids = np.argsort(logits.data[0].tolist())[-20:][::-1]\n out_words = ' '.join([TEXT.vocab.itos[out_id] for out_id in out_ids])\n return out_words\n \nclass Trainer:\n def __init__(self, train_iter, val_iter):\n self.train_iter = train_iter\n self.val_iter = val_iter\n \n def string_to_batch(self, string):\n relevant_split = string.split()[-2:] # last two words, ignore ___\n ids = [self.word_to_id(word) for word in relevant_split]\n if use_cuda:\n return Variable(torch.LongTensor(ids)).cuda()\n else:\n return Variable(torch.LongTensor(ids))\n \n def word_to_id(self, word, TEXT = TEXT):\n return TEXT.vocab.stoi[word]\n \n def batch_to_input(self, batch):\n ngrams = self.collect_batch_ngrams(batch)\n x = Variable(torch.LongTensor([ngram[:-1] for ngram in ngrams]))\n y = Variable(torch.LongTensor([ngram[-1] for ngram in ngrams]))\n if use_cuda:\n return x.cuda(), y.cuda()\n else:\n return x, y\n \n def collect_batch_ngrams(self, batch, n = 3):\n data = batch.text.view(-1).data.tolist()\n return [tuple(data[idx:idx + n]) for idx in range(0, len(data) - n + 1)]\n \n def train_model(self, model, num_epochs):\n parameters = filter(lambda p: p.requires_grad, model.parameters())\n optimizer = torch.optim.Adam(params = parameters, lr=1e-3)\n criterion = 
nn.NLLLoss()\n \n for epoch in tqdm(range(num_epochs)):\n \n model.train()\n epoch_loss = []\n \n for batch in self.train_iter:\n x, y = self.batch_to_input(batch)\n\n optimizer.zero_grad()\n\n y_pred = model(x)\n\n loss = criterion(y_pred, y)\n loss.backward()\n\n optimizer.step()\n\n epoch_loss.append(loss.data[0])\n \n model.eval()\n train_ppl = np.exp(np.mean(epoch_loss))\n val_ppl = self.validate(model)\n\n print('Epoch {0} | Loss: {1} | Train PPL: {2} | Val PPL: {3}'.format(epoch+1, np.mean(epoch_loss), train_ppl, val_ppl))\n \n print('Model trained.')\n self.write_kaggle(model)\n print('Output saved.')\n \n def validate(self, model):\n criterion = nn.NLLLoss()\n aggregate_loss = []\n for batch in self.val_iter:\n \n x, y_t = self.batch_to_input(batch)\n y_p = model(x)\n \n loss = criterion(y_p, y_t)\n \n aggregate_loss.append(loss.data[0])\n \n val_ppl = np.exp(np.mean(aggregate_loss))\n return val_ppl\n \n def predict_sentence(self, string, model):\n string = string[:-4]\n x = self.string_to_batch(string)\n out_words = model.predict(x)\n return out_words\n \n def write_kaggle(self, model, input_file = 'input.txt'):\n inputs = open(input_file, 'r').read().splitlines()\n outputs = [self.predict_sentence(sentence, model) for sentence in inputs]\n with open('nnlm_multichannel_dropout_output.txt', 'w') as f:\n f.write('id,word')\n for idx, line in enumerate(outputs):\n f.write('\\n')\n f.write(str(idx) + ',')\n f.write(line) \n\nmodel = LanguageModel(hidden_dim = 1024)\nif use_cuda: \n model.cuda()\ntrainer = Trainer(train_iter = train_iter, val_iter = val_iter)\ntrainer.train_model(model = model, num_epochs = 10)\n\n"
] |
[
[
"torch.optim.Adam",
"torch.nn.Dropout",
"torch.nn.NLLLoss",
"torch.LongTensor",
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.nn.Embedding",
"torch.nn.Linear",
"numpy.mean",
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Haunter17/MIR_SU17
|
[
"0eaefb8cab78ca896c1ed0074892c296110eb161",
"0eaefb8cab78ca896c1ed0074892c296110eb161"
] |
[
"exp2/exp2c.py",
"exp8/exp8g_none.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nimport h5py\nimport time\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n# Functions for initializing neural nets parameters\ndef init_weight_variable(shape):\n initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float32)\n return tf.Variable(initial)\n\ndef init_bias_variable(shape):\n initial = tf.constant(0.1, shape=shape, dtype=tf.float32)\n return tf.Variable(initial)\n\ndef conv2d(x, W):\n return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')\n\ndef loadData(filepath):\n print('==> Experiment 2b')\n print('==> Loading data from {}'.format(filepath))\n # benchmark\n t_start = time.time()\n\n # reading data\n f = h5py.File(filepath)\n X_train = np.array(f.get('trainingFeatures'))\n y_train = np.array(f.get('trainingLabels'))\n X_val = np.array(f.get('validationFeatures'))\n y_val = np.array(f.get('validationLabels'))\n t_end = time.time()\n print('--Time elapsed for loading data: {t:.2f} \\\n seconds'.format(t = t_end - t_start))\n del f\n print('-- Number of training samples: {}'.format(X_train.shape[0]))\n print('-- Number of validation samples: {}'.format(X_val.shape[0]))\n\n return [X_train, y_train, X_val, y_val]\n\ndef runNeuralNet(num_freq, X_train, y_train, X_val, y_val, batch_size, num_epochs, pooling_strategy):\n\n # Neural-network model set-up\n num_training_vec, total_features = X_train.shape\n num_freq = 121\n num_frames = int(total_features / num_freq)\n print('-- Num frames: {}'.format(num_frames))\n num_classes = int(max(y_train.max(), y_val.max()) + 1)\n k1 = 32\n k2 = 64\n l = num_frames\n\n\n print_freq = 1\n\n # Transform labels into on-hot encoding form\n y_train_OHEnc = tf.one_hot(y_train.copy(), num_classes)\n y_val_OHEnc = tf.one_hot(y_val.copy(), num_classes)\n\n # Set-up input and output label\n x = tf.placeholder(tf.float32, [None, total_features])\n y_ = tf.placeholder(tf.float32, [None, num_classes])\n\n # first convolutional layer\n W_conv1 = init_weight_variable([num_freq, 1, 1, k1])\n b_conv1 = init_bias_variable([k1])\n x_image = tf.reshape(x, [-1, num_freq, num_frames, 1])\n h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)\n\n # second layer\n W_conv2 = init_weight_variable([1, l, k1, k2])\n b_conv2 = init_bias_variable([k2])\n h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)\n h_conv2_flat = tf.reshape(h_conv2, [-1, (num_frames - l + 1) * k2])\n\n #h_pool2 = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')\n\n # softmax layer\n W_sm = init_weight_variable([(num_frames - l + 1) * k2, num_classes])\n b_sm = init_bias_variable([num_classes])\n\n y_conv = tf.matmul(h_conv2_flat, W_sm) + b_sm\n\n # evaluations\n cross_entropy = tf.reduce_mean(\n tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y_conv))\n train_step = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(cross_entropy)\n correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n # get the gradients\n #gradients = train_step.compute_gradients()\n #gradients = train_step.compute_gradients(loss=cross_entropy, var_list=W_sm)\n\n # session\n sess = tf.InteractiveSession()\n sess.run(tf.global_variables_initializer())\n\n y_train = sess.run(y_train_OHEnc)[:, 0, :]\n y_val = sess.run(y_val_OHEnc)[:, 0, :]\n\n train_acc_list = []\n val_acc_list = []\n train_err_list = []\n val_err_list = []\n epoch_numbers = []\n\n # benchmark\n t_start = time.time()\n for epoch in range(num_epochs):\n 
epochStart = time.time()\n for i in range(0, num_training_vec, batch_size):\n batch_end_point = min(i + batch_size, num_training_vec)\n train_batch_data = X_train[i : batch_end_point]\n train_batch_label = y_train[i : batch_end_point]\n train_step.run(feed_dict={x: train_batch_data, y_: train_batch_label})\n epochEnd = time.time()\n # printing and recording data\n if (epoch + 1) % print_freq == 0:\n train_acc = accuracy.eval(feed_dict={x:X_train, y_: y_train})\n train_acc_list.append(train_acc)\n val_acc = accuracy.eval(feed_dict={x: X_val, y_: y_val})\n val_acc_list.append(val_acc)\n train_err = cross_entropy.eval(feed_dict={x: X_train, y_: y_train})\n train_err_list.append(train_err)\n val_err = cross_entropy.eval(feed_dict={x: X_val, y_: y_val})\n val_err_list.append(val_err) \n epoch_numbers += [epoch] \n #print(\"-- epoch: %d, training error %g\"%(epoch + 1, train_err))\n print(\"epoch: %d, time: %g, t acc, v acc, t cost, v cost: %g, %g, %g, %g\"%(epoch+1, epochEnd - epochStart, train_acc, val_acc, train_err, val_err))\n\n # print out the gradients\n #print(gradients.eval(feed_dict={x:X_train, y_: y_train}))\n\n t_end = time.time()\n print('--Time elapsed for training: {t:.2f} \\\n seconds'.format(t = t_end - t_start))\n\n return [train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers]\n\n'''\nOur Main\nCommand Line Arguments: (1) Length of horizontal window\n'''\n\n# load the data\n[X_train, y_train, X_val, y_val] = loadData('/pylon2/ci560sp/cstrong/exp1/taylorswift_smallDataset_71_7.mat')\n\n\nbatchSize = 500\nnumEpochs = 250\npoolingStrategy = 'MAX'\n\n[train_acc_list, val_acc_list, train_err_list, val_err_list, epoch_numbers] = runNeuralNet(121, X_train, y_train, X_val, y_val, batchSize, numEpochs, poolingStrategy)\n\n\n\n\n# Reports\nprint('-- Training accuracy: {:.4f}'.format(train_acc_list[-1]))\nprint('-- Validation accuracy: {:.4f}'.format(val_acc_list[-1]))\nprint('-- Training error: {:.4E}'.format(train_err_list[-1]))\nprint('-- Validation error: {:.4E}'.format(val_err_list[-1]))\n\nprint('==> Generating error plot...')\nx_list = epoch_numbers\ntrain_err_plot, = plt.plot(x_list, train_err_list, 'b.')\nval_err_plot, = plt.plot(x_list, val_err_list, '.', color='orange')\nplt.xlabel('Number of epochs')\nplt.ylabel('Cross-Entropy Error')\nplt.title('Error vs Number of Epochs')\nplt.legend((train_err_plot, val_err_plot), ('training', 'validation'), loc='best')\nplt.savefig('exp2c_k1=32_k2=64.png', format='png')\nplt.close()\n\nprint('==> Done.')\n",
"import numpy as np\r\nimport tensorflow as tf\r\nimport h5py\r\nfrom sklearn.preprocessing import OneHotEncoder\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport scipy.io\r\n\r\n# Functions for initializing neural nets parameters\r\ndef weight_variable(shape, var_name):\r\n initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float64)\r\n return tf.Variable(initial, name=var_name)\r\n\r\ndef bias_variable(shape, var_name):\r\n initial = tf.constant(0.1, shape=shape, dtype=tf.float64)\r\n return tf.Variable(initial, name=var_name)\r\n\r\ndef conv2d(x, W):\r\n\treturn tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID')\r\n\r\ndef batch_nm(x, eps=1e-5):\r\n\t# batch normalization to have zero mean and unit variance\r\n\tmu, var = tf.nn.moments(x, [0])\r\n\treturn tf.nn.batch_normalization(x, mu, var, None, None, eps)\r\n\r\n# Download data from .mat file into numpy array\r\nprint('==> Experiment 8g')\r\nfilepath = '/scratch/ttanpras/exp8a_d7_1s.mat'\r\nprint('==> Loading data from {}'.format(filepath))\r\nf = h5py.File(filepath)\r\ndata_train = np.array(f.get('trainingFeatures'))\r\ndata_val = np.array(f.get('validationFeatures'))\r\ndel f\r\n\r\nprint('==> Data sizes:',data_train.shape, data_val.shape)\r\n\r\n# Transform labels into on-hot encoding form\r\nenc = OneHotEncoder(n_values = 71)\r\n\r\n'''\r\n NN config parameters\r\n'''\r\nsub_window_size = 32 \r\nnum_features = 169*sub_window_size\r\nnum_frames = 32\r\nhidden_layer_size = 2000\r\nnum_bits = 2000\r\nnum_classes = 71\r\nprint(\"Number of features:\", num_features)\r\nprint(\"Number of songs:\",num_classes)\r\n\r\n# Reshape input features\r\nX_train = np.reshape(data_train,(-1, num_features))\r\nX_val = np.reshape(data_val,(-1, num_features))\r\nprint(\"Input sizes:\", X_train.shape, X_val.shape)\r\n\r\ny_train = []\r\ny_val = []\r\n# Add Labels\r\nfor label in range(num_classes):\r\n for sampleCount in range(X_train.shape[0]//num_classes):\r\n y_train.append([label])\r\n for sampleCount in range(X_val.shape[0]//num_classes):\r\n y_val.append([label])\r\n\r\nX_train = np.concatenate((X_train, y_train), axis=1)\r\nX_val = np.concatenate((X_val, y_val), axis=1)\r\n\r\n# Shuffle\r\nnp.random.shuffle(X_train)\r\nnp.random.shuffle(X_val)\r\n\r\n# Separate coefficients and labels\r\ny_train = X_train[:, -1].reshape(-1, 1)\r\nX_train = X_train[:, :-1]\r\ny_val = X_val[:, -1].reshape(-1, 1)\r\nX_val = X_val[:, :-1]\r\nprint('==> Data sizes:',X_train.shape, y_train.shape,X_val.shape, y_val.shape)\r\n\r\ny_train = enc.fit_transform(y_train.copy()).astype(int).toarray()\r\ny_val = enc.fit_transform(y_val.copy()).astype(int).toarray()\r\n\r\nplotx = []\r\nploty_train = []\r\nploty_val = []\r\n \r\n # Set-up NN layers\r\nx = tf.placeholder(tf.float64, [None, num_features])\r\nW1 = weight_variable([num_features, hidden_layer_size], \"W1\")\r\nb1 = bias_variable([hidden_layer_size], \"b1\")\r\n\r\nOpW1 = tf.placeholder(tf.float64, [num_features, hidden_layer_size])\r\nOpb1 = tf.placeholder(tf.float64, [hidden_layer_size])\r\n\r\n# Hidden layer activation function: ReLU\r\nh1 = tf.nn.relu(tf.matmul(x, W1) + b1)\r\n\r\nW2 = weight_variable([hidden_layer_size, num_bits], \"W2\")\r\nb2 = bias_variable([num_bits], \"b2\")\r\n\r\nOpW2 = tf.placeholder(tf.float64, [hidden_layer_size, num_bits])\r\nOpb2 = tf.placeholder(tf.float64, [num_bits])\r\n\r\n# Pre-activation value for bit representation\r\nh = tf.matmul(h1, W2) + b2\r\nh2 = tf.nn.relu(tf.matmul(h1, W2) + b2)\r\n\r\nW3 = 
weight_variable([num_bits, num_classes], \"W3\")\r\nb3 = bias_variable([num_classes], \"b3\")\r\n\r\nOpW3 = tf.placeholder(tf.float64, [num_bits, num_classes])\r\nOpb3 = tf.placeholder(tf.float64, [num_classes])\r\n\r\n# Softmax layer (Output), dtype = float64\r\ny = tf.matmul(h2, W3) + b3\r\n\r\n# NN desired value (labels)\r\ny_ = tf.placeholder(tf.float64, [None, num_classes])\r\n\r\n# Loss function\r\ncross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))\r\ntrain_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)\r\n\r\nsess = tf.InteractiveSession()\r\n\r\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\r\n\r\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))\r\nsess.run(tf.initialize_all_variables())\r\n\r\n# Training\r\nnumTrainingVec = len(X_train)\r\nbatchSize = 500\r\nnumEpochs = 1000\r\nbestValErr = 10000\r\nbestValEpoch = 0\r\n\r\nstartTime = time.time()\r\nfor epoch in range(numEpochs):\r\n for i in range(0,numTrainingVec,batchSize):\r\n\r\n # Batch Data\r\n batchEndPoint = min(i+batchSize, numTrainingVec)\r\n trainBatchData = X_train[i:batchEndPoint]\r\n trainBatchLabel = y_train[i:batchEndPoint]\r\n\r\n train_step.run(feed_dict={x: trainBatchData, y_: trainBatchLabel})\r\n\r\n # Print accuracy\r\n if epoch % 5 == 0 or epoch == numEpochs-1:\r\n plotx.append(epoch)\r\n train_error = cross_entropy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel})\r\n train_acc = accuracy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel})\r\n val_error = cross_entropy.eval(feed_dict={x:X_val, y_: y_val})\r\n val_acc = accuracy.eval(feed_dict={x:X_val, y_: y_val})\r\n ploty_train.append(train_error)\r\n ploty_val.append(val_error)\r\n print(\"epoch: %d, val error %g, train error %g\"%(epoch, val_error, train_error))\r\n\r\n if val_error < bestValErr:\r\n bestValErr = val_error\r\n bestValEpoch = epoch\r\n OpW1 = W1\r\n Opb1 = b1\r\n OpW2 = W2\r\n Opb2 = b2\r\n OpW3 = W3\r\n Opb3 = b3\r\n\r\nendTime = time.time()\r\nprint(\"Elapse Time:\", endTime - startTime)\r\nprint(\"Best validation error: %g at epoch %d\"%(bestValErr, bestValEpoch))\r\n\r\n# Restore best model for early stopping\r\nW1 = OpW1\r\nb1 = Opb1\r\nW2 = OpW2\r\nb2 = Opb2\r\nW3 = OpW3\r\nb3 = Opb3\r\n\r\nsaveweight = {}\r\nsaveweight['W1'] = np.array(W1.eval())\r\nsaveweight['b1'] = np.array(b1.eval())\r\nsaveweight['W2'] = np.array(W2.eval())\r\nsaveweight['b2'] = np.array(b2.eval())\r\nscipy.io.savemat('exp8g_none_weight.mat',saveweight)\r\n\r\nprint('==> Generating error plot...')\r\nerrfig = plt.figure()\r\ntrainErrPlot = errfig.add_subplot(111)\r\ntrainErrPlot.set_xlabel('Number of Epochs')\r\ntrainErrPlot.set_ylabel('Cross-Entropy Error')\r\ntrainErrPlot.set_title('Error vs Number of Epochs')\r\ntrainErrPlot.scatter(plotx, ploty_train)\r\nvalErrPlot = errfig.add_subplot(111)\r\nvalErrPlot.scatter(plotx, ploty_val)\r\nerrfig.savefig('exp8g_none.png')\r\n\r\n'''\r\nGENERATING REPRESENTATION OF NOISY FILES\r\n'''\r\nnamelist = ['orig','comp5','comp10','str5','str10','ampSat_(-15)','ampSat_(-10)','ampSat_(-5)', \\\r\n 'ampSat_(5)','ampSat_(10)','ampSat_(15)','pitchShift_(-1)','pitchShift_(-0.5)', \\\r\n 'pitchShift_(0.5)','pitchShift_(1)','rev_dkw','rev_gal','rev_shan0','rev_shan1', \\\r\n 'rev_gen','crowd-15','crowd-10','crowd-5','crowd0','crowd5','crowd10','crowd15', \\\r\n 'crowd100','rest-15','rest-10','rest-5','rest0','rest5','rest10','rest15', \\\r\n 'rest100','AWGN-15','AWGN-10','AWGN-5','AWGN0','AWGN5','AWGN10','AWGN15', 
'AWGN100']\r\noutdir = '/scratch/ttanpras/taylorswift_noisy_processed/'\r\n\r\nrepDict = {}\r\n\r\n# Loop over each CQT files, not shuffled\r\nfor count in range(len(namelist)):\r\n\r\n name = namelist[count]\r\n filename = outdir + name + '.mat'\r\n cqt = scipy.io.loadmat(filename)['Q']\r\n cqt = np.transpose(np.array(cqt))\r\n\r\n # Group into windows of 32 without overlapping\r\n # Discard any leftover frames\r\n num_windows = cqt.shape[0] // 32\r\n cqt = cqt[:32*num_windows]\r\n X = np.reshape(cqt,(num_windows, num_features))\r\n\r\n # Feed window through model (Only 1 layer of weight w/o non-linearity)\r\n rep = h.eval(feed_dict={x:X})\r\n\r\n # Put the output representation into a dictionary\r\n repDict['n'+str(count)] = rep\r\n\r\nscipy.io.savemat('exp8g_none_repNon.mat',repDict)"
] |
[
[
"matplotlib.pyplot.legend",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.cast",
"matplotlib.pyplot.plot",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.conv2d",
"tensorflow.Variable",
"matplotlib.pyplot.close",
"tensorflow.argmax",
"tensorflow.matmul",
"tensorflow.truncated_normal",
"tensorflow.InteractiveSession",
"matplotlib.pyplot.title",
"tensorflow.placeholder",
"matplotlib.pyplot.savefig",
"tensorflow.global_variables_initializer",
"matplotlib.pyplot.ylabel",
"tensorflow.constant",
"matplotlib.use",
"tensorflow.reshape",
"matplotlib.pyplot.xlabel"
],
[
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.cast",
"numpy.concatenate",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.conv2d",
"tensorflow.Variable",
"numpy.reshape",
"tensorflow.nn.moments",
"tensorflow.initialize_all_variables",
"tensorflow.argmax",
"matplotlib.pyplot.figure",
"tensorflow.matmul",
"tensorflow.nn.batch_normalization",
"tensorflow.truncated_normal",
"tensorflow.InteractiveSession",
"tensorflow.placeholder",
"numpy.array",
"tensorflow.constant",
"matplotlib.use",
"sklearn.preprocessing.OneHotEncoder",
"numpy.random.shuffle"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
michellesima/transformers
|
[
"327b32277d882e13b104dd8502a83a8668ea84e9"
] |
[
"utils.py"
] |
[
"import pandas as pd\r\nfrom nltk.stem import WordNetLemmatizer \r\nfrom transformers import *\r\n\r\ndef repeatN(list, n):\r\n ori = list\r\n for _ in range(n):\r\n list = list.append(ori, ignore_index=True)\r\n return list\r\n\r\ndef agen_verbs():\r\n '''\r\n for word in each category, get its infinitive form if it's in verb.txt\r\n for short phrases like 'apply to', only the first word is considered\r\n Note: 24 words not in verb.txt\r\n '''\r\n df = pd.read_csv('~/resources/lexica/CONNOTATION/agency_verb.csv')\r\n agen_v = {}\r\n total = 0\r\n cats = {'+': 'pos', '-':'neg', '=':'equal'}\r\n for k, v in cats.items():\r\n subdf = df[df['Agency{agent}_Label'] == k]\r\n ver_li = subdf['verb'].str.split()\r\n agen_v[v] = set(word_infinitive(li[0]) for li in ver_li if len(li) > 0)\r\n total += len(agen_v[v])\r\n return agen_v\r\n\r\n\r\n\r\ndef word_infinitive(word):\r\n #infi = lemmatizer.lemmatize(word)\r\n row = verb_form[verb_form.isin([word]).any(axis=1)]\r\n if row.empty:\r\n return word\r\n infi = row[0].iloc[0]\r\n return infi \r\n\r\ndef get_gpu_memory_map():\r\n \"\"\"Get the current gpu usage.\r\n Returns\r\n -------\r\n usage: dict\r\n Keys are device ids as integers.\r\n Values are memory usage as integers in MB.\r\n \"\"\"\r\n result = subprocess.check_output(\r\n [\r\n 'nvidia-smi', '--query-gpu=memory.used',\r\n '--format=csv,nounits,noheader'\r\n ], encoding='utf-8')\r\n # Convert lines into a dictionary\r\n gpu_memory = [int(x) for x in result.strip().split('\\n')]\r\n gpu_memory_map = dict(zip(range(len(gpu_memory)), gpu_memory))\r\n return gpu_memory_map\r\n\r\ndef add_pad(list, tokenizer):\r\n res = [__sen_pad(sen, tokenizer) for sen in list]\r\n return res\r\n\r\ndef __sen_pad(sen, tokenizer):\r\n # add padding for each sentence\r\n if len(sen) < max_sen_len:\r\n pad = [tokenizer.pad_token_id for i in range(max_sen_len - len(sen))]\r\n sen.extend(pad)\r\n return sen\r\n elif len(sen) > max_sen_len:\r\n orilen = len(sen)\r\n for i in range(orilen - max_sen_len):\r\n sen.pop(len(sen) - 2)\r\n return sen\r\n\r\n\r\nmax_sen_len = 64\r\n#lemmatizer = WordNetLemmatizer() \r\nverb_form = pd.read_csv('verb.txt', usecols=[_ for _ in range(24)], header=None)\r\nps = [0.4, 0.6]\r\nnum_epoch = 10\r\n\r\nagen_v = agen_verbs()\r\nROC_TRAIN = './data/roc/train.csv'\r\nROC_TEST = './data/roc/test.csv'\r\nROC_DEV = './data/roc/dev.csv'\r\n\r\n"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
gormlabenz/People-Counting-in-Real-Time
|
[
"d9bdfc1d428e249aa1d86b355a2aa67b5199ae2a"
] |
[
"run_2.py"
] |
[
"import argparse\nimport csv\nimport datetime\nimport logging\nimport time\nfrom itertools import zip_longest\n\nimport cv2\nimport dlib\nimport imutils\nimport numpy as np\nimport schedule\nfrom imutils.video import FPS, VideoStream\nfrom PIL import Image\n\nfrom mylib import config, epd4in2, thread\nfrom mylib.centroidtracker import CentroidTracker\nfrom mylib.mailer import Mailer\nfrom mylib.trackableobject import TrackableObject\n\nlogging.basicConfig(level=logging.DEBUG)\n\nt0 = time.time()\n\nCOLOR_ID = (0, 0, 255)\nCOLOR_DOT = (0, 0, 255)\nCOLOR_LINE = (0, 0, 255)\n\n\ndef run():\n # init paper\n logging.info(\"epd4in2 Demo\")\n\n epd = epd4in2.EPD()\n logging.info(\"init and Clear\")\n epd.init()\n epd.Clear()\n time.sleep(1)\n\n # Drawing on the image\n logging.info(\"Drawing\")\n\n # construct the argument parse and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-p\", \"--prototxt\", required=False,\n help=\"path to Caffe 'deploy' prototxt file\")\n ap.add_argument(\"-m\", \"--model\", required=True,\n help=\"path to Caffe pre-trained model\")\n ap.add_argument(\"-i\", \"--input\", type=str,\n help=\"path to optional input video file\")\n ap.add_argument(\"-o\", \"--output\", type=str,\n help=\"path to optional output video file\")\n # confidence default 0.4\n ap.add_argument(\"-c\", \"--confidence\", type=float, default=0.4,\n help=\"minimum probability to filter weak detections\")\n ap.add_argument(\"-s\", \"--skip-frames\", type=int, default=30,\n help=\"# of skip frames between detections\")\n args = vars(ap.parse_args())\n\n # initialize the list of class labels MobileNet SSD was trained to\n # detect\n CLASSES = [\"background\", \"aeroplane\", \"bicycle\", \"bird\", \"boat\",\n \"bottle\", \"bus\", \"car\", \"cat\", \"chair\", \"cow\", \"diningtable\",\n \"dog\", \"horse\", \"motorbike\", \"person\", \"pottedplant\", \"sheep\",\n \"sofa\", \"train\", \"tvmonitor\"]\n\n # load our serialized model from disk\n net = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n\n # if a video path was not supplied, grab a reference to the ip camera\n if not args.get(\"input\", False):\n print(\"[INFO] Starting the live stream..\")\n vs = VideoStream(config.url).start()\n time.sleep(2.0)\n\n # otherwise, grab a reference to the video file\n else:\n print(\"[INFO] Starting the video..\")\n vs = cv2.VideoCapture(args[\"input\"])\n\n # initialize the video writer (we'll instantiate later if need be)\n writer = None\n\n # initialize the frame dimensions (we'll set them as soon as we read\n # the first frame from the video)\n W = None\n H = None\n\n # instantiate our centroid tracker, then initialize a list to store\n # each of our dlib correlation trackers, followed by a dictionary to\n # map each unique object ID to a TrackableObject\n ct = CentroidTracker(maxDisappeared=40, maxDistance=50)\n trackers = []\n trackableObjects = {}\n\n # initialize the total number of frames processed thus far, along\n # with the total number of objects that have moved either up or down\n totalFrames = 0\n totalDown = 0\n totalUp = 0\n x = []\n empty = []\n empty1 = []\n\n # start the frames per second throughput estimator\n fps = FPS().start()\n\n # loop over frames from the video stream\n while True:\n # grab the next frame and handle if we are reading from either\n # VideoCapture or VideoStream\n frame = vs.read()\n frame = frame[1] if args.get(\"input\", False) else frame\n\n # if we are viewing a video and we did not grab a frame then we\n # have reached the end of 
the video\n if args[\"input\"] is not None and frame is None:\n break\n\n # resize the frame to have a maximum width of 500 pixels (the\n # less data we have, the faster we can process it), then convert\n # the frame from BGR to RGB for dlib\n frame = imutils.resize(frame, width=epd.width, height=epd.height)\n rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n img_grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame_thres = cv2.adaptiveThreshold(\n img_grey, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)\n frame_out = cv2.cvtColor(frame_thres, cv2.COLOR_GRAY2BGR)\n\n # if the frame dimensions are empty, set them\n if W is None or H is None:\n (H, W) = frame.shape[:2]\n\n # if we are supposed to be writing a video to disk, initialize\n # the writer\n if args[\"output\"] is not None and writer is None:\n fourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n writer = cv2.VideoWriter(args[\"output\"], fourcc, 30,\n (W, H), True)\n\n # initialize the current status along with our list of bounding\n # box rectangles returned by either (1) our object detector or\n # (2) the correlation trackers\n status = \"Waiting\"\n rects = []\n\n # check to see if we should run a more computationally expensive\n # object detection method to aid our tracker\n if totalFrames % args[\"skip_frames\"] == 0:\n # set the status and initialize our new set of object trackers\n status = \"Detecting\"\n trackers = []\n\n # convert the frame to a blob and pass the blob through the\n # network and obtain the detections\n blob = cv2.dnn.blobFromImage(frame, 0.007843, (W, H), 127.5)\n net.setInput(blob)\n detections = net.forward()\n\n # loop over the detections\n for i in np.arange(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated\n # with the prediction\n confidence = detections[0, 0, i, 2]\n\n # filter out weak detections by requiring a minimum\n # confidence\n if confidence > args[\"confidence\"]:\n # extract the index of the class label from the\n # detections list\n idx = int(detections[0, 0, i, 1])\n\n # if the class label is not a person, ignore it\n if CLASSES[idx] != \"person\":\n continue\n\n # compute the (x, y)-coordinates of the bounding box\n # for the object\n box = detections[0, 0, i, 3:7] * np.array([W, H, W, H])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n # construct a dlib rectangle object from the bounding\n # box coordinates and then start the dlib correlation\n # tracker\n tracker = dlib.correlation_tracker()\n rect = dlib.rectangle(startX, startY, endX, endY)\n tracker.start_track(rgb, rect)\n\n # add the tracker to our list of trackers so we can\n # utilize it during skip frames\n trackers.append(tracker)\n\n # otherwise, we should utilize our object *trackers* rather than\n # object *detectors* to obtain a higher frame processing throughput\n else:\n # loop over the trackers\n for tracker in trackers:\n # set the status of our system to be 'tracking' rather\n # than 'waiting' or 'detecting'\n status = \"Tracking\"\n\n # update the tracker and grab the updated position\n tracker.update(rgb)\n pos = tracker.get_position()\n\n # unpack the position object\n startX = int(pos.left())\n startY = int(pos.top())\n endX = int(pos.right())\n endY = int(pos.bottom())\n\n # add the bounding box coordinates to the rectangles list\n rects.append((startX, startY, endX, endY))\n\n # draw a horizontal line in the center of the frame -- once an\n # object crosses this line we will determine whether they were\n # moving 'up' or 'down'\n #cv2.line(frame_out, (0, H // 
2), (W, H // 2), COLOR_LINE, 2)\n \"\"\" cv2.putText(frame_out, \"-Prediction border - Entrance-\", (10, H - ((i * 20) + 200)),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1) \"\"\"\n\n # use the centroid tracker to associate the (1) old object\n # centroids with (2) the newly computed object centroids\n objects = ct.update(rects)\n\n # loop over the tracked objects\n for (objectID, centroid) in objects.items():\n # check to see if a trackable object exists for the current\n # object ID\n to = trackableObjects.get(objectID, None)\n\n # if there is no existing trackable object, create one\n if to is None:\n to = TrackableObject(objectID, centroid)\n\n # otherwise, there is a trackable object so we can utilize it\n # to determine direction\n else:\n # the difference between the y-coordinate of the *current*\n # centroid and the mean of *previous* centroids will tell\n # us in which direction the object is moving (negative for\n # 'up' and positive for 'down')\n y = [c[1] for c in to.centroids]\n direction = centroid[1] - np.mean(y)\n to.centroids.append(centroid)\n\n # check to see if the object has been counted or not\n if not to.counted:\n # if the direction is negative (indicating the object\n # is moving up) AND the centroid is above the center\n # line, count the object\n if direction < 0 and centroid[1] < H // 2:\n totalUp += 1\n empty.append(totalUp)\n to.counted = True\n\n # if the direction is positive (indicating the object\n # is moving down) AND the centroid is below the\n # center line, count the object\n elif direction > 0 and centroid[1] > H // 2:\n totalDown += 1\n empty1.append(totalDown)\n # print(empty1[-1])\n # if the people limit exceeds over threshold, send an email alert\n if sum(x) >= config.Threshold:\n cv2.putText(frame_out, \"-ALERT: People limit exceeded-\", (10, frame.shape[0] - 80),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 2)\n if config.ALERT:\n print(\"[INFO] Sending email alert..\")\n Mailer().send(config.MAIL)\n print(\"[INFO] Alert sent\")\n\n to.counted = True\n\n x = []\n # compute the sum of total people inside\n x.append(len(empty1) - len(empty))\n print('x = ', x)\n for k, v in info:\n print(k, v)\n #print(\"Total people inside:\", x)\n\n # draw both the ID of the object and the centroid of the\n # object on the output frame\n text = \"ID {}\".format(objectID)\n print(text)\n frame_out_pil = Image.fromarray(frame_out)\n cv2.putText(frame_out, text, (centroid[0] - 10, centroid[1] - 10),\n cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLOR_ID, 2)\n cv2.circle(\n frame_out, (centroid[0], centroid[1]), 4, COLOR_DOT, -1)\n epd.display(epd.getbuffer(frame_out_pil))\n\n # store the trackable object in our dictionary\n trackableObjects[objectID] = to\n\n # construct a tuple of information we will be displaying on the\n info = [\n (\"Exit\", totalUp),\n (\"Enter\", totalDown),\n (\"Status\", status),\n ]\n\n info2 = [\n (\"Total people inside\", x),\n ]\n\n # Display the output\n # for (i, (k, v)) in enumerate(info):\n # text = \"{}: {}\".format(k, v)\n # cv2.putText(frame_out, text, (10, H - ((i * 20) + 20)),\n # cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 2)\n#\n # for (i, (k, v)) in enumerate(info2):\n # text = \"{}: {}\".format(k, v)\n # cv2.putText(frame_out, text, (265, H - ((i * 20) + 60)),\n # cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)\n # show the output frame\n # cv2.imshow(\"Real-Time Monitoring/Analysis Window\", frame_out)\n\n key = cv2.waitKey(1) & 0xFF\n\n # increment the total number of frames processed thus far and\n # then update the FPS 
counter\n totalFrames += 1\n fps.update()\n\n # stop the timer and display FPS information\n fps.stop()\n print(\"[INFO] elapsed time: {:.2f}\".format(fps.elapsed()))\n print(\"[INFO] approx. FPS: {:.2f}\".format(fps.fps()))\n\n # # if we are not using a video file, stop the camera video stream\n # if not args.get(\"input\", False):\n # \tvs.stop()\n #\n # # otherwise, release the video file pointer\n # else:\n # \tvs.release()\n\n # issue 15\n if config.Thread:\n vs.release()\n\n\nif __name__ == '__main__':\n run()\n"
] |
[
[
"numpy.arange",
"numpy.array",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yingwaner/C-MNMT
|
[
"3a87f75302efc859139af72483f480a1cac86f25"
] |
[
"fairseq/sequence_generator.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport math\n\nimport torch\n\nfrom fairseq import search, utils\nfrom fairseq.data import data_utils\nfrom fairseq.models import FairseqIncrementalDecoder\n\n\nclass SequenceGenerator(object):\n def __init__(\n self,\n tgt_dict,\n beam_size=1,\n max_len_a=0,\n max_len_b=200,\n min_len=1,\n normalize_scores=True,\n len_penalty=1.,\n unk_penalty=0.,\n retain_dropout=False,\n sampling=False,\n sampling_topk=-1,\n sampling_topp=-1.0,\n temperature=1.,\n diverse_beam_groups=-1,\n diverse_beam_strength=0.5,\n match_source_len=False,\n no_repeat_ngram_size=0,\n ):\n \"\"\"Generates translations of a given source sentence.\n\n Args:\n tgt_dict (~fairseq.data.Dictionary): target dictionary\n beam_size (int, optional): beam width (default: 1)\n max_len_a/b (int, optional): generate sequences of maximum length\n ax + b, where x is the source length\n min_len (int, optional): the minimum length of the generated output\n (not including end-of-sentence)\n normalize_scores (bool, optional): normalize scores by the length\n of the output (default: True)\n len_penalty (float, optional): length penalty, where <1.0 favors\n shorter, >1.0 favors longer sentences (default: 1.0)\n unk_penalty (float, optional): unknown word penalty, where <0\n produces more unks, >0 produces fewer (default: 0.0)\n retain_dropout (bool, optional): use dropout when generating\n (default: False)\n sampling (bool, optional): sample outputs instead of beam search\n (default: False)\n sampling_topk (int, optional): only sample among the top-k choices\n at each step (default: -1)\n sampling_topp (float, optional): only sample among the smallest set\n of words whose cumulative probability mass exceeds p\n at each step (default: -1.0)\n temperature (float, optional): temperature, where values\n >1.0 produce more uniform samples and values <1.0 produce\n sharper samples (default: 1.0)\n diverse_beam_groups/strength (float, optional): parameters for\n Diverse Beam Search sampling\n match_source_len (bool, optional): outputs should match the source\n length (default: False)\n \"\"\"\n self.pad = tgt_dict.pad()\n self.unk = tgt_dict.unk()\n self.eos = tgt_dict.eos()\n self.vocab_size = len(tgt_dict)\n self.beam_size = beam_size\n # the max beam size is the dictionary size - 1, since we never select pad\n self.beam_size = min(beam_size, self.vocab_size - 1)\n self.max_len_a = max_len_a\n self.max_len_b = max_len_b\n self.min_len = min_len\n self.normalize_scores = normalize_scores\n self.len_penalty = len_penalty\n self.unk_penalty = unk_penalty\n self.retain_dropout = retain_dropout\n self.temperature = temperature\n self.match_source_len = match_source_len\n self.no_repeat_ngram_size = no_repeat_ngram_size\n assert sampling_topk < 0 or sampling, '--sampling-topk requires --sampling'\n assert sampling_topp < 0 or sampling, '--sampling-topp requires --sampling'\n assert temperature > 0, '--temperature must be greater than 0'\n\n if sampling:\n self.search = search.Sampling(tgt_dict, sampling_topk, sampling_topp)\n elif diverse_beam_groups > 0:\n self.search = search.DiverseBeamSearch(tgt_dict, diverse_beam_groups, diverse_beam_strength)\n elif match_source_len:\n self.search = search.LengthConstrainedBeamSearch(\n tgt_dict, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0,\n )\n else:\n self.search = search.BeamSearch(tgt_dict)\n\n @torch.no_grad()\n def 
generate(self, models, sample, lang_num, **kwargs):\n \"\"\"Generate a batch of translations.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models\n sample (dict): batch\n prefix_tokens (torch.LongTensor, optional): force decoder to begin\n with these tokens\n bos_token (int, optional): beginning of sentence token\n (default: self.eos)\n \"\"\"\n model = EnsembleModel(models)\n return self._generate(model, sample, lang_num, **kwargs)\n\n @torch.no_grad()\n def _generate(\n self,\n model,\n sample,\n lang_num,\n prefix_tokens=None,\n bos_token=None,\n **kwargs\n ):\n if not self.retain_dropout:\n model.eval()\n\n # model.forward normally channels prev_output_tokens into the decoder\n # separately, but SequenceGenerator directly calls model.encoder\n encoder_input = {\n k: v for k, v in sample['net_input'].items()\n if k != 'prev_output_tokens'\n }\n\n src_tokens = encoder_input['src_tokens']\n src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)\n input_size = src_tokens.size()\n # batch dimension goes first followed by source lengths\n bsz = input_size[0]\n src_len = input_size[1]\n beam_size = self.beam_size\n\n if self.match_source_len:\n max_len = src_lengths.max().item()\n else:\n max_len = min(\n int(self.max_len_a * src_len + self.max_len_b),\n # exclude the EOS marker\n model.max_decoder_positions() - 1,\n )\n\n # compute the encoder output for each beam\n encoder_outs = model.forward_encoder(lang_num, encoder_input)\n sents = encoder_outs[0]['encoder_out']\n sents = sents.transpose(0, 1)#T x B x C -> B x T x C\n sents = sents.cpu().numpy()\n new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)\n new_order = new_order.to(src_tokens.device).long()\n encoder_outs = model.reorder_encoder_out(encoder_outs, new_order)\n\n # initialize buffers\n scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0)\n scores_buf = scores.clone()\n tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)\n tokens_buf = tokens.clone()\n tokens[:, 0] = self.eos if bos_token is None else bos_token\n attn, attn_buf = None, None\n\n # The blacklist indicates candidates that should be ignored.\n # For example, suppose we're sampling and have already finalized 2/5\n # samples. 
Then the blacklist would mark 2 positions as being ignored,\n # so that we only finalize the remaining 3 samples.\n blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask\n\n # list of completed sentences\n finalized = [[] for i in range(bsz)]\n finished = [False for i in range(bsz)]\n num_remaining_sent = bsz\n\n # number of candidate hypos per step\n cand_size = 2 * beam_size # 2 x beam size in case half are EOS\n\n # offset arrays for converting between different indexing schemes\n bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)\n cand_offsets = torch.arange(0, cand_size).type_as(tokens)\n\n # helper function for allocating buffers on the fly\n buffers = {}\n\n def buffer(name, type_of=tokens): # noqa\n if name not in buffers:\n buffers[name] = type_of.new()\n return buffers[name]\n\n def is_finished(sent, step, unfin_idx):\n \"\"\"\n Check whether we've finished generation for a given sentence, by\n comparing the worst score among finalized hypotheses to the best\n possible score among unfinalized hypotheses.\n \"\"\"\n assert len(finalized[sent]) <= beam_size\n if len(finalized[sent]) == beam_size:\n return True\n return False\n\n def finalize_hypos(step, bbsz_idx, eos_scores):\n \"\"\"\n Finalize the given hypotheses at this step, while keeping the total\n number of finalized hypotheses per sentence <= beam_size.\n\n Note: the input must be in the desired finalization order, so that\n hypotheses that appear earlier in the input are preferred to those\n that appear later.\n\n Args:\n step: current time step\n bbsz_idx: A vector of indices in the range [0, bsz*beam_size),\n indicating which hypotheses to finalize\n eos_scores: A vector of the same size as bbsz_idx containing\n scores for each hypothesis\n \"\"\"\n assert bbsz_idx.numel() == eos_scores.numel()\n\n # clone relevant token and attention tensors\n tokens_clone = tokens.index_select(0, bbsz_idx)\n tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS\n assert not tokens_clone.eq(self.eos).any()\n tokens_clone[:, step] = self.eos\n attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None\n\n # compute scores per token position\n pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]\n pos_scores[:, step] = eos_scores\n # convert from cumulative to per-position scores\n pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]\n\n # normalize sentence-level scores\n if self.normalize_scores:\n eos_scores /= (step + 1) ** self.len_penalty\n\n cum_unfin = []\n prev = 0\n for f in finished:\n if f:\n prev += 1\n else:\n cum_unfin.append(prev)\n\n sents_seen = set()\n for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):\n unfin_idx = idx // beam_size\n sent = unfin_idx + cum_unfin[unfin_idx]\n\n sents_seen.add((sent, unfin_idx))\n\n if self.match_source_len and step > src_lengths[unfin_idx]:\n score = -math.inf\n\n def get_hypo():\n\n if attn_clone is not None:\n # remove padding tokens from attn scores\n hypo_attn = attn_clone[i]\n else:\n hypo_attn = None\n\n return {\n 'tokens': tokens_clone[i],\n 'score': score,\n 'attention': hypo_attn, # src_len x tgt_len\n 'alignment': None,\n 'positional_scores': pos_scores[i],\n }\n\n if len(finalized[sent]) < beam_size:\n finalized[sent].append(get_hypo())\n\n newly_finished = []\n for sent, unfin_idx in sents_seen:\n # check termination conditions for this sentence\n if not finished[sent] and is_finished(sent, step, 
unfin_idx):\n finished[sent] = True\n newly_finished.append(unfin_idx)\n return newly_finished\n\n reorder_state = None\n batch_idxs = None\n for step in range(max_len + 1): # one extra step for EOS marker\n # reorder decoder internal states based on the prev choice of beams\n if reorder_state is not None:\n if batch_idxs is not None:\n # update beam indices to take into account removed sentences\n corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)\n reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)\n model.reorder_incremental_state(reorder_state)\n encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state)\n\n lprobs, avg_attn_scores = model.forward_decoder(\n tokens[:, :step + 1], encoder_outs, temperature=self.temperature,\n )\n\n lprobs[:, self.pad] = -math.inf # never select pad\n lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty\n\n # handle min and max length constraints\n if step >= max_len:\n lprobs[:, :self.eos] = -math.inf\n lprobs[:, self.eos + 1:] = -math.inf\n elif step < self.min_len:\n lprobs[:, self.eos] = -math.inf\n\n # handle prefix tokens (possibly with different lengths)\n if prefix_tokens is not None and step < prefix_tokens.size(1):\n prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)\n prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))\n prefix_mask = prefix_toks.ne(self.pad)\n lprobs[prefix_mask] = -math.inf\n lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(\n -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs\n )\n # if prefix includes eos, then we should make sure tokens and\n # scores are the same across all beams\n eos_mask = prefix_toks.eq(self.eos)\n if eos_mask.any():\n # validate that the first beam matches the prefix\n first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]\n eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]\n target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]\n assert (first_beam == target_prefix).all()\n\n def replicate_first_beam(tensor, mask):\n tensor = tensor.view(-1, beam_size, tensor.size(-1))\n tensor[mask] = tensor[mask][:, :1, :]\n return tensor.view(-1, tensor.size(-1))\n\n # copy tokens, scores and lprobs from the first beam to all beams\n tokens = replicate_first_beam(tokens, eos_mask_batch_dim)\n scores = replicate_first_beam(scores, eos_mask_batch_dim)\n lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)\n\n if self.no_repeat_ngram_size > 0:\n # for each beam and batch sentence, generate a list of previous ngrams\n gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]\n for bbsz_idx in range(bsz * beam_size):\n gen_tokens = tokens[bbsz_idx].tolist()\n for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):\n gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \\\n gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]\n\n # Record attention scores\n if avg_attn_scores is not None:\n if attn is None:\n attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)\n attn_buf = attn.clone()\n attn[:, :, step + 1].copy_(avg_attn_scores)\n\n scores = scores.type_as(lprobs)\n scores_buf = scores_buf.type_as(lprobs)\n eos_bbsz_idx = buffer('eos_bbsz_idx')\n eos_scores = buffer('eos_scores', type_of=scores)\n\n self.search.set_src_lengths(src_lengths)\n\n if self.no_repeat_ngram_size > 0:\n def calculate_banned_tokens(bbsz_idx):\n # before decoding the next token, prevent decoding of ngrams that have already appeared\n ngram_index = 
tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())\n return gen_ngrams[bbsz_idx].get(ngram_index, [])\n\n if step + 2 - self.no_repeat_ngram_size >= 0:\n # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet\n banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]\n else:\n banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]\n\n for bbsz_idx in range(bsz * beam_size):\n lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf\n\n cand_scores, cand_indices, cand_beams = self.search.step(\n step,\n lprobs.view(bsz, -1, self.vocab_size),\n scores.view(bsz, beam_size, -1)[:, :, :step],\n )\n\n # cand_bbsz_idx contains beam indices for the top candidate\n # hypotheses, with a range of values: [0, bsz*beam_size),\n # and dimensions: [bsz, cand_size]\n cand_bbsz_idx = cand_beams.add(bbsz_offsets)\n\n # finalize hypotheses that end in eos (except for blacklisted ones)\n eos_mask = cand_indices.eq(self.eos)\n eos_mask[:, :beam_size][blacklist] = 0\n\n # only consider eos when it's among the top beam_size indices\n torch.masked_select(\n cand_bbsz_idx[:, :beam_size],\n mask=eos_mask[:, :beam_size],\n out=eos_bbsz_idx,\n )\n\n finalized_sents = set()\n if eos_bbsz_idx.numel() > 0:\n torch.masked_select(\n cand_scores[:, :beam_size],\n mask=eos_mask[:, :beam_size],\n out=eos_scores,\n )\n finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)\n num_remaining_sent -= len(finalized_sents)\n\n assert num_remaining_sent >= 0\n if num_remaining_sent == 0:\n break\n assert step < max_len\n\n if len(finalized_sents) > 0:\n new_bsz = bsz - len(finalized_sents)\n\n # construct batch_idxs which holds indices of batches to keep for the next pass\n batch_mask = cand_indices.new_ones(bsz)\n batch_mask[cand_indices.new(finalized_sents)] = 0\n batch_idxs = batch_mask.nonzero().squeeze(-1)\n\n eos_mask = eos_mask[batch_idxs]\n cand_beams = cand_beams[batch_idxs]\n bbsz_offsets.resize_(new_bsz, 1)\n cand_bbsz_idx = cand_beams.add(bbsz_offsets)\n cand_scores = cand_scores[batch_idxs]\n cand_indices = cand_indices[batch_idxs]\n if prefix_tokens is not None:\n prefix_tokens = prefix_tokens[batch_idxs]\n src_lengths = src_lengths[batch_idxs]\n blacklist = blacklist[batch_idxs]\n\n scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)\n scores_buf.resize_as_(scores)\n tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)\n tokens_buf.resize_as_(tokens)\n if attn is not None:\n attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)\n attn_buf.resize_as_(attn)\n bsz = new_bsz\n else:\n batch_idxs = None\n\n # Set active_mask so that values > cand_size indicate eos or\n # blacklisted hypos and values < cand_size indicate candidate\n # active hypos. 
After this, the min values per row are the top\n # candidate active hypos.\n active_mask = buffer('active_mask')\n eos_mask[:, :beam_size] |= blacklist\n torch.add(\n eos_mask.type_as(cand_offsets) * cand_size,\n cand_offsets[:eos_mask.size(1)],\n out=active_mask,\n )\n\n # get the top beam_size active hypotheses, which are just the hypos\n # with the smallest values in active_mask\n active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')\n torch.topk(\n active_mask, k=beam_size, dim=1, largest=False,\n out=(new_blacklist, active_hypos)\n )\n\n # update blacklist to ignore any finalized hypos\n blacklist = new_blacklist.ge(cand_size)[:, :beam_size]\n assert (~blacklist).any(dim=1).all()\n\n active_bbsz_idx = buffer('active_bbsz_idx')\n torch.gather(\n cand_bbsz_idx, dim=1, index=active_hypos,\n out=active_bbsz_idx,\n )\n active_scores = torch.gather(\n cand_scores, dim=1, index=active_hypos,\n out=scores[:, step].view(bsz, beam_size),\n )\n\n active_bbsz_idx = active_bbsz_idx.view(-1)\n active_scores = active_scores.view(-1)\n\n # copy tokens and scores for active hypotheses\n torch.index_select(\n tokens[:, :step + 1], dim=0, index=active_bbsz_idx,\n out=tokens_buf[:, :step + 1],\n )\n torch.gather(\n cand_indices, dim=1, index=active_hypos,\n out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],\n )\n if step > 0:\n torch.index_select(\n scores[:, :step], dim=0, index=active_bbsz_idx,\n out=scores_buf[:, :step],\n )\n torch.gather(\n cand_scores, dim=1, index=active_hypos,\n out=scores_buf.view(bsz, beam_size, -1)[:, :, step],\n )\n\n # copy attention for active hypotheses\n if attn is not None:\n torch.index_select(\n attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,\n out=attn_buf[:, :, :step + 2],\n )\n\n # swap buffers\n tokens, tokens_buf = tokens_buf, tokens\n scores, scores_buf = scores_buf, scores\n if attn is not None:\n attn, attn_buf = attn_buf, attn\n\n # reorder incremental state in decoder\n reorder_state = active_bbsz_idx\n\n # sort by score descending\n for sent in range(len(finalized)):\n finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)\n return finalized, sents\n\n\nclass EnsembleModel(torch.nn.Module):\n \"\"\"A wrapper around an ensemble of models.\"\"\"\n\n def __init__(self, models):\n super().__init__()\n self.models = torch.nn.ModuleList(models)\n self.incremental_states = None\n if all(isinstance(m.decoder, FairseqIncrementalDecoder) for m in models):\n self.incremental_states = {m: {} for m in models}\n\n def has_encoder(self):\n return hasattr(self.models[0], 'encoder')\n\n def max_decoder_positions(self):\n return min(m.max_decoder_positions() for m in self.models)\n\n @torch.no_grad()\n def forward_encoder(self, lang_num, encoder_input):\n if not self.has_encoder():\n return None\n return [model.encoder(lang_num, **encoder_input) for model in self.models]\n\n @torch.no_grad()\n def forward_decoder(self, tokens, encoder_outs, temperature=1.):\n if len(self.models) == 1:\n return self._decode_one(\n tokens,\n self.models[0],\n encoder_outs[0] if self.has_encoder() else None,\n self.incremental_states,\n log_probs=True,\n temperature=temperature,\n )\n\n log_probs = []\n avg_attn = None\n for model, encoder_out in zip(self.models, encoder_outs):\n probs, attn = self._decode_one(\n tokens,\n model,\n encoder_out,\n self.incremental_states,\n log_probs=True,\n temperature=temperature,\n )\n log_probs.append(probs)\n if attn is not None:\n if avg_attn is None:\n avg_attn = attn\n else:\n 
avg_attn.add_(attn)\n avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(len(self.models))\n if avg_attn is not None:\n avg_attn.div_(len(self.models))\n return avg_probs, avg_attn\n\n def _decode_one(\n self, tokens, model, encoder_out, incremental_states, log_probs,\n temperature=1.,\n ):\n if self.incremental_states is not None:\n decoder_out = list(model.decoder(\n tokens, encoder_out=encoder_out, incremental_state=self.incremental_states[model],\n ))\n else:\n decoder_out = list(model.decoder(tokens, encoder_out=encoder_out))\n decoder_out[0] = decoder_out[0][:, -1:, :]\n if temperature != 1.:\n decoder_out[0].div_(temperature)\n attn = decoder_out[1]\n if type(attn) is dict:\n attn = attn.get('attn', None)\n if attn is not None:\n attn = attn[:, -1, :]\n probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)\n probs = probs[:, -1, :]\n return probs, attn\n\n def reorder_encoder_out(self, encoder_outs, new_order):\n if not self.has_encoder():\n return\n return [\n model.encoder.reorder_encoder_out(encoder_out, new_order)\n for model, encoder_out in zip(self.models, encoder_outs)\n ]\n\n def reorder_incremental_state(self, new_order):\n if self.incremental_states is None:\n return\n for model in self.models:\n model.decoder.reorder_incremental_state(self.incremental_states[model], new_order)\n\n\nclass SequenceGeneratorWithAlignment(SequenceGenerator):\n\n def __init__(self, tgt_dict, left_pad_target=False, **kwargs):\n \"\"\"Generates translations of a given source sentence.\n\n Produces alignments following \"Jointly Learning to Align and\n Translate with Transformer Models\" (Garg et al., EMNLP 2019).\n\n Args:\n left_pad_target (bool, optional): Whether or not the\n hypothesis should be left padded or not when they are\n teacher forced for generating alignments.\n \"\"\"\n super().__init__(tgt_dict, **kwargs)\n self.left_pad_target = left_pad_target\n\n @torch.no_grad()\n def generate(self, models, sample, **kwargs):\n model = EnsembleModelWithAlignment(models)\n finalized = super()._generate(model, sample, **kwargs)\n\n src_tokens = sample['net_input']['src_tokens']\n bsz = src_tokens.shape[0]\n beam_size = self.beam_size\n src_tokens, src_lengths, prev_output_tokens, tgt_tokens = \\\n self._prepare_batch_for_alignment(sample, finalized)\n if any(getattr(m, 'full_context_alignment', False) for m in model.models):\n attn = model.forward_align(src_tokens, src_lengths, prev_output_tokens)\n else:\n attn = [\n finalized[i // beam_size][i % beam_size]['attention'].transpose(1, 0)\n for i in range(bsz * beam_size)\n ]\n\n # Process the attn matrix to extract hard alignments.\n for i in range(bsz * beam_size):\n alignment = utils.extract_hard_alignment(attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos)\n finalized[i // beam_size][i % beam_size]['alignment'] = alignment\n return finalized\n\n def _prepare_batch_for_alignment(self, sample, hypothesis):\n src_tokens = sample['net_input']['src_tokens']\n bsz = src_tokens.shape[0]\n src_tokens = src_tokens[:, None, :].expand(-1, self.beam_size, -1).contiguous().view(bsz * self.beam_size, -1)\n src_lengths = sample['net_input']['src_lengths']\n src_lengths = src_lengths[:, None].expand(-1, self.beam_size).contiguous().view(bsz * self.beam_size)\n prev_output_tokens = data_utils.collate_tokens(\n [beam['tokens'] for example in hypothesis for beam in example],\n self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True,\n )\n tgt_tokens = data_utils.collate_tokens(\n [beam['tokens'] for 
example in hypothesis for beam in example],\n self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False,\n )\n return src_tokens, src_lengths, prev_output_tokens, tgt_tokens\n\n\nclass EnsembleModelWithAlignment(EnsembleModel):\n \"\"\"A wrapper around an ensemble of models.\"\"\"\n\n def __init__(self, models):\n super().__init__(models)\n\n def forward_align(self, src_tokens, src_lengths, prev_output_tokens):\n avg_attn = None\n for model in self.models:\n decoder_out = model(src_tokens, src_lengths, prev_output_tokens)\n attn = decoder_out[1]['attn']\n if avg_attn is None:\n avg_attn = attn\n else:\n avg_attn.add_(attn)\n if len(self.models) > 1:\n avg_attn.div_(len(self.models))\n return avg_attn\n\n def _decode_one(\n self, tokens, model, encoder_out, incremental_states, log_probs,\n temperature=1.,\n ):\n if self.incremental_states is not None:\n decoder_out = list(model.forward_decoder(\n tokens,\n encoder_out=encoder_out,\n incremental_state=self.incremental_states[model],\n ))\n else:\n decoder_out = list(model.forward_decoder(tokens, encoder_out=encoder_out))\n decoder_out[0] = decoder_out[0][:, -1:, :]\n if temperature != 1.:\n decoder_out[0].div_(temperature)\n attn = decoder_out[1]\n if type(attn) is dict:\n attn = attn.get('attn', None)\n if attn is not None:\n attn = attn[:, -1, :]\n probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)\n probs = probs[:, -1, :]\n return probs, attn\n"
] |
[
[
"torch.nn.ModuleList",
"torch.gather",
"torch.no_grad",
"torch.arange",
"torch.topk",
"torch.masked_select",
"torch.index_select",
"torch.stack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
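For reference: the `SequenceGenerator` above keeps each per-hypothesis buffer flattened to `bsz * beam_size` rows and turns per-sentence beam choices into flat row indices via `bbsz_offsets`. A minimal standalone sketch of that index arithmetic, with made-up shapes (not code from the repository above):

```python
import torch

bsz, beam_size, cand_size = 2, 3, 6
scores = torch.zeros(bsz * beam_size, 5)                    # flat (bsz*beam, T) buffer
cand_beams = torch.randint(0, beam_size, (bsz, cand_size))  # per-sentence beam choices
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(cand_beams)
cand_bbsz_idx = cand_beams + bbsz_offsets                   # flat indices in [0, bsz*beam)
reordered = scores.index_select(0, cand_bbsz_idx.view(-1))  # rows follow candidate order
print(reordered.shape)                                      # torch.Size([12, 5])
```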
sct-pipeline/contrast-agnostic-softseg-spinalcord
|
[
"4d3429f311c2c4787c63c290acf3cafa0a4e15bf"
] |
[
"create_training_joblib.py"
] |
[
"import pandas as pd\nimport joblib\nimport numpy as np\nimport argparse\nimport os\n\n\n# Inputs:\n# --sct_train_file: Pickle file that was holds the a list of the dataset used for training.\n# Can be downloaded at: https://github.com/sct-data/deepseg_sc_models\n# train_valid_test column: 1 for training, 2 for validating, 3 for testing\n# --bids_datasets_list: List of dataset folders to gather list of subjects from.\n# 1 or more (e.g. sct-testing-large spine-generic-multi-subject etc.)\n# --ofolder: Folder to save the output .joblib file\n\n# Example usage:\n# python3 create_training_joblib --sct_train_file ~/dataset.pkl --bids_datasets_list ~/datasets/testing-large\n# --ofolder ~/train_new_model\n#\n# Konstantinos Nasiotis 2021\n\n\ndef create_new_joblib(dataset_sct_file, input_bids_folders, outputFolder):\n\n ## Load the merged participants.tsv\n #merged_folder = '/home/nas/Consulting/ivado-project/Datasets/merged_SCTLARGE_MULTISUBJECT/'\n #df_merged = bids.BIDS(merged_folder).participants.content\n\n # Merge multiple .tsv files into the same dataframe\n df_merged = pd.read_table(os.path.join(input_bids_folders[0], 'participants.tsv'), encoding=\"ISO-8859-1\")\n # Convert to string to get rid of potential TypeError during merging within the same column\n df_merged = df_merged.astype(str)\n # Add the Bids_path to the dataframe\n df_merged['bids_path'] = [input_bids_folders[0]] * len(df_merged)\n\n for iFolder in range(1, len(input_bids_folders)):\n df_next = pd.read_table(os.path.join(input_bids_folders[iFolder], 'participants.tsv'), encoding=\"ISO-8859-1\")\n df_next = df_next.astype(str)\n df_next['bids_path'] = [input_bids_folders[iFolder]] * len(df_next)\n # Merge the .tsv files (This keeps also non-overlapping fields)\n df_merged = pd.merge(left=df_merged, right=df_next, how='outer')\n\n dataUsedOnSct = pd.read_pickle(dataset_sct_file)\n # Force the subjects that were used for testing for SCT models to be used for testing in the new .joblib\n subjectsUsedForTesting = dataUsedOnSct[dataUsedOnSct['train_valid_test'] == 3]['subject'].to_list()\n\n # Use 60% for training/validation and 40% for testing\n percentage_train = 0.4\n percentage_validation = 0.2\n\n # Whatever was used in sct testing, will stay in the testing side of the joblib as well\n test = df_merged[np.in1d(df_merged['data_id'], subjectsUsedForTesting)]\n # Keep only the rest of the subjects for splitting to training/validation/testing sets\n df_merged_reduced = df_merged[np.invert(np.in1d(df_merged['data_id'], subjectsUsedForTesting))]\n\n train, validate, test2 = np.split(df_merged_reduced.sample(frac=1),\n [int(percentage_train*(len(df_merged_reduced))+len(test)/2),\n int((percentage_train+percentage_validation)*len(df_merged_reduced)+len(test)/2)])\n\n # Append the testing from sct to the new testing entries\n test3 = test.append(test2, ignore_index=1)\n\n # Populate the joblib file\n jobdict = {'train': train['participant_id'].to_list(),\n 'valid': validate['participant_id'].to_list(),\n 'test': test3['participant_id'].to_list()}\n\n joblib.dump(jobdict, os.path.join(outputFolder, \"new_splits.joblib\"))\n\n '''\n # Debugging\n newJoblib = joblib.load(os.path.join(outputFolder, \"new_splits.joblib\"))\n print(len(newJoblib[\"train\"]))\n print(len(newJoblib[\"valid\"]))\n print(len(newJoblib[\"test\"]))\n '''\n print('Success')\n\n\ndef get_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--sct_train_file\", required=True, nargs=\"*\", dest=\"sctTrainFile\",\n help=\".pkl file that was used 
while training SCT models\")\n parser.add_argument(\"--bids_datasets_list\", required=True, nargs=\"*\", dest=\"bidsDatasets\",\n help=\"BIDS dataset inputs\")\n parser.add_argument(\"--ofolder\", required=True, nargs=\"*\", dest=\"outputFolder\",\n help=\"Output folder where the new_splits.joblib file will be saved\")\n return parser\n\n\ndef main():\n parser = get_parser()\n args = parser.parse_args()\n\n # Run comparison\n create_new_joblib(args.sctTrainFile[0], args.bidsDatasets, args.outputFolder[0])\n\n\nif __name__ == '__main__':\n main()\n"
] |
[
[
"pandas.merge",
"pandas.read_pickle",
"numpy.in1d"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
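For reference: `create_training_joblib.py` splits the merged participant table with the shuffle-then-`np.split` idiom. A standalone sketch under simplified assumptions (hypothetical subject IDs and a plain 60/20/20 split; the script itself shifts the cut points by half the SCT test set):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'participant_id': ['sub-%02d' % i for i in range(10)]})
train, validate, test = np.split(
    df.sample(frac=1, random_state=0),         # shuffle every row
    [int(0.6 * len(df)), int(0.8 * len(df))],  # cut points: 60% / 20% / 20%
)
print(len(train), len(validate), len(test))    # 6 2 2
```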
rafvasq/cage-challenge-1
|
[
"95affdfa38afc1124f1a1a09c92fbc0ed5b96318",
"95affdfa38afc1124f1a1a09c92fbc0ed5b96318"
] |
[
"CybORG/CybORG/Agents/ComplexAgents/utilities/data_structures/Action_Balanced_Replay_Buffer.py",
"CybORG/CybORG/Agents/ComplexAgents/actor_critic_agents/SAC_Discrete.py"
] |
[
"import random\nfrom collections import namedtuple, deque\nimport torch\nimport numpy as np\nfrom .Replay_Buffer import Replay_Buffer\n\nclass Action_Balanced_Replay_Buffer(Replay_Buffer):\n \"\"\"Replay buffer that provides sample of experiences that have an equal number of each action being conducted\"\"\"\n def __init__(self, buffer_size, batch_size, seed, num_actions):\n self.num_actions = num_actions\n self.buffer_size_per_memory = int(buffer_size / self.num_actions)\n\n print(\"NUM ACTIONS \", self.num_actions)\n self.memories = {action: deque(maxlen=self.buffer_size_per_memory) for action in range(self.num_actions)}\n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n self.device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n def add_experience(self, states, actions, rewards, next_states, dones):\n \"\"\"Adds experience or list of experiences into the replay buffer\"\"\"\n if type(dones) == list:\n assert type(dones[0]) != list, \"A done shouldn't be a list\"\n experiences = [self.experience(state, action, reward, next_state, done)\n for state, action, reward, next_state, done in\n zip(states, actions, rewards, next_states, dones)]\n for experience in experiences:\n action = experience.action\n self.memories[action].append(experience)\n else:\n experience = self.experience(states, actions, rewards, next_states, dones)\n self.memories[actions].append(experience)\n\n def pick_experiences(self, num_experiences=None):\n \"\"\"Picks the experiences that the sample function will return as a random sample of experiences. It works by picking\n an equal number of experiences that used each action (as far as possible)\"\"\"\n if num_experiences: batch_size = num_experiences\n else: batch_size = self.batch_size\n batch_per_action = self.calculate_batch_sizes_per_action(batch_size)\n samples_split_by_action = self.sample_each_action_equally(batch_per_action)\n combined_sample = []\n for key in samples_split_by_action.keys():\n combined_sample.extend(samples_split_by_action[key])\n return combined_sample\n\n def calculate_batch_sizes_per_action(self, batch_size):\n \"\"\"Calculates the batch size we need to randomly draw from each action to make sure there is equal coverage\n per action and that the batch gets filled up\"\"\"\n min_batch_per_action = int(batch_size / self.num_actions)\n batch_per_action = {k: min_batch_per_action for k in range(self.num_actions)}\n current_batch_size = np.sum([batch_per_action[k] for k in range(self.num_actions)])\n remainder = batch_size - current_batch_size\n give_remainder_to = random.sample(range(self.num_actions), remainder)\n for action in give_remainder_to:\n batch_per_action[action] += 1\n return batch_per_action\n\n def sample_each_action_equally(self, batch_per_action):\n \"\"\"Samples a number of experiences (determined by batch_per_action) from the memory buffer for each action\"\"\"\n samples = {}\n for action in range(self.num_actions):\n memory = self.memories[action]\n batch_size_for_action = batch_per_action[action]\n action_memory_size = len(memory)\n assert action_memory_size > 0, \"Need at least 1 experience for each action\"\n if action_memory_size >= batch_size_for_action:\n samples[action] = random.sample(memory, batch_size_for_action)\n else:\n print(\"Memory size {} vs. 
required batch size {}\".format(action_memory_size, batch_size_for_action))\n samples_for_action = []\n while len(samples_for_action) < batch_per_action[action]:\n remainder = batch_per_action[action] - len(samples_for_action)\n sampled_experiences = random.sample(memory, min(remainder, action_memory_size))\n samples_for_action.extend(sampled_experiences)\n samples[action] = samples_for_action\n return samples\n\n def __len__(self):\n return np.sum([len(memory) for memory in self.memories.values()])\n\n def sample_experiences_with_certain_actions(self, allowed_actions, num_all_actions, required_batch_size):\n \"\"\"Samples a number of experiences where the action conducted was in the list of required actions\"\"\"\n assert isinstance(allowed_actions, list)\n assert len(allowed_actions) > 0\n\n num_new_actions = len(allowed_actions)\n experiences_to_sample = int(required_batch_size * float(num_all_actions) / float(num_new_actions))\n experiences = self.sample(num_experiences=experiences_to_sample)\n states, actions, rewards, next_states, dones = experiences\n matching_indexes = np.argwhere((np.in1d(actions.numpy(), allowed_actions)))\n assert matching_indexes.shape[1] == 1\n\n matching_indexes = matching_indexes[:, 0]\n\n states = states[matching_indexes]\n actions = actions[matching_indexes]\n rewards = rewards[matching_indexes]\n next_states = next_states[matching_indexes]\n dones = dones[matching_indexes]\n\n assert abs(states.shape[0] - required_batch_size) <= 0.05*required_batch_size, \"{} vs. {}\".format(states.shape[0], required_batch_size)\n\n\n return (states, actions, rewards, next_states, dones)\n",
"import torch\nfrom torch.optim import Adam\nimport torch.nn.functional as F\nimport numpy as np\nfrom CybORG.Agents.ComplexAgents.Base_Agent import Base_Agent\nfrom CybORG.Agents.ComplexAgents.utilities.data_structures.Replay_Buffer import Replay_Buffer\nfrom CybORG.Agents.ComplexAgents.actor_critic_agents.SAC import SAC\nfrom CybORG.Agents.ComplexAgents.utilities.Utility_Functions import create_actor_distribution\n\nclass SAC_Discrete(SAC):\n \"\"\"The Soft Actor Critic for discrete actions. It inherits from SAC for continuous actions and only changes a few\n methods.\"\"\"\n agent_name = \"SAC\"\n def __init__(self, config):\n Base_Agent.__init__(self, config)\n assert self.action_types == \"DISCRETE\", \"Action types must be discrete. Use SAC instead for continuous actions\"\n assert self.config.hyperparameters[\"Actor\"][\"final_layer_activation\"] == \"Softmax\", \"Final actor layer must be softmax\"\n self.hyperparameters = config.hyperparameters\n self.critic_local = self.create_NN(input_dim=self.state_size, output_dim=self.action_size, key_to_use=\"Critic\")\n self.critic_local_2 = self.create_NN(input_dim=self.state_size, output_dim=self.action_size,\n key_to_use=\"Critic\", override_seed=self.config.seed + 1)\n self.critic_optimizer = torch.optim.Adam(self.critic_local.parameters(),\n lr=self.hyperparameters[\"Critic\"][\"learning_rate\"], eps=1e-4)\n self.critic_optimizer_2 = torch.optim.Adam(self.critic_local_2.parameters(),\n lr=self.hyperparameters[\"Critic\"][\"learning_rate\"], eps=1e-4)\n self.critic_target = self.create_NN(input_dim=self.state_size, output_dim=self.action_size,\n key_to_use=\"Critic\")\n self.critic_target_2 = self.create_NN(input_dim=self.state_size, output_dim=self.action_size,\n key_to_use=\"Critic\")\n Base_Agent.copy_model_over(self.critic_local, self.critic_target)\n Base_Agent.copy_model_over(self.critic_local_2, self.critic_target_2)\n self.memory = Replay_Buffer(self.hyperparameters[\"Critic\"][\"buffer_size\"], self.hyperparameters[\"batch_size\"],\n self.config.seed, device=self.device)\n\n self.actor_local = self.create_NN(input_dim=self.state_size, output_dim=self.action_size, key_to_use=\"Actor\")\n self.actor_optimizer = torch.optim.Adam(self.actor_local.parameters(),\n lr=self.hyperparameters[\"Actor\"][\"learning_rate\"], eps=1e-4)\n self.automatic_entropy_tuning = self.hyperparameters[\"automatically_tune_entropy_hyperparameter\"]\n if self.automatic_entropy_tuning:\n # we set the max possible entropy as the target entropy\n self.target_entropy = -np.log((1.0 / self.action_size)) * 0.98\n self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)\n self.alpha = self.log_alpha.exp()\n self.alpha_optim = Adam([self.log_alpha], lr=self.hyperparameters[\"Actor\"][\"learning_rate\"], eps=1e-4)\n else:\n self.alpha = self.hyperparameters[\"entropy_term_weight\"]\n assert not self.hyperparameters[\"add_extra_noise\"], \"There is no add extra noise option for the discrete version of SAC at moment\"\n self.add_extra_noise = False\n self.do_evaluation_iterations = self.hyperparameters[\"do_evaluation_iterations\"]\n\n def produce_action_and_action_info(self, state):\n \"\"\"Given the state, produces an action, the probability of the action, the log probability of the action, and\n the argmax action\"\"\"\n action_probabilities = self.actor_local(state)\n max_probability_action = torch.argmax(action_probabilities, dim=-1)\n action_distribution = create_actor_distribution(self.action_types, action_probabilities, self.action_size)\n 
action = action_distribution.sample().cpu()\n # Have to deal with situation of 0.0 probabilities because we can't do log 0\n z = action_probabilities == 0.0\n z = z.float() * 1e-8\n log_action_probabilities = torch.log(action_probabilities + z)\n return action, (action_probabilities, log_action_probabilities), max_probability_action\n\n def calculate_critic_losses(self, state_batch, action_batch, reward_batch, next_state_batch, mask_batch):\n \"\"\"Calculates the losses for the two critics. This is the ordinary Q-learning loss except the additional entropy\n term is taken into account\"\"\"\n with torch.no_grad():\n next_state_action, (action_probabilities, log_action_probabilities), _ = self.produce_action_and_action_info(next_state_batch)\n qf1_next_target = self.critic_target(next_state_batch)\n qf2_next_target = self.critic_target_2(next_state_batch)\n min_qf_next_target = action_probabilities * (torch.min(qf1_next_target, qf2_next_target) - self.alpha * log_action_probabilities)\n min_qf_next_target = min_qf_next_target.sum(dim=1).unsqueeze(-1)\n next_q_value = reward_batch + (1.0 - mask_batch) * self.hyperparameters[\"discount_rate\"] * (min_qf_next_target)\n\n qf1 = self.critic_local(state_batch).gather(1, action_batch.long())\n qf2 = self.critic_local_2(state_batch).gather(1, action_batch.long())\n qf1_loss = F.mse_loss(qf1, next_q_value)\n qf2_loss = F.mse_loss(qf2, next_q_value)\n return qf1_loss, qf2_loss\n\n def calculate_actor_loss(self, state_batch):\n \"\"\"Calculates the loss for the actor. This loss includes the additional entropy term\"\"\"\n action, (action_probabilities, log_action_probabilities), _ = self.produce_action_and_action_info(state_batch)\n qf1_pi = self.critic_local(state_batch)\n qf2_pi = self.critic_local_2(state_batch)\n min_qf_pi = torch.min(qf1_pi, qf2_pi)\n inside_term = self.alpha * log_action_probabilities - min_qf_pi\n policy_loss = (action_probabilities * inside_term).sum(dim=1).mean()\n log_action_probabilities = torch.sum(log_action_probabilities * action_probabilities, dim=1)\n return policy_loss, log_action_probabilities\n"
] |
[
[
"torch.cuda.is_available"
],
[
"torch.optim.Adam",
"numpy.log",
"torch.zeros",
"torch.min",
"torch.sum",
"torch.nn.functional.mse_loss",
"torch.log",
"torch.no_grad",
"torch.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
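For reference: the allocation idea behind `calculate_batch_sizes_per_action` in the replay buffer above is an equal share of the batch per action, with the remainder handed to randomly chosen actions. A minimal sketch of just that step:

```python
import random

def batch_sizes_per_action(batch_size, num_actions, seed=0):
    """Equal share per action, remainder spread over random actions."""
    random.seed(seed)
    base = batch_size // num_actions
    sizes = {a: base for a in range(num_actions)}
    for a in random.sample(range(num_actions), batch_size - base * num_actions):
        sizes[a] += 1
    return sizes

sizes = batch_sizes_per_action(10, 4)
print(sizes, sum(sizes.values()))  # four entries summing to 10, two of them 3
```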
xuyanbo03/lab
|
[
"cf2f5250e1a00ecce37b3480df28c3a5dcd08b57",
"cf2f5250e1a00ecce37b3480df28c3a5dcd08b57"
] |
[
"python/tests/episode_time_test.py",
"python/tests/teleporter_test.py"
] |
[
"# Copyright 2017-2018 Google Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n\"\"\"Test for the EpisodeTimeMs callback.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport unittest\nimport numpy as np\nimport six\n\nimport deepmind_lab\n\n\nclass EpisodeTimeTest(unittest.TestCase):\n\n def run_at_frame_rate(self, fps):\n env = deepmind_lab.Lab(\n 'tests/episode_time_test', ['EPISODE_TIME_SECONDS'],\n config={\n 'fps': str(fps),\n 'width': '32',\n 'height': '32'\n })\n\n env.reset()\n nop = np.zeros((7,), dtype=np.intc)\n\n for _ in six.moves.range(0, fps):\n env.step(nop, 1)\n\n obs = env.observations()\n self.assertEqual(obs['EPISODE_TIME_SECONDS'][0], 1.0)\n\n def test_at_60(self):\n self.run_at_frame_rate(60)\n\n def test_at_30(self):\n self.run_at_frame_rate(30)\n\nif __name__ == '__main__':\n if os.environ.get('TEST_SRCDIR'):\n deepmind_lab.set_runfiles_path(\n os.path.join(os.environ['TEST_SRCDIR'],\n 'org_deepmind_lab'))\n unittest.main()\n",
"# Copyright 2018 Google Inc.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport unittest\nimport numpy as np\nimport six\n\nimport deepmind_lab\n\n\nclass TeleporterTest(unittest.TestCase):\n\n def test_movement(self):\n fps = 60\n env = deepmind_lab.Lab(\n 'tests/teleporter_test', [\n 'VEL.TRANS',\n 'DEBUG.POS.TRANS',\n 'DEBUG.POS.ROT',\n ],\n config={\n 'fps': str(fps),\n 'width': '80',\n 'height': '80',\n })\n\n action_spec = env.action_spec()\n action_index = {action['name']: i for i, action in enumerate(action_spec)}\n\n action = np.zeros([len(action_spec)], dtype=np.intc)\n\n env.reset()\n vel = env.observations()['VEL.TRANS']\n self.assertTrue(np.array_equal(vel, np.array([0, 0, 0])))\n\n # Agent begins facing south\n initial_facing = env.observations()['DEBUG.POS.ROT']\n self.assertTrue(np.allclose(initial_facing, np.array([0, -90, 0]),\n atol=0.1))\n\n # Player moves straight ahead through the teleporter\n action[action_index['MOVE_BACK_FORWARD']] = 1\n self.assertEqual(env.events(), [])\n for _ in six.moves.range(120):\n p_before = env.observations()['DEBUG.POS.TRANS']\n env.step(action, 1)\n p_after = env.observations()['DEBUG.POS.TRANS']\n if p_after[1] - p_before[1] > 100:\n break\n else:\n self.fail('Failed to teleport')\n self.assertEqual(env.events(), [('PLAYER_TELEPORTED', [])])\n env.step(action, 1)\n self.assertEqual(env.events(), [])\n\nif __name__ == '__main__':\n if 'TEST_SRCDIR' in os.environ:\n deepmind_lab.set_runfiles_path(\n os.path.join(os.environ['TEST_SRCDIR'],\n 'org_deepmind_lab'))\n unittest.main()\n"
] |
[
[
"numpy.zeros"
],
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
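For reference: both tests drive DeepMind Lab with fixed-length `np.intc` action vectors, one slot per entry of `env.action_spec()`. A tiny sketch of that pattern; the slot index below is hypothetical, whereas the real tests look it up from the spec:

```python
import numpy as np

nop = np.zeros((7,), dtype=np.intc)  # all-zero action vector: do nothing
forward = nop.copy()
forward[3] = 1                       # hypothetical slot for MOVE_BACK_FORWARD
print(nop.dtype, forward.tolist())   # e.g. int32 [0, 0, 0, 1, 0, 0, 0]
```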
trimitri/freqle
|
[
"67f48a0f4a4ab181902796339e7d0ad7a40399c0"
] |
[
"parsers.py"
] |
[
"\"\"\"Parse output from various sources into `FreqSeries` objects.\"\"\"\nfrom typing import List\nimport pandas as pd\n\nfrom .freq_series import FreqSeries\n\n\ndef fokus2_txt(file_name: str, session: str = None,\n drop_lines: List[int] = None) -> FreqSeries:\n \"\"\"Parse frequency measurement done by the FOKUS2 Dual frequency comb.\n\n :param session: Measurement context. See `FreqSeries`'s `session` param.\n \"\"\"\n data = pd.read_table(file_name, squeeze=True, index_col=0, usecols=[0, 1])\n if drop_lines is not None:\n data.drop(data.index[drop_lines], inplace=True)\n data.index = pd.to_datetime(data.index, unit='us')\n with open(file_name) as file:\n data.name = file.readline().strip()\n return FreqSeries(data, session=session)\n\n\ndef generic_freq_counter(\n file_name: str, session: str = None,\n time_unit: str = 's', original_freq: float = None) -> FreqSeries:\n \"\"\"Parse a generic two-column counter file like (time, frequency).\n :param file_name: File to read from.\n :param time_unit: Which unit does the counter count time in? (s, ms, us, ns)\n \"\"\"\n data = pd.read_table(file_name, squeeze=True, index_col=0, usecols=[0, 1])\n data.index = pd.to_datetime(data.index, unit=time_unit)\n return FreqSeries(data, session=session, original_freq=original_freq)\n\n\ndef pendulum_cnt91_txt(file_name: str, session: str = None) -> FreqSeries:\n \"\"\"Parse frequency measurement done with a Pendulum CNT 91 counter.\n\n :param session: Measurement context. See `FreqSeries`'s `session` param.\n \"\"\"\n def get_start_time(file_name: str) -> pd.datetime:\n \"\"\"Get the measurement starting time from a CNT-91 CSV file.\"\"\"\n time_string = _get_info(file_name)[21:40]\n return pd.to_datetime(time_string)\n\n def _get_info(file_name: str) -> str:\n with open(file_name) as file:\n info = file.readline().replace('\\t', ' ').strip()\n return info\n\n data = pd.read_table(file_name, squeeze=True, index_col=0, usecols=[0, 1])\n data.index = pd.to_datetime(data.index, unit='s',\n origin=get_start_time(file_name))\n data.name = _get_info(file_name)\n return FreqSeries(data, session=session)\n\n\ndef menlo_lambda_freq_counter(file_name: str, session_name: str,\n original_freq: float, series: int = 1) -> FreqSeries:\n \"\"\"\n :param series: Which of the recorded time series to use?\n \"\"\"\n data = pd.read_csv(file_name, delim_whitespace=True, usecols=[2 + series],\n header=None, squeeze=True)\n\n # Create an equidistand time stamp index, as the values in the Menlo\n # counter file are garbage.\n first_sample = pd.read_csv(file_name, delim_whitespace=True,\n usecols=[0, 1], header=None,\n nrows=1).applymap(str).values\n last_sample = pd.read_csv(file_name, delim_whitespace=True,\n usecols=[0, 1], header=None,\n skiprows=len(data) - 1).applymap(str).values\n start = pd.to_datetime(\"{} {}\".format(first_sample[0][0], first_sample[0][1]),\n format='%y%m%d %H%M%S.%f')\n end = pd.to_datetime(\"{} {}\".format(last_sample[0][0], last_sample[0][1]),\n format='%y%m%d %H%M%S.%f')\n data.index = pd.date_range(start, end, len(data))\n\n return FreqSeries(data, session=session_name, original_freq=original_freq)\n"
] |
[
[
"pandas.read_table",
"pandas.to_datetime",
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
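For reference: the parsers above convert counter timestamps to absolute datetimes through the `unit` and `origin` arguments of `pandas.to_datetime`. A standalone sketch with made-up readings:

```python
import pandas as pd

data = pd.Series([10.0e6, 10.1e6, 10.2e6], index=[0.0, 1.0, 2.0], name='freq')
start = pd.to_datetime('2020-01-01 12:00:00')
data.index = pd.to_datetime(data.index, unit='s', origin=start)  # elapsed s -> datetimes
print(data.index[0])  # 2020-01-01 12:00:00
```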
hebrewsnabla/dh
|
[
"222e3d4d8d4d04cd63074327ebb5fb39ea4441b7"
] |
[
"pyscf/dh/grad/udfdh.py"
] |
[
"from __future__ import annotations\n# dh import\ntry:\n from dh.udfdh import UDFDH\n from dh.dhutil import calc_batch_size, gen_batch, gen_shl_batch, tot_size, timing\n from dh.grad.rdfdh import get_H_1_ao, get_S_1_ao, generator_L_1\n from dh.grad.rdfdh import Gradients as RGradients\nexcept ImportError:\n from pyscf.dh.udfdh import UDFDH\n from pyscf.dh.dhutil import calc_batch_size, gen_batch, gen_shl_batch, tot_size, timing\n from pyscf.dh.grad.rdfdh import get_H_1_ao, get_S_1_ao, generator_L_1\n from pyscf.dh.grad.rdfdh import Gradients as RGradients\n# pyscf import\nfrom pyscf import gto, lib, df\nfrom pyscf.df.grad.rhf import _int3c_wrapper as int3c_wrapper\ntry:\n from pyscf.dftd3 import itrf\nexcept ImportError:\n print('''Warning: dftd3 not found. You cannot using functionals with \"-D3\" suffix \n before installing pyscf-dftd3. See https://github.com/pyscf/dftd3 and\n https://github.com/ajz34/dh#dftd3-extension ''') \n# other import\nimport numpy as np\nimport itertools\nimport ctypes\n\neinsum = lib.einsum\nα, β = 0, 1\nαα, αβ, ββ = 0, 1, 2\n\n\n@timing\ndef get_gradient_jk(dfobj: df.DF, C, D, D_r, Y_mo, cx, cx_n, max_memory=2000):\n mol, aux = dfobj.mol, dfobj.auxmol\n natm, nao, nmo, nocc = mol.natm, mol.nao, C.shape[-1], mol.nelec\n mocc = max(nocc)\n naux = Y_mo[0].shape[0]\n # this algorithm asserts naux = aux.nao, i.e. no linear dependency in auxiliary basis\n assert naux == aux.nao\n so = slice(0, nocc[α]), slice(0, nocc[β])\n\n D_r_symm = (D_r + D_r.swapaxes(-1, -2)) / 2\n D_r_ao = einsum(\"sup, spq, svq -> suv\", C, D_r_symm, C)\n D_mo = np.zeros((2, nmo, nmo))\n for σ in (α, β):\n for i in range(nocc[σ]):\n D_mo[σ, i, i] = 1\n\n Y_dot_D, Y_dot_D_r = np.zeros((2, naux)), np.zeros((2, naux))\n nbatch = calc_batch_size(nmo**2, max_memory)\n for σ in (α, β):\n for i in range(nocc[σ]):\n Y_dot_D[σ] += Y_mo[σ][:, i, i]\n for saux in gen_batch(0, naux, nbatch):\n Y_dot_D_r[σ][saux] = einsum(\"Ppq, pq -> P\", Y_mo[σ][saux], D_r_symm[σ])\n\n Y_ip = [np.asarray(Y_mo[σ][:, so[σ]]) for σ in (α, β)]\n L_inv, L_1_gen = generator_L_1(aux)\n int3c2e_ip1_gen = int3c_wrapper(mol, aux, \"int3c2e_ip1\", \"s1\")\n int3c2e_ip2_gen = int3c_wrapper(mol, aux, \"int3c2e_ip2\", \"s1\")\n C0 = [C[σ][:, so[σ]] for σ in (α, β)]\n D1 = [cx * D_r_symm[σ] + 0.5 * cx_n * D_mo[σ] for σ in (α, β)]\n C1 = [C[σ] @ D1[σ] for σ in (α, β)]\n\n grad_contrib = np.zeros((natm, 3))\n for A in range(natm):\n shA0, shA1, _, _ = mol.aoslice_by_atom()[A]\n shA0a, shA1a, _, _ = aux.aoslice_by_atom()[A]\n\n Y_1_mo_D_r = [np.zeros((3, naux, nocc[σ], nmo)) for σ in (α, β)]\n Y_1_dot_D, Y_1_dot_D_r = np.zeros((2, 3, naux)), np.zeros((2, 3, naux))\n\n pre_flop = tot_size(Y_1_mo_D_r, Y_ip, Y_1_dot_D, Y_1_dot_D_r)\n nbatch = calc_batch_size(3*(nao+mocc)*naux, max_memory, pre_flop)\n for shU0, shU1, U0, U1 in gen_shl_batch(mol, nbatch, shA0, shA1):\n su = slice(U0, U1)\n int3c2e_ip1 = int3c2e_ip1_gen((shU0, shU1, 0, mol.nbas, 0, aux.nbas))\n for σ in (α, β):\n Y_1_mo_D_r[σ] -= einsum(\"tuvQ, PQ, ui, vp -> tPip\", int3c2e_ip1, L_inv, C0[σ][su], C1[σ])\n Y_1_mo_D_r[σ] -= einsum(\"tuvQ, PQ, up, vi -> tPip\", int3c2e_ip1, L_inv, C1[σ][su], C0[σ])\n Y_1_dot_D[σ] -= 2 * einsum(\"tuvQ, PQ, uv -> tP\", int3c2e_ip1, L_inv, D[σ][su])\n Y_1_dot_D_r[σ] -= 2 * einsum(\"tuvQ, PQ, uv -> tP\", int3c2e_ip1, L_inv, D_r_ao[σ][su])\n\n nbatch = calc_batch_size(3*nao*(nao+mocc), max_memory, pre_flop)\n for shP0, shP1, P0, P1 in gen_shl_batch(aux, nbatch, shA0a, shA1a):\n sp = slice(P0, P1)\n int3c2e_ip2 = int3c2e_ip2_gen((0, mol.nbas, 0, 
mol.nbas, shP0, shP1))\n for σ in (α, β):\n Y_1_mo_D_r[σ] -= einsum(\"tuvQ, PQ, ui, vp -> tPip\", int3c2e_ip2, L_inv[:, sp], C0[σ], C1[σ])\n Y_1_dot_D[σ] -= einsum(\"tuvQ, PQ, uv -> tP\", int3c2e_ip2, L_inv[:, sp], D[σ])\n Y_1_dot_D_r[σ] -= einsum(\"tuvQ, PQ, uv -> tP\", int3c2e_ip2, L_inv[:, sp], D_r_ao[σ])\n\n L_1 = L_1_gen(A)\n L_1_dot_inv = einsum(\"tRQ, PR -> tPQ\", L_1, L_inv)\n for σ in (α, β):\n Y_1_mo_D_r[σ] -= einsum(\"Qiq, qp, tPQ -> tPip\", Y_ip[σ], D1[σ], L_1_dot_inv)\n Y_1_dot_D[σ] -= einsum(\"Q, tPQ -> tP\", Y_dot_D[σ], L_1_dot_inv)\n Y_1_dot_D_r[σ] -= einsum(\"Q, tPQ -> tP\", Y_dot_D_r[σ], L_1_dot_inv)\n # RI-K contribution\n grad_contrib[A] += - 2 * einsum(\"Pip, tPip -> t\", Y_ip[σ], Y_1_mo_D_r[σ])\n\n # RI-J contribution\n for σ, ς in itertools.product((α, β), (α, β)):\n grad_contrib[A] += (\n + einsum(\"P, tP -> t\", Y_dot_D[σ], Y_1_dot_D_r[ς])\n + einsum(\"P, tP -> t\", Y_dot_D_r[σ], Y_1_dot_D[ς])\n + einsum(\"P, tP -> t\", Y_dot_D[σ], Y_1_dot_D[ς]))\n return grad_contrib\n\n\nclass Gradients(UDFDH, RGradients):\n\n def __init__(self, mol: gto.Mole, *args, skip_construct=False, **kwargs):\n if not skip_construct:\n super(Gradients, self).__init__(mol, *args, **kwargs)\n # results\n self.grad_jk = NotImplemented\n self.grad_gga = NotImplemented\n self.grad_pt2 = NotImplemented\n self.grad_enfunc = NotImplemented\n self.grad_tot = NotImplemented\n self.de = NotImplemented\n\n @timing\n def prepare_H_1(self):\n H_1_ao = get_H_1_ao(self.mol)\n H_1_mo = np.array([einsum(\"up, Auv, vq -> Apq\", self.C[σ], H_1_ao, self.C[σ]) for σ in (α, β)])\n self.tensors.create(\"H_1_ao\", H_1_ao)\n self.tensors.create(\"H_1_mo\", H_1_mo)\n\n @timing\n def prepare_S_1(self):\n S_1_ao = get_S_1_ao(self.mol)\n S_1_mo = np.array([einsum(\"up, Auv, vq -> Apq\", self.C[σ], S_1_ao, self.C[σ]) for σ in (α, β)])\n self.tensors.create(\"S_1_ao\", S_1_ao)\n self.tensors.create(\"S_1_mo\", S_1_mo)\n\n def prepare_gradient_jk(self):\n D_r = self.tensors.load(\"D_r\")\n Y_mo = [self.tensors[\"Y_mo_jk\" + str(σ)] for σ in (α, β)]\n # a special treatment\n cx_n = self.cx_n if self.xc_n else self.cx\n self.grad_jk = get_gradient_jk(self.df_jk, self.C, self.D, D_r, Y_mo, self.cx, cx_n, self.get_memory())\n\n @timing\n def prepare_gradient_gga(self):\n tensors = self.tensors\n if \"rho\" not in tensors:\n self.grad_gga = 0\n return self\n # --- LAZY CODE ---\n from pyscf import grad, hessian\n ni, mol, grids = self.ni, self.mol, self.grids\n natm = mol.natm\n C, D = self.C, self.D\n grad_contrib = np.zeros((natm, 3))\n\n xc = self.xc_n if self.xc_n else self.xc\n if self.ni._xc_type(xc) == \"GGA\": # energy functional contribution\n veff_1_gga = grad.uks.get_vxc(ni, mol, grids, xc, D)[1]\n for A, (_, _, A0, A1) in enumerate(mol.aoslice_by_atom()):\n grad_contrib[A] += 2 * einsum(\"stuv, suv -> t\", veff_1_gga[:, :, A0:A1], D[:, A0:A1])\n\n if self.ni._xc_type(self.xc) == \"GGA\": # reference functional skeleton fock derivative contribution\n D_r = tensors.load(\"D_r\")\n D_r_symm = (D_r + D_r.swapaxes(-1, -2)) / 2\n D_r_ao = einsum(\"sup, spq, svq -> suv\", C, D_r_symm, C)\n\n F_1_ao_dfa = np.array(hessian.uks._get_vxc_deriv1(self.mf_s.Hessian(), C, self.mo_occ, 2000))\n grad_contrib += einsum(\"suv, sAtuv -> At\", D_r_ao, F_1_ao_dfa)\n\n self.grad_gga = grad_contrib\n return self\n\n @timing\n def prepare_gradient_pt2(self):\n tensors = self.tensors\n C, D, e = self.C, self.D, self.e\n mol, aux_ri = self.mol, self.aux_ri\n natm, nao, nmo, nocc, nvir, naux = mol.natm, self.nao, self.nmo, self.nocc, 
self.nvir, self.df_ri.get_naoaux()\n mocc, mvir = max(nocc), max(nvir)\n # this algorithm asserts naux = aux.nao, i.e. no linear dependency in auxiliary basis\n assert naux == aux_ri.nao\n so, sv, sa = self.so, self.sv, self.sa\n\n D_r = tensors.load(\"D_r\")\n H_1_mo = tensors.load(\"H_1_mo\")\n grad_corr = einsum(\"spq, sApq -> A\", D_r, H_1_mo)\n if not self.eval_pt2:\n grad_corr.shape = (natm, 3)\n self.grad_pt2 = grad_corr\n return\n\n W_I = tensors.load(\"W_I\")\n W_II = - einsum(\"spq, sq -> spq\", D_r, e)\n W_III_tmp = self.Ax0_Core(so, so, sa, sa)(D_r)\n W = W_I + W_II\n for σ in (α, β):\n W[σ][so[σ], so[σ]] += - 0.5 * W_III_tmp[σ]\n W_ao = einsum(\"sup, spq, svq -> suv\", C, W, C)\n S_1_ao = tensors.load(\"S_1_ao\")\n grad_corr += np.einsum(\"suv, Auv -> A\", W_ao, S_1_ao)\n grad_corr.shape = (natm, 3)\n\n L_inv, L_1_gen = generator_L_1(aux_ri)\n int3c2e_ip1_gen = int3c_wrapper(mol, aux_ri, \"int3c2e_ip1\", \"s1\")\n int3c2e_ip2_gen = int3c_wrapper(mol, aux_ri, \"int3c2e_ip2\", \"s1\")\n Y_ia_ri = [np.asarray(tensors[\"Y_mo_ri\" + str(σ)][:, so[σ], sv[σ]]) for σ in (α, β)]\n G_ia_ri = [tensors.load(\"G_ia_ri\" + str(σ)) for σ in (α, β)]\n\n for A in range(natm):\n L_1_ri = L_1_gen(A)\n Y_1_ia_ri = [np.zeros((3, naux, nocc[σ], nvir[σ])) for σ in (α, β)]\n shA0, shA1, _, _ = mol.aoslice_by_atom()[A]\n shA0a, shA1a, _, _ = aux_ri.aoslice_by_atom()[A]\n\n nbatch = calc_batch_size(3*(nao+mocc)*naux, self.get_memory(), tot_size(Y_1_ia_ri))\n for shU0, shU1, U0, U1 in gen_shl_batch(mol, nbatch, shA0, shA1):\n su = slice(U0, U1)\n int3c2e_ip1 = int3c2e_ip1_gen((shU0, shU1, 0, mol.nbas, 0, aux_ri.nbas))\n for σ in (α, β):\n Y_1_ia_ri[σ] -= einsum(\"tuvQ, PQ, ui, va -> tPia\", int3c2e_ip1, L_inv, C[σ][su, so[σ]], C[σ][:, sv[σ]])\n Y_1_ia_ri[σ] -= einsum(\"tuvQ, PQ, ua, vi -> tPia\", int3c2e_ip1, L_inv, C[σ][su, sv[σ]], C[σ][:, so[σ]])\n\n nbatch = calc_batch_size(3*nao*(nao+mocc), self.get_memory(), tot_size(Y_1_ia_ri))\n for shP0, shP1, P0, P1 in gen_shl_batch(aux_ri, nbatch, shA0a, shA1a):\n sp = slice(P0, P1)\n int3c2e_ip2 = int3c2e_ip2_gen((0, mol.nbas, 0, mol.nbas, shP0, shP1))\n for σ in (α, β):\n Y_1_ia_ri[σ] -= einsum(\"tuvQ, PQ, ui, va -> tPia\", int3c2e_ip2, L_inv[:, sp], C[σ][:, so[σ]], C[σ][:, sv[σ]])\n\n for σ in (α, β):\n Y_1_ia_ri[σ] -= einsum(\"Qia, tRQ, PR -> tPia\", Y_ia_ri[σ], L_1_ri, L_inv)\n grad_corr[A] += einsum(\"Pia, tPia -> t\", G_ia_ri[σ], Y_1_ia_ri[σ])\n self.grad_pt2 = grad_corr\n\n @timing\n def prepare_gradient_enfunc(self):\n tensors = self.tensors\n natm = self.mol.natm\n Co, eo, D = self.Co, self.eo, self.D\n so = self.so\n\n grad_contrib = self.mf_s.Gradients().grad_nuc()\n grad_contrib.shape = (natm * 3,)\n\n H_1_ao = tensors.load(\"H_1_ao\")\n S_1_mo = tensors.load(\"S_1_mo\")\n\n grad_contrib += np.einsum(\"Auv, suv -> A\", H_1_ao, D, optimize=True) # TODO check PySCF lib.einsum why fails\n if self.xc_n is None:\n for σ in (α, β):\n grad_contrib -= np.einsum(\"Ai, i -> A\", S_1_mo[σ][:, so[σ], so[σ]].diagonal(0, -1, -2), eo[σ])\n else:\n # TODO see whether get_fock could use mo_coeff to accelearate RI-K\n F_0_ao_n = self.mf_n.get_fock(dm=D)\n nc_F_0_ij = [(Co[σ].T @ F_0_ao_n[σ] @ Co[σ]) for σ in (α, β)]\n for σ in (α, β):\n grad_contrib -= einsum(\"Aij, ij -> A\", S_1_mo[σ][:, so[σ], so[σ]], nc_F_0_ij[σ])\n grad_contrib.shape = (natm, 3)\n\n # handle dftd3 situation\n mol = self.mol\n if \"D3\" in self.xc_add:\n drv = itrf.libdftd3.wrapper_params\n params = np.asarray(self.xc_add[\"D3\"][0], order=\"F\")\n version = self.xc_add[\"D3\"][1]\n coords = 
np.asarray(mol.atom_coords(), order=\"F\")\n itype = np.asarray(mol.atom_charges(), order=\"F\")\n edisp = np.zeros(1)\n grad = np.zeros((mol.natm, 3))\n drv(\n ctypes.c_int(mol.natm), # natoms\n coords.ctypes.data_as(ctypes.c_void_p), # coords\n itype.ctypes.data_as(ctypes.c_void_p), # itype\n params.ctypes.data_as(ctypes.c_void_p), # params\n ctypes.c_int(version), # version\n edisp.ctypes.data_as(ctypes.c_void_p), # edisp\n grad.ctypes.data_as(ctypes.c_void_p)) # grads)\n grad_contrib += grad\n\n self.grad_enfunc = grad_contrib\n\n def base_method(self) -> UDFDH:\n self.__class__ = UDFDH\n return self\n\n"
] |
[
[
"numpy.asarray",
"numpy.zeros",
"numpy.einsum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
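For reference: a recurring pattern in `udfdh.py` above is symmetrizing a response density and rotating it from the MO to the AO basis in a single `einsum`. A self-contained sketch with random arrays of made-up size:

```python
import numpy as np

nao = nmo = 4
C = np.random.rand(2, nao, nmo)              # spin-resolved MO coefficients
D_r = np.random.rand(2, nmo, nmo)            # response density, not yet symmetric
D_r_symm = (D_r + D_r.swapaxes(-1, -2)) / 2  # symmetrize over the last two axes
D_r_ao = np.einsum("sup, spq, svq -> suv", C, D_r_symm, C)
print(D_r_ao.shape)                          # (2, 4, 4)
```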
frankxu2004/tacred-relation-cotrain
|
[
"005dd0cf27d6a68fcf3cbef736de3fe9759ce6b4",
"005dd0cf27d6a68fcf3cbef736de3fe9759ce6b4"
] |
[
"prepare_vocab.py",
"model/rnn.py"
] |
[
"\"\"\"\nPrepare vocabulary and initial word vectors.\n\"\"\"\nimport json\nimport msgpack\nimport pickle\nimport argparse\nimport numpy as np\nfrom collections import Counter\n\nfrom utils import vocab, constant, helper\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Prepare vocab for relation extraction.')\n parser.add_argument('data_dir', help='TACRED directory.')\n parser.add_argument('squad_dir', help='SQuAD directory.')\n parser.add_argument('vocab_dir', help='Output vocab directory.')\n parser.add_argument('--glove_dir', default='dataset/glove', help='GloVe directory.')\n parser.add_argument('--wv_file', default='glove.840B.300d.txt', help='GloVe vector file.')\n parser.add_argument('--wv_dim', type=int, default=300, help='GloVe vector dimension.')\n parser.add_argument('--min_freq', type=int, default=0, help='If > 0, use min_freq as the cutoff.')\n parser.add_argument('--lower', action='store_true', help='If specified, lowercase all words.')\n\n args = parser.parse_args()\n return args\n\n\ndef process_squad(squad_msgpack):\n train, dev = squad_msgpack\n train_tokens = []\n dev_tokens = []\n for row in train:\n train_tokens += row[1] # context\n train_tokens += row[5] # question\n for row in dev:\n dev_tokens += row[1]\n dev_tokens += row[5]\n return train_tokens, dev_tokens\n\n\ndef main():\n args = parse_args()\n\n # input files\n train_file = args.data_dir + '/train.json'\n dev_file = args.data_dir + '/dev.json'\n test_file = args.data_dir + '/test.json'\n wv_file = args.glove_dir + '/' + args.wv_file\n wv_dim = args.wv_dim\n\n # output files\n helper.ensure_dir(args.vocab_dir)\n vocab_file = args.vocab_dir + '/vocab.pkl'\n emb_file = args.vocab_dir + '/embedding.npy'\n\n # load files\n print(\"loading files...\")\n train_tokens = load_tokens(train_file)\n dev_tokens = load_tokens(dev_file)\n test_tokens = load_tokens(test_file)\n # processing squad intermediate files\n with open(args.squad_dir + '/intermediate.msgpack', 'rb') as squad_file:\n squad_msgpack = msgpack.load(squad_file, encoding='utf-8')\n squad_train, squad_dev = squad_msgpack\n squad_train_tokens, squad_dev_tokens = process_squad(squad_msgpack)\n if args.lower:\n train_tokens, dev_tokens, test_tokens = [[t.lower() for t in tokens] for tokens in \\\n (train_tokens, dev_tokens, test_tokens)]\n\n squad_train_tokens, squad_dev_tokens = [[t.lower() for t in tokens] for tokens in \\\n (squad_train_tokens, squad_dev_tokens)]\n # load glove\n print(\"loading glove...\")\n glove_vocab = vocab.load_glove_vocab(wv_file, wv_dim)\n print(\"{} words loaded from glove.\".format(len(glove_vocab)))\n\n print(\"building vocab...\")\n v = build_vocab(train_tokens + squad_train_tokens, glove_vocab, args.min_freq)\n\n print(\"calculating oov...\")\n datasets = {'train': train_tokens, 'dev': dev_tokens, 'test': test_tokens}\n for dname, d in datasets.items():\n total, oov = count_oov(d, v)\n print(\"{} oov: {}/{} ({:.2f}%)\".format(dname, oov, total, oov * 100.0 / total))\n\n print(\"building embeddings...\")\n embedding = vocab.build_embedding(wv_file, v, wv_dim)\n print(\"embedding size: {} x {}\".format(*embedding.shape))\n\n print(\"dumping to files...\")\n with open(vocab_file, 'wb') as outfile:\n pickle.dump(v, outfile)\n np.save(emb_file, embedding)\n print(\"all done.\")\n\n print('converting SQuAD dataset to ids')\n\n id2word = v\n word2id = dict([(id2word[idx], idx) for idx in range(len(id2word))])\n\n def to_id(row, unk_id=constant.UNK_ID):\n context_tokens = row[1]\n context_features = row[2]\n 
context_tags = row[3]\n context_ents = row[4]\n question_tokens = row[5]\n question_ids = [word2id[w] if w in word2id else unk_id for w in question_tokens]\n context_ids = [word2id[w] if w in word2id else unk_id for w in context_tokens]\n tag_ids = [constant.POS_TO_ID[w] if w in constant.POS_TO_ID else unk_id for w in context_tags]\n ent_ids = [constant.NER_TO_ID[w] if w in constant.NER_TO_ID else unk_id for w in context_ents]\n return [row[0], context_ids, context_features, tag_ids, ent_ids, question_ids] + row[6:]\n\n squad_train = list(map(to_id, squad_train))\n squad_dev = list(map(to_id, squad_dev))\n result = {\n 'train': squad_train,\n 'dev': squad_dev\n }\n # train: id, context_id, context_features, tag_id, ent_id,\n # question_id, context, context_token_span, answer_start, answer_end\n # dev: id, context_id, context_features, tag_id, ent_id,\n # question_id, context, context_token_span, answer\n with open('dataset/SQuAD/data.msgpack', 'wb') as f:\n msgpack.dump(result, f)\n\n\ndef load_tokens(filename):\n with open(filename) as infile:\n data = json.load(infile)\n tokens = []\n for d in data:\n tokens += d['tokens']\n print(\"{} tokens from {} examples loaded from {}.\".format(len(tokens), len(data), filename))\n return tokens\n\n\ndef build_vocab(tokens, glove_vocab, min_freq):\n \"\"\" build vocab from tokens and glove words. \"\"\"\n counter = Counter(t for t in tokens)\n # if min_freq > 0, use min_freq, otherwise keep all glove words\n if min_freq > 0:\n v = sorted([t for t in counter if counter.get(t) >= min_freq], key=counter.get, reverse=True)\n else:\n v = sorted([t for t in counter if t in glove_vocab], key=counter.get, reverse=True)\n # add special tokens and entity mask tokens\n v = constant.VOCAB_PREFIX + entity_masks() + v\n print(\"vocab built with {}/{} words.\".format(len(v), len(counter)))\n return v\n\n\ndef count_oov(tokens, vocab):\n c = Counter(t for t in tokens)\n total = sum(c.values())\n matched = sum(c[t] for t in vocab)\n return total, total - matched\n\n\ndef entity_masks():\n \"\"\" Get all entity mask tokens as a list. \"\"\"\n masks = []\n subj_entities = list(constant.SUBJ_NER_TO_ID.keys())[2:]\n obj_entities = list(constant.OBJ_NER_TO_ID.keys())[2:]\n masks += [\"SUBJ-\" + e for e in subj_entities]\n masks += [\"OBJ-\" + e for e in obj_entities]\n return masks\n\n\nif __name__ == '__main__':\n main()\n",
"\"\"\"\nA rnn model for relation extraction, written in pytorch.\n\"\"\"\nimport math\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import init\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\nfrom utils import constant, torch_utils\nfrom model import layers\n\n\nclass RelationModel(object):\n \"\"\" A wrapper class for the training and evaluation of models. \"\"\"\n\n def __init__(self, opt, emb_matrix=None):\n self.opt = opt\n self.model = PositionAwareRNN(opt, emb_matrix)\n self.criterion = nn.CrossEntropyLoss()\n self.parameters = [p for p in self.model.parameters() if p.requires_grad]\n if opt['cuda']:\n self.model.cuda()\n self.criterion.cuda()\n self.optimizer = torch_utils.get_optimizer(opt['optim'], self.parameters, opt['lr'])\n\n def joint_update(self, re_batch, qa_batch):\n if self.opt['cuda']:\n re_inputs = [Variable(b.cuda()) for b in re_batch[:7]]\n re_labels = Variable(re_batch[7].cuda())\n qa_inputs = [Variable(b.cuda()) for b in qa_batch[:8]]\n target_s = Variable(qa_batch[8].cuda())\n target_e = Variable(qa_batch[9].cuda())\n else:\n re_inputs = [Variable(b) for b in re_batch[:7]]\n re_labels = Variable(re_batch[7])\n qa_inputs = [Variable(b) for b in qa_batch[:8]]\n target_s = Variable(qa_batch[8])\n target_e = Variable(qa_batch[9])\n\n # step forward\n self.model.train()\n self.optimizer.zero_grad()\n logits, _ = self.model(re_inputs, data_type='RE')\n\n score_s, score_e = self.model(qa_inputs, data_type='RC')\n loss = self.opt['qa_weight'] * (self.criterion(score_s, target_s) + self.criterion(score_e, target_e)) + \\\n self.criterion(logits, re_labels)\n # backward\n loss.backward()\n torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])\n self.optimizer.step()\n loss_val = loss.data[0]\n return loss_val\n\n\n\n def update(self, batch):\n \"\"\" Run a step of forward and backward model update. \"\"\"\n if self.opt['cuda']:\n inputs = [Variable(b.cuda()) for b in batch[:7]]\n labels = Variable(batch[7].cuda())\n else:\n inputs = [Variable(b) for b in batch[:7]]\n labels = Variable(batch[7])\n\n # step forward\n self.model.train()\n self.optimizer.zero_grad()\n logits, _ = self.model(inputs, data_type='RE')\n loss = self.criterion(logits, labels)\n\n # backward\n loss.backward()\n torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])\n self.optimizer.step()\n loss_val = loss.data[0]\n return loss_val\n\n def update_qa(self, qa_batch):\n if self.opt['cuda']:\n inputs = [Variable(b.cuda()) for b in qa_batch[:8]]\n target_s = Variable(qa_batch[8].cuda())\n target_e = Variable(qa_batch[9].cuda())\n else:\n inputs = [Variable(b) for b in qa_batch[:8]]\n target_s = Variable(qa_batch[8])\n target_e = Variable(qa_batch[9])\n\n # step forward\n self.model.train()\n self.optimizer.zero_grad()\n score_s, score_e = self.model(inputs, data_type='RC')\n loss = self.criterion(score_s, target_s) + self.criterion(score_e, target_e)\n # backward\n loss.backward()\n torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])\n self.optimizer.step()\n loss_val = loss.data[0]\n return loss_val\n\n def predict(self, batch, unsort=True):\n \"\"\" Run forward prediction. If unsort is True, recover the original order of the batch. 
\"\"\"\n if self.opt['cuda']:\n inputs = [Variable(b.cuda()) for b in batch[:7]]\n labels = Variable(batch[7].cuda())\n else:\n inputs = [Variable(b) for b in batch[:7]]\n labels = Variable(batch[7])\n\n orig_idx = batch[8]\n\n # forward\n self.model.eval()\n logits, _ = self.model(inputs)\n loss = self.criterion(logits, labels)\n probs = F.softmax(logits).data.cpu().numpy().tolist()\n predictions = np.argmax(logits.data.cpu().numpy(), axis=1).tolist()\n if unsort:\n _, predictions, probs = [list(t) for t in zip(*sorted(zip(orig_idx, \\\n predictions, probs)))]\n return predictions, probs, loss.data[0]\n\n def update_lr(self, new_lr):\n torch_utils.change_lr(self.optimizer, new_lr)\n\n def save(self, filename, epoch):\n params = {\n 'model': self.model.state_dict(),\n 'config': self.opt,\n 'epoch': epoch\n }\n try:\n torch.save(params, filename)\n print(\"model saved to {}\".format(filename))\n except BaseException:\n print(\"[Warning: Saving failed... continuing anyway.]\")\n\n def load(self, filename):\n try:\n checkpoint = torch.load(filename)\n except BaseException:\n print(\"Cannot load model from {}\".format(filename))\n exit()\n self.model.load_state_dict(checkpoint['model'])\n self.opt = checkpoint['config']\n\n\nclass PositionAwareRNN(nn.Module):\n \"\"\" A sequence model for relation extraction. \"\"\"\n\n def __init__(self, opt, emb_matrix=None):\n super(PositionAwareRNN, self).__init__()\n self.drop = nn.Dropout(opt['dropout'])\n self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=constant.PAD_ID)\n if opt['pos_dim'] > 0:\n self.pos_emb = nn.Embedding(len(constant.POS_TO_ID), opt['pos_dim'],\n padding_idx=constant.PAD_ID)\n if opt['ner_dim'] > 0:\n self.ner_emb = nn.Embedding(len(constant.NER_TO_ID), opt['ner_dim'],\n padding_idx=constant.PAD_ID)\n\n input_size = opt['emb_dim'] + opt['pos_dim'] + opt['ner_dim']\n q_input_size = opt['emb_dim']\n self.rnn = nn.LSTM(input_size, opt['hidden_dim'], opt['num_layers'], batch_first=True,\n dropout=opt['dropout'])\n self.linear = nn.Linear(opt['hidden_dim'], opt['num_class'])\n\n self.qrnn = nn.LSTM(q_input_size, opt['hidden_dim'], opt['num_layers'], batch_first=True,\n dropout=opt['dropout'])\n\n self.self_attn = layers.LinearSeqAttn(opt['hidden_dim'])\n\n # Bilinear attention for span start/end\n self.start_attn = layers.BilinearSeqAttn(\n opt['hidden_dim'],\n opt['hidden_dim'],\n )\n self.end_attn = layers.BilinearSeqAttn(\n opt['hidden_dim'],\n opt['hidden_dim'],\n )\n\n if opt['attn']:\n self.attn_layer = layers.PositionAwareAttention(opt['hidden_dim'],\n opt['hidden_dim'], 2 * opt['pe_dim'], opt['attn_dim'])\n self.pe_emb = nn.Embedding(constant.MAX_LEN * 2 + 1, opt['pe_dim'])\n\n self.opt = opt\n self.topn = self.opt.get('topn', 1e10)\n self.use_cuda = opt['cuda']\n self.emb_matrix = emb_matrix\n self.init_weights()\n\n def init_weights(self):\n if self.emb_matrix is None:\n self.emb.weight.data[1:, :].uniform_(-1.0, 1.0) # keep padding dimension to be 0\n else:\n self.emb_matrix = torch.from_numpy(self.emb_matrix)\n self.emb.weight.data.copy_(self.emb_matrix)\n if self.opt['pos_dim'] > 0:\n self.pos_emb.weight.data[1:, :].uniform_(-1.0, 1.0)\n if self.opt['ner_dim'] > 0:\n self.ner_emb.weight.data[1:, :].uniform_(-1.0, 1.0)\n\n self.linear.bias.data.fill_(0)\n init.xavier_uniform(self.linear.weight, gain=1) # initialize linear layer\n if self.opt['attn']:\n self.pe_emb.weight.data.uniform_(-1.0, 1.0)\n\n # decide finetuning\n if self.topn <= 0:\n print(\"Do not finetune word embedding layer.\")\n 
self.emb.weight.requires_grad = False\n elif self.topn < self.opt['vocab_size']:\n print(\"Finetune top {} word embeddings.\".format(self.topn))\n self.emb.weight.register_hook(lambda x: \\\n torch_utils.keep_partial_grad(x, self.topn))\n else:\n print(\"Finetune all embeddings.\")\n\n def zero_state(self, batch_size):\n state_shape = (self.opt['num_layers'], batch_size, self.opt['hidden_dim'])\n h0 = c0 = Variable(torch.zeros(*state_shape), requires_grad=False)\n if self.use_cuda:\n return h0.cuda(), c0.cuda()\n else:\n return h0, c0\n\n def forward(self, inputs, data_type='RE'):\n if data_type == 'RE':\n words, masks, pos, ner, deprel, subj_pos, obj_pos = inputs # unpack\n seq_lens = list(masks.data.eq(constant.PAD_ID).long().sum(1).squeeze())\n batch_size = words.size()[0]\n\n # embedding lookup\n word_inputs = self.emb(words)\n inputs = [word_inputs]\n if self.opt['pos_dim'] > 0:\n inputs += [self.pos_emb(pos)]\n if self.opt['ner_dim'] > 0:\n inputs += [self.ner_emb(ner)]\n inputs = self.drop(torch.cat(inputs, dim=2)) # add dropout to input\n\n # rnn\n h0, c0 = self.zero_state(batch_size)\n inputs = nn.utils.rnn.pack_padded_sequence(inputs, seq_lens, batch_first=True)\n outputs, (ht, ct) = self.rnn(inputs, (h0, c0))\n outputs, output_lens = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)\n hidden = self.drop(ht[-1, :, :]) # get the outmost layer h_n\n outputs = self.drop(outputs)\n\n # attention\n if self.opt['attn']:\n # convert all negative PE numbers to positive indices\n # e.g., -2 -1 0 1 will be mapped to 98 99 100 101\n subj_pe_inputs = self.pe_emb(subj_pos + constant.MAX_LEN)\n obj_pe_inputs = self.pe_emb(obj_pos + constant.MAX_LEN)\n pe_features = torch.cat((subj_pe_inputs, obj_pe_inputs), dim=2)\n final_hidden = self.attn_layer(outputs, masks, hidden, pe_features)\n else:\n final_hidden = hidden\n\n logits = self.linear(final_hidden)\n return logits, final_hidden\n\n elif data_type == 'RC':\n context_words, context_pos, context_ner, context_mask, orig_idx, question_words, question_mask, q_orig_idx = inputs # unpack\n seq_lens = list(context_mask.data.eq(constant.PAD_ID).long().sum(1).squeeze())\n q_seq_lens = list(question_mask.data.eq(constant.PAD_ID).long().sum(1).squeeze())\n batch_size = context_words.size()[0]\n # embedding lookup\n word_inputs = self.emb(context_words)\n q_word_inputs = self.drop(self.emb(question_words))\n inputs = [word_inputs]\n if self.opt['pos_dim'] > 0:\n inputs += [self.pos_emb(context_pos)]\n if self.opt['ner_dim'] > 0:\n inputs += [self.ner_emb(context_ner)]\n inputs = self.drop(torch.cat(inputs, dim=2)) # add dropout to input\n\n # rnn\n h0, c0 = self.zero_state(batch_size)\n inputs = nn.utils.rnn.pack_padded_sequence(inputs, seq_lens, batch_first=True)\n outputs, _ = self.rnn(inputs, (h0, c0))\n outputs, output_lens = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True)\n doc_hiddens = self.drop(outputs)\n\n qh0, qc0 = self.zero_state(batch_size)\n q_word_inputs = nn.utils.rnn.pack_padded_sequence(q_word_inputs, q_seq_lens, batch_first=True)\n q_outputs, _ = self.qrnn(q_word_inputs, (qh0, qc0))\n q_outputs, q_output_lens = nn.utils.rnn.pad_packed_sequence(q_outputs, batch_first=True)\n q_hiddens = self.drop(q_outputs)\n\n q_merge_weights = self.self_attn(q_hiddens, question_mask)\n question_hidden = layers.weighted_avg(q_hiddens, q_merge_weights)\n\n # unsort both doc and question to original ordering\n doc_hiddens = doc_hiddens.index_select(0, orig_idx)\n question_hidden = question_hidden.index_select(0, q_orig_idx)\n 
context_mask = context_mask.index_select(0, orig_idx)\n\n # Predict start and end positions\n start_scores = self.start_attn(doc_hiddens, question_hidden, context_mask)\n end_scores = self.end_attn(doc_hiddens, question_hidden, context_mask)\n return start_scores, end_scores\n\n"
] |
[
[
"numpy.save"
],
[
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"torch.nn.functional.softmax",
"torch.nn.LSTM",
"torch.load",
"torch.zeros",
"torch.cat",
"torch.from_numpy",
"torch.nn.Embedding",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Linear",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.save",
"torch.nn.init.xavier_uniform",
"torch.autograd.Variable"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hengwei-chan/TDC
|
[
"d19bd63c46c8f283c19764c3c9970de66f9a5d29"
] |
[
"tdc/utils.py"
] |
[
"import requests\nfrom zipfile import ZipFile \nimport os, sys\nimport numpy as np\nimport pandas as pd\nfrom pandas.errors import EmptyDataError\nimport json\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport subprocess\nimport pickle\nfrom fuzzywuzzy import fuzz\nfrom tqdm import tqdm\nfrom .metadata import name2type, name2id, dataset_list, dataset_names, benchmark_names, benchmark2id, benchmark2type\nfrom .metadata import property_names, paired_dataset_names, single_molecule_dataset_names\nfrom .metadata import retrosyn_dataset_names, forwardsyn_dataset_names, molgenpaired_dataset_names, generation_datasets\nfrom .metadata import oracle2id, download_oracle_names, trivial_oracle_names, oracle_names, oracle2type \n\nfrom .label_name_list import dataset2target_lists\n\ntry:\n from urllib.error import HTTPError\n from urllib.parse import quote, urlencode\n from urllib.request import urlopen\nexcept ImportError:\n from urllib import urlencode\n from urllib2 import quote, urlopen, HTTPError\n\ndef fuzzy_search(name, dataset_names):\n\tname = name.lower()\n\tif name in dataset_names:\n\t\ts = name\n\telse: \n\t\t# print(\"========fuzzysearch=======\", dataset_names, name)\n\t\ts = get_closet_match(dataset_names, name)[0]\n\tif s in dataset_names:\n\t\treturn s\n\telse:\n\t\traise ValueError(s + \" does not belong to this task, please refer to the correct task name!\")\n\ndef download_wrapper(name, path, dataset_names):\n\tname = fuzzy_search(name, dataset_names)\n\tserver_path = 'https://dataverse.harvard.edu/api/access/datafile/'\n\n\tdataset_path = server_path + str(name2id[name])\n\n\tif not os.path.exists(path):\n\t\tos.mkdir(path)\n\n\tif os.path.exists(os.path.join(path, name + '.' + name2type[name])):\n\t\tprint_sys('Found local copy...')\n\telse:\n\t\tprint_sys(\"Downloading...\")\n\t\tdataverse_download(dataset_path, path, name, name2type)\n\treturn name\n\ndef oracle_download_wrapper(name, path, oracle_names):\n\tname = fuzzy_search(name, oracle_names)\n\tif name in trivial_oracle_names:\n\t\treturn name \n\n\tserver_path = 'https://dataverse.harvard.edu/api/access/datafile/'\n\tdataset_path = server_path + str(oracle2id[name])\n\n\tif not os.path.exists(path):\n\t\tos.mkdir(path)\n\n\tif os.path.exists(os.path.join(path, name + '.' + oracle2type[name])):\n\t\tprint_sys('Found local copy...')\n\telse:\n\t\tprint_sys(\"Downloading Oracle...\")\n\t\tdataverse_download(dataset_path, path, name, oracle2type) ## to-do to-check\n\t\tprint_sys(\"Done!\")\n\treturn name\n\ndef bm_download_wrapper(name, path):\n\tname = fuzzy_search(name, list(benchmark_names.keys()))\n\tserver_path = 'https://dataverse.harvard.edu/api/access/datafile/'\n\tdataset_path = server_path + str(benchmark2id[name])\n\n\tif not os.path.exists(path):\n\t\tos.mkdir(path)\n\n\tif os.path.exists(os.path.join(path, name)):\n\t\tprint_sys('Found local copy...')\n\telse:\n\t\tprint_sys('Downloading Benchmark Group...')\n\t\tdataverse_download(dataset_path, path, name, benchmark2type)\n\t\tprint_sys('Extracting zip file...')\n\t\twith ZipFile(os.path.join(path, name + '.zip'), 'r') as zip: \n\t\t\tzip.extractall(path = os.path.join(path))\n\t\tprint_sys(\"Done!\")\n\treturn name\n\ndef pd_load(name, path):\n\ttry:\n\t\tif name2type[name] == 'tab':\n\t\t\tdf = pd.read_csv(os.path.join(path, name + '.' + name2type[name]), sep = '\\t')\n\t\telif name2type[name] == 'csv':\n\t\t\tdf = pd.read_csv(os.path.join(path, name + '.' 
+ name2type[name]))\n\t\telif name2type[name] == 'pkl':\n\t\t\tdf = pd.read_pickle(os.path.join(path, name + '.' + name2type[name]))\n\t\telse:\n\t\t\traise ValueError(\"The file type must be one of tab/csv/pickle.\")\n\t\ttry:\n\t\t\tdf = df.drop_duplicates()\n\t\texcept:\n\t\t\tpass\n\t\treturn df\t\n\texcept (EmptyDataError, EOFError) as e:\n\t\timport sys\n\t\tsys.exit(\"TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.\")\n\ndef property_dataset_load(name, path, target, dataset_names):\n\tif target is None:\n\t\ttarget = 'Y'\n\tname = download_wrapper(name, path, dataset_names)\n\tprint_sys('Loading...')\n\tdf = pd_load(name, path)\n\ttry:\n\t\tif target is not None:\n\t\t\ttarget = fuzzy_search(target, df.columns.values)\n\t\tdf = df[df[target].notnull()].reset_index(drop = True)\n\texcept:\n\t\twith open(os.path.join(path, name + '.' + name2type[name]), 'r') as f:\n\t\t\tflag = 'Service Unavailable' in ' '.join(f.readlines())\n\t\t\tif flag:\n\t\t\t\timport sys\n\t\t\t\tsys.exit(\"TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.\")\n\t\t\telse:\n\t\t\t\tsys.exit(\"Please report this error to [email protected], thanks!\")\n\ttry:\n\t\treturn df['X'], df[target], df['ID']\n\texcept:\n\t\treturn df['Drug'], df[target], df['Drug_ID']\n\ndef molpair_process(name, path, dataset_names):\n\tname = download_wrapper(name, path, dataset_names)\n\tprint_sys('Loading...')\n\tdf = pd_load(name, path)\n\treturn df['input'], df['output']\n\ndef interaction_dataset_load(name, path, target, dataset_names):\n\tname = download_wrapper(name, path, dataset_names)\n\tprint_sys('Loading...')\n\tdf = pd_load(name, path)\n\ttry:\n\t\tif target is None:\n\t\t\ttarget = 'Y'\n\t\tif target not in df.columns.values:\n\t\t\t# for binary interaction data, the labels are all 1. negative samples can be sampled from utils.NegSample function\n\t\t\tdf[target] = 1\n\t\tif target is not None:\n\t\t\ttarget = fuzzy_search(target, df.columns.values)\n\t\tdf = df[df[target].notnull()].reset_index(drop = True)\n\t\treturn df['X1'], df['X2'], df[target], df['ID1'], df['ID2']\n\texcept:\n\t\twith open(os.path.join(path, name + '.' 
+ name2type[name]), 'r') as f:\n\t\t\tflag = 'Service Unavailable' in ' '.join(f.readlines())\n\t\t\tif flag:\n\t\t\t\timport sys\n\t\t\t\tsys.exit(\"TDC is hosted in Harvard Dataverse and it is currently under maintenance, please check back in a few hours or checkout https://dataverse.harvard.edu/.\")\n\t\t\telse:\n\t\t\t\tsys.exit(\"Please report this error to [email protected], thanks!\")\n\n\ndef multi_dataset_load(name, path, dataset_names):\n\tname = download_wrapper(name, path, dataset_names)\n\tprint_sys('Loading...')\n\tdf = pd_load(name, path)\n\treturn df\n\ndef generation_paired_dataset_load(name, path, dataset_names, input_name, output_name):\n\tname = download_wrapper(name, path, dataset_names)\n\tprint_sys('Loading...')\n\tdf = pd_load(name, path)\n\treturn df[input_name], df[output_name]\n\n\ndef distribution_dataset_load(name, path, dataset_names, column_name):\n\tname = download_wrapper(name, path, dataset_names)\n\tprint_sys('Loading...')\n\tdf = pd_load(name, path)\n\treturn df[column_name]\n\ndef generation_dataset_load(name, path, dataset_names):\n\tname = download_wrapper(name, path, dataset_names)\n\tprint_sys('Loading...')\n\tdf = pd_load(name, path)\n\treturn df['input'], df['target'] \n\ndef oracle_load(name, path = './oracle', oracle_names = oracle_names):\n\tname = oracle_download_wrapper(name, path, oracle_names)\n\treturn name \n\ndef bm_group_load(name, path):\n\tname = bm_download_wrapper(name, path)\n\treturn name\n\ndef get_label_map(name, path = './data', target = None, file_format = 'csv', output_format = 'dict', task = 'DDI'):\n\tname = fuzzy_search(name, dataset_names[task])\n\tif target is None:\n\t\ttarget = 'Y'\t\t\n\tdf = pd_load(name, path)\n\n\tif output_format == 'dict':\n\t\treturn dict(zip(df[target].values, df['Map'].values))\n\telif output_format == 'df':\n\t\treturn df\n\telif output_format == 'array':\n\t\treturn df['Map'].values\n\telse:\n\t\traise ValueError(\"Please use the correct output format, select from dict, df, array.\")\n\ndef get_reaction_type(name, path = './data', output_format = 'array'):\n\tname = fuzzy_search(name, dataset_names['RetroSyn'])\t\t\n\tdf = pd_load(name, path)\n\n\tif output_format == 'df':\n\t\treturn df\n\telif output_format == 'array':\n\t\treturn df['category'].values\n\telse:\n\t\traise ValueError(\"Please use the correct output format, select from df, array.\")\n\ndef dataverse_download(url, path, name, types):\n\tsave_path = os.path.join(path, name + '.' 
+ types[name])\n\tresponse = requests.get(url, stream=True)\n\ttotal_size_in_bytes = int(response.headers.get('content-length', 0))\n\tblock_size = 1024\n\tprogress_bar = tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)\n\twith open(save_path, 'wb') as file:\n\t\tfor data in response.iter_content(block_size):\n\t\t\tprogress_bar.update(len(data))\n\t\t\tfile.write(data)\n\tprogress_bar.close()\n\ndef convert_y_unit(y, from_, to_):\n\t\"\"\"\n\tArguments:\n\t\ty: a list of labels\n\t\tfrom_: 'nM' or 'p'\n\t\tto_: 'nM' or 'p'\n\n\tReturns:\n\t\ty: a numpy array of transformed labels\n\t\"\"\"\n\tif from_ == 'nM':\n\t\ty = y\n\telif from_ == 'p':\n\t\ty = 10**(-y) / 1e-9\n\n\tif to_ == 'p':\n\t\ty = -np.log10(y*1e-9 + 1e-10)\n\telif to_ == 'nM':\n\t\ty = y\n\n\treturn y\n\ndef label_transform(y, binary, threshold, convert_to_log, verbose = True, order = 'descending'):\n\t\"\"\"\n\tArguments:\n\t\ty: a list of labels\n\t\tbinary: binarize the label given the threshold\n\t\tthreshold: threshold values\n\t\tconvert_to_log: for continuous values such as Kd and etc\n\n\tReturns:\n\t\ty: a numpy array of transformed labels\n\t\"\"\"\n\n\tif (len(np.unique(y)) > 2) and binary:\n\t\tif verbose:\n\t\t\tprint(\"Binarization using threshold \" + str(threshold) + \"; you can specify your own threshold value with threshold = X\", flush = True, file = sys.stderr)\n\t\tif order == 'descending':\n\t\t\ty = np.array([1 if i else 0 for i in np.array(y) < threshold])\n\t\telif order == 'ascending':\n\t\t\ty = np.array([1 if i else 0 for i in np.array(y) > threshold])\n\t\telse:\n\t\t\traise ValueError(\"Please select order from 'descending' or 'ascending'!\")\n\telse:\n\t\tif (len(np.unique(y)) > 2) and convert_to_log:\n\t\t\tif verbose:\n\t\t\t\tprint('To log space...', flush = True, file = sys.stderr)\n\t\t\ty = convert_y_unit(np.array(y), 'nM', 'p') \n\t\telse:\n\t\t\ty = y\n\n\treturn y\n\ndef convert_to_log(y):\n\ty = convert_y_unit(np.array(y), 'nM', 'p') \n\treturn y\n\ndef convert_back_log(y):\n\ty = convert_y_unit(np.array(y), 'p', 'nM') \n\treturn y\n\ndef binarize(y, threshold, order = 'ascending'):\n\tif order == 'ascending':\n\t\ty = np.array([1 if i else 0 for i in np.array(y) > threshold])\n\telif order == 'descending':\n\t\ty = np.array([1 if i else 0 for i in np.array(y) < threshold])\n\telse:\n\t\traise AttributeError(\"'order' must be either ascending or descending\")\n\treturn y\n\ndef label_dist(y, name = None):\n\n\ttry:\n\t\timport seaborn as sns\n\t\timport matplotlib.pyplot as plt\n\texcept:\n\t\tinstall(\"seaborn\")\n\t\tinstall(\"matplotlib\")\n\t\timport seaborn as sns\n\t\timport matplotlib.pyplot as plt\n\n\tmedian = np.median(y)\n\tmean = np.mean(y)\n\n\tf, (ax_box, ax_hist) = plt.subplots(2, sharex=True, gridspec_kw= {\"height_ratios\": (0.15, 1)})\n\n\tif name is None:\n\t\tsns.boxplot(y, ax=ax_box).set_title(\"Label Distribution\")\n\telse:\n\t\tsns.boxplot(y, ax=ax_box).set_title(\"Label Distribution of \" + str(name) + \" Dataset\")\n\tax_box.axvline(median, color='b', linestyle='--')\n\tax_box.axvline(mean, color='g', linestyle='--')\n\n\tsns.distplot(y, ax = ax_hist)\n\tax_hist.axvline(median, color='b', linestyle='--')\n\tax_hist.axvline(mean, color='g', linestyle='--')\n\tax_hist.legend({'Median':median,'Mean':mean})\n\n\tax_box.set(xlabel='')\n\tplt.show()\n\t#print(\"The median is \" + str(median), flush = True, file = sys.stderr)\n\t#print(\"The mean is \" + str(mean), flush = True, file = sys.stderr)\n\n\n# random split\ndef create_fold(df, fold_seed, 
frac):\n\ttrain_frac, val_frac, test_frac = frac\n\ttest = df.sample(frac = test_frac, replace = False, random_state = fold_seed)\n\ttrain_val = df[~df.index.isin(test.index)]\n\tval = train_val.sample(frac = val_frac/(1-test_frac), replace = False, random_state = 1)\n\ttrain = train_val[~train_val.index.isin(val.index)]\n\n\treturn {'train': train.reset_index(drop = True), \n\t\t\t'valid': val.reset_index(drop = True), \n\t\t\t'test': test.reset_index(drop = True)}\n\n# cold setting\ndef create_fold_setting_cold(df, fold_seed, frac, entity):\n\ttrain_frac, val_frac, test_frac = frac\n\tgene_drop = df[entity].drop_duplicates().sample(frac = test_frac, replace = False, random_state = fold_seed).values\n\n\ttest = df[df[entity].isin(gene_drop)]\n\n\ttrain_val = df[~df[entity].isin(gene_drop)]\n\n\tgene_drop_val = train_val[entity].drop_duplicates().sample(frac = val_frac/(1-test_frac), replace = False, random_state = fold_seed).values\n\tval = train_val[train_val[entity].isin(gene_drop_val)]\n\ttrain = train_val[~train_val[entity].isin(gene_drop_val)]\n\n\treturn {'train': train.reset_index(drop = True), \n\t\t\t'valid': val.reset_index(drop = True), \n\t\t\t'test': test.reset_index(drop = True)}\n\n# scaffold split\ndef create_scaffold_split(df, seed, frac, entity):\n\t# reference: https://github.com/chemprop/chemprop/blob/master/chemprop/data/scaffold.py\n\ttry:\n\t\tfrom rdkit import Chem\n\t\tfrom rdkit.Chem.Scaffolds import MurckoScaffold\n\texcept:\n\t\traise ImportError(\"Please install rdkit by 'conda install -c conda-forge rdkit'! \")\n\tfrom tqdm import tqdm\n\tfrom random import Random\n\n\tfrom collections import defaultdict\n\trandom = Random(seed)\n\t\n\ts = df[entity].values\n\tscaffolds = defaultdict(set)\n\tidx2mol = dict(zip(list(range(len(s))),s))\n\n\terror_smiles = 0\n\tfor i, smiles in tqdm(enumerate(s), total=len(s)):\n\t\ttry:\n\t\t\tscaffold = MurckoScaffold.MurckoScaffoldSmiles(mol = Chem.MolFromSmiles(smiles), includeChirality = False)\n\t\t\tscaffolds[scaffold].add(i)\n\t\texcept:\n\t\t\tprint_sys(smiles + ' returns RDKit error and is thus omitted...')\n\t\t\terror_smiles += 1\n\t\n\ttrain, val, test = [], [], []\n\ttrain_size = int((len(df) - error_smiles) * frac[0])\n\tval_size = int((len(df) - error_smiles) * frac[1])\n\ttest_size = (len(df) - error_smiles) - train_size - val_size\n\ttrain_scaffold_count, val_scaffold_count, test_scaffold_count = 0, 0, 0\n\n\t#index_sets = sorted(list(scaffolds.values()), key=lambda i: len(i), reverse=True)\n\tindex_sets = list(scaffolds.values())\n\tbig_index_sets = []\n\tsmall_index_sets = []\n\tfor index_set in index_sets:\n\t\tif len(index_set) > val_size / 2 or len(index_set) > test_size / 2:\n\t\t\tbig_index_sets.append(index_set)\n\t\telse:\n\t\t\tsmall_index_sets.append(index_set)\n\trandom.seed(seed)\n\trandom.shuffle(big_index_sets)\n\trandom.shuffle(small_index_sets)\n\tindex_sets = big_index_sets + small_index_sets\n\n\tif frac[2] == 0:\n\t\tfor index_set in index_sets:\n\t\t\tif len(train) + len(index_set) <= train_size:\n\t\t\t\ttrain += index_set\n\t\t\t\ttrain_scaffold_count += 1\n\t\t\telse:\n\t\t\t\tval += index_set\n\t\t\t\tval_scaffold_count += 1\n\telse:\n\t\tfor index_set in index_sets:\n\t\t\tif len(train) + len(index_set) <= train_size:\n\t\t\t\ttrain += index_set\n\t\t\t\ttrain_scaffold_count += 1\n\t\t\telif len(val) + len(index_set) <= val_size:\n\t\t\t\tval += index_set\n\t\t\t\tval_scaffold_count += 1\n\t\t\telse:\n\t\t\t\ttest += index_set\n\t\t\t\ttest_scaffold_count += 1\n\n\treturn {'train': 
df.iloc[train].reset_index(drop = True), \n\t\t\t'valid': df.iloc[val].reset_index(drop = True), \n\t\t\t'test': df.iloc[test].reset_index(drop = True)}\n\ndef train_val_test_split(len_data, frac, seed):\n\ttest_size = int(len_data * frac[2])\n\ttrain_size = int(len_data * frac[0])\n\tval_size = len_data - train_size - test_size\n\tnp.random.seed(seed)\n\tx = np.array(list(range(len_data)))\n\tnp.random.shuffle(x)\n\treturn x[:train_size], x[train_size:(train_size + val_size)], x[-test_size:]\n\ndef install(package):\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])\n\ndef print_sys(s):\n\tprint(s, flush = True, file = sys.stderr)\n\ndef _parse_prop(search, proplist):\n \"\"\"Extract property value from record using the given urn search filter.\"\"\"\n props = [i for i in proplist if all(item in i['urn'].items() for item in search.items())]\n if len(props) > 0:\n return props[0]['value'][list(props[0]['value'].keys())[0]]\n\ndef request(identifier, namespace='cid', domain='compound', operation=None, output='JSON', searchtype=None):\n \"\"\"\n copied from https://github.com/mcs07/PubChemPy/blob/e3c4f4a9b6120433e5cc3383464c7a79e9b2b86e/pubchempy.py#L238\n Construct API request from parameters and return the response.\n Full specification at http://pubchem.ncbi.nlm.nih.gov/pug_rest/PUG_REST.html\n \"\"\"\n API_BASE = 'https://pubchem.ncbi.nlm.nih.gov/rest/pug'\n text_types = str, bytes\n if not identifier:\n raise ValueError('identifier/cid cannot be None')\n # If identifier is a list, join with commas into string\n if isinstance(identifier, int):\n identifier = str(identifier)\n if not isinstance(identifier, text_types):\n identifier = ','.join(str(x) for x in identifier)\n \n # Build API URL\n urlid, postdata = None, None\n if namespace == 'sourceid':\n identifier = identifier.replace('/', '.')\n if namespace in ['listkey', 'formula', 'sourceid'] \\\n or searchtype == 'xref' \\\n or (searchtype and namespace == 'cid') or domain == 'sources':\n urlid = quote(identifier.encode('utf8'))\n else:\n postdata = urlencode([(namespace, identifier)]).encode('utf8')\n comps = filter(None, [API_BASE, domain, searchtype, namespace, urlid, operation, output])\n apiurl = '/'.join(comps)\n # Make request\n response = urlopen(apiurl, postdata)\n return response\n\ndef NegSample(df, column_names, frac, two_types):\n \"\"\"Negative Sampling for Binary Interaction Dataset\n\n Parameters\n ----------\n df : pandas.DataFrame\n Data File\n column_names: list\n column names in the order of [id1, x1, id2, x2]\n \"\"\"\n x = int(len(df) * frac)\n id1, x1, id2, x2 = column_names\n df[id1] = df[id1].apply(lambda x: str(x))\n df[id2] = df[id2].apply(lambda x: str(x))\n\n if not two_types:\n df_unique = np.unique(df[[id1, id2]].values.reshape(-1))\n pos = df[[id1, id2]].values\n pos_set = set([tuple([i[0], i[1]]) for i in pos])\n np.random.seed(1234)\n samples = np.random.choice(df_unique, size=(x, 2), replace=True)\n neg_set = set([tuple([i[0], i[1]]) for i in samples if i[0] != i[1]]) - pos_set\n\n while len(neg_set) < x:\n sample = np.random.choice(df_unique, 2, replace=False)\n sample = tuple([sample[0], sample[1]])\n if sample not in pos_set:\n neg_set.add(sample)\n neg_list = [list(i) for i in neg_set]\n\n id2seq = dict(df[[id1, x1]].values)\n id2seq.update(df[[id2, x2]].values)\n\n neg_list_val = []\n for i in neg_list:\n neg_list_val.append([i[0], id2seq[i[0]], i[1], id2seq[i[1]], 0])\n \n df = df.append(pd.DataFrame(neg_list_val).rename(columns = {0: id1, 1: x1, 2: id2, 3: x2, 4: 
'Y'})).reset_index(drop = True)\n return df\n else:\n df_unique_id1 = np.unique(df[id1].values.reshape(-1))\n df_unique_id2 = np.unique(df[id2].values.reshape(-1))\n\n pos = df[[id1, id2]].values\n pos_set = set([tuple([i[0], i[1]]) for i in pos])\n np.random.seed(1234)\n\n sample_id1 = np.random.choice(df_unique_id1, size=len(df), replace=True)\n sample_id2 = np.random.choice(df_unique_id2, size=len(df), replace=True)\n\n neg_set = set([tuple([sample_id1[i], sample_id2[i]]) for i in range(len(df)) if sample_id1[i] != sample_id2[i]]) - pos_set\n\n while len(neg_set) < len(df):\n sample_id1 = np.random.choice(df_unique_id1, size=1, replace=True)\n sample_id2 = np.random.choice(df_unique_id2, size=1, replace=True)\n\n sample = tuple([sample_id1[0], sample_id2[0]])\n if sample not in pos_set:\n neg_set.add(sample)\n neg_list = [list(i) for i in neg_set]\n\n id2seq1 = dict(df[[id1, x1]].values)\n id2seq2 = dict(df[[id2, x2]].values)\n\n neg_list_val = []\n for i in neg_list:\n neg_list_val.append([i[0], id2seq1[i[0]], i[1], id2seq2[i[1]], 0])\n\n df = df.append(pd.DataFrame(neg_list_val).rename(columns = {0: id1, 1: x1, 2: id2, 3: x2, 4: 'Y'})).reset_index(drop = True)\n return df\n\ndef uniprot2seq(ProteinID):\n\t\"\"\"Get protein sequence from Uniprot ID\n\t\n\tParameters\n\t----------\n\tProteinID : str\n\t Uniprot ID\n\t\n\tReturns\n\t-------\n\tstr\n\t Amino acid sequence of input uniprot ID\n\t\"\"\"\n\timport urllib\n\timport string\n\timport urllib.request as ur\n\n\tID = str(ProteinID)\n\tlocalfile = ur.urlopen('http://www.uniprot.org/uniprot/' + ID + '.fasta')\n\ttemp = localfile.readlines()\n\tres = ''\n\tfor i in range(1, len(temp)):\n\t\tres = res + temp[i].strip().decode(\"utf-8\")\n\treturn res\n\ndef cid2smiles(cid):\n\ttry:\n\t\tsmiles = _parse_prop({'label': 'SMILES', 'name': 'Canonical'}, json.loads(request(cid).read().decode())['PC_Compounds'][0]['props'])\n\texcept:\n\t\tprint('cid ' + str(cid) + ' failed, use NULL string')\n\t\tsmiles = 'NULL'\n\treturn smiles\n\ndef get_closet_match(predefined_tokens, test_token, threshold=0.8):\n \"\"\"Get the closest match by Levenshtein Distance.\n\n Parameters\n ----------\n predefined_tokens : list of string\n Predefined string tokens.\n \n test_token : string \n User input that needs matching to existing tokens.\n \n threshold : float in (0, 1), optional (default=0.8)\n The lowest match score to raise errors.\n\n Returns\n -------\n token_max : string\n The closest matching predefined token.\n prob_max / 100 : float\n The match score, in [0, 1].\n \"\"\"\n prob_list = []\n\n for token in predefined_tokens:\n # print(token)\n prob_list.append(\n fuzz.ratio(str(token).lower(), str(test_token).lower()))\n\n assert (len(prob_list) == len(predefined_tokens))\n\n prob_max = np.nanmax(prob_list)\n token_max = predefined_tokens[np.nanargmax(prob_list)]\n\n # match similarity is low\n if prob_max / 100 < threshold:\n print_sys(predefined_tokens)\n raise ValueError(test_token,\n \"does not match to available values. 
\"\n \"Please double check.\")\n return token_max, prob_max / 100\n\ndef save_dict(path, obj):\n\twith open(path, 'wb') as f:\n\t\tpickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)\n\ndef load_dict(path):\n\twith open(path, 'rb') as f:\n\t\treturn pickle.load(f)\n\ndef retrieve_label_name_list(name):\n\tname = fuzzy_search(name, dataset_list)\n\treturn dataset2target_lists[name]\n\ndef retrieve_dataset_names(name):\n\treturn dataset_names[name]\n\ndef retrieve_all_benchmarks():\n\treturn list(benchmark_names.keys())\n\ndef retrieve_benchmark_names(name):\n\tname = fuzzy_search(name, list(benchmark_names.keys()))\n\tdatasets = benchmark_names[name]\n\n\tdataset_names = []\n\n\tfor task, datasets in datasets.items():\n\t\tfor dataset in datasets:\n\t\t\tdataset_names.append(dataset)\n\treturn dataset_names\n\ndef to_submission_format(results):\n df = pd.DataFrame(results)\n def get_metric(x):\n metric = []\n for i in x:\n metric.append(list(i.values())[0])\n return [round(np.mean(metric), 3), round(np.std(metric), 3)]\n return dict(df.apply(get_metric, axis = 1))\n"
] |
[
[
"numpy.nanmax",
"numpy.nanargmax",
"numpy.random.seed",
"numpy.random.choice",
"numpy.unique",
"numpy.median",
"matplotlib.pyplot.subplots",
"numpy.random.shuffle",
"pandas.DataFrame",
"numpy.std",
"numpy.log10",
"numpy.mean",
"numpy.array",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
newUtkarsh/detectron2
|
[
"e1c055abff34513f347a767f43bfe60e275b136c"
] |
[
"detectron2/utils/events.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport datetime\nimport json\nimport logging\nimport os\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nimport torch\nfrom fvcore.common.file_io import PathManager\nfrom fvcore.common.history_buffer import HistoryBuffer\n\n_CURRENT_STORAGE_STACK = []\n\n\ndef get_event_storage():\n assert len(\n _CURRENT_STORAGE_STACK\n ), \"get_event_storage() has to be called inside a 'with EventStorage(...)' context!\"\n return _CURRENT_STORAGE_STACK[-1]\n\n\nclass EventWriter:\n \"\"\"\n Base class for writers that obtain events from :class:`EventStorage` and process them.\n \"\"\"\n\n def write(self):\n raise NotImplementedError\n\n def close(self):\n pass\n\n\nclass JSONWriter(EventWriter):\n \"\"\"\n Write scalars to a json file.\n\n It saves scalars as one json per line (instead of a big json) for easy parsing.\n\n Examples parsing such a json file:\n\n .. code-block:: none\n\n $ cat metrics.json | jq -s '.[0:2]'\n [\n {\n \"data_time\": 0.008433341979980469,\n \"iteration\": 20,\n \"loss\": 1.9228371381759644,\n \"loss_box_reg\": 0.050025828182697296,\n \"loss_classifier\": 0.5316952466964722,\n \"loss_mask\": 0.7236229181289673,\n \"loss_rpn_box\": 0.0856662318110466,\n \"loss_rpn_cls\": 0.48198649287223816,\n \"lr\": 0.007173333333333333,\n \"time\": 0.25401854515075684\n },\n {\n \"data_time\": 0.007216215133666992,\n \"iteration\": 40,\n \"loss\": 1.282649278640747,\n \"loss_box_reg\": 0.06222952902317047,\n \"loss_classifier\": 0.30682939291000366,\n \"loss_mask\": 0.6970193982124329,\n \"loss_rpn_box\": 0.038663312792778015,\n \"loss_rpn_cls\": 0.1471673548221588,\n \"lr\": 0.007706666666666667,\n \"time\": 0.2490077018737793\n }\n ]\n\n $ cat metrics.json | jq '.loss_mask'\n 0.7126231789588928\n 0.689423680305481\n 0.6776131987571716\n ...\n\n \"\"\"\n\n def __init__(self, json_file, window_size=20):\n \"\"\"\n Args:\n json_file (str): path to the json file. 
New data will be appended if the file exists.\n window_size (int): the window size of median smoothing for the scalars whose\n `smoothing_hint` are True.\n \"\"\"\n self._file_handle = PathManager.open(json_file, \"a\")\n self._window_size = window_size\n\n def write(self):\n storage = get_event_storage()\n to_save = {\"iteration\": storage.iter}\n to_save.update(storage.latest_with_smoothing_hint(self._window_size))\n self._file_handle.write(json.dumps(to_save, sort_keys=True) + \"\\n\")\n self._file_handle.flush()\n try:\n os.fsync(self._file_handle.fileno())\n except AttributeError:\n pass\n\n def close(self):\n self._file_handle.close()\n\n\nclass TensorboardXWriter(EventWriter):\n \"\"\"\n Write all scalars to a tensorboard file.\n \"\"\"\n\n def __init__(self, log_dir: str, window_size: int = 20, **kwargs):\n \"\"\"\n Args:\n log_dir (str): The directory to save the output events\n window_size (int): the scalars will be median-smoothed by this window size\n kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`\n \"\"\"\n self._window_size = window_size\n from torch.utils.tensorboard import SummaryWriter\n\n self._writer = SummaryWriter(log_dir, **kwargs)\n\n def write(self):\n storage = get_event_storage()\n for k, v in storage.latest_with_smoothing_hint(self._window_size).items():\n self._writer.add_scalar(k, v, storage.iter)\n\n def close(self):\n if hasattr(self, \"_writer\"): # doesn't exist when the code fails at import\n self._writer.close()\n\n\nclass CommonMetricPrinter(EventWriter):\n \"\"\"\n Print **common** metrics to the terminal, including\n iteration time, ETA, memory, all losses, and the learning rate.\n\n To print something different, please implement a similar printer by yourself.\n \"\"\"\n\n def __init__(self, max_iter):\n \"\"\"\n Args:\n max_iter (int): the maximum number of iterations to train.\n Used to compute ETA.\n \"\"\"\n self.logger = logging.getLogger(__name__)\n self._max_iter = max_iter\n\n def write(self):\n storage = get_event_storage()\n iteration = storage.iter\n\n data_time, time = None, None\n eta_string = \"N/A\"\n try:\n data_time = storage.history(\"data_time\").avg(20)\n time = storage.history(\"time\").global_avg()\n eta_seconds = storage.history(\"time\").median(1000) * (self._max_iter - iteration)\n storage.put_scalar(\"eta_seconds\", eta_seconds, smoothing_hint=False)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n except KeyError: # they may not exist in the first few iterations (due to warmup)\n pass\n\n try:\n lr = \"{:.6f}\".format(storage.history(\"lr\").latest())\n except KeyError:\n lr = \"N/A\"\n\n if torch.cuda.is_available():\n max_mem_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0\n else:\n max_mem_mb = None\n\n # NOTE: max_mem is parsed by grep in \"dev/parse_results.sh\"\n self.logger.info(\n \"\"\"\\\neta: {eta} iter: {iter} {losses} \\\n{time} {data_time} \\\nlr: {lr} {memory}\\\n\"\"\".format(\n eta=eta_string,\n iter=iteration,\n losses=\" \".join(\n [\n \"{}: {:.3f}\".format(k, v.median(20))\n for k, v in storage.histories().items()\n if \"loss\" in k\n ]\n ),\n time=\"time: {:.4f}\".format(time) if time is not None else \"\",\n data_time=\"data_time: {:.4f}\".format(data_time) if data_time is not None else \"\",\n lr=lr,\n memory=\"max_mem: {:.0f}M\".format(max_mem_mb) if max_mem_mb is not None else \"\",\n )\n )\n\n\nclass EventStorage:\n \"\"\"\n The user-facing class that provides metric storage functionalities.\n\n In the future we may add support for storing / 
logging other types of data if needed.\n \"\"\"\n\n def __init__(self, start_iter=0):\n \"\"\"\n Args:\n start_iter (int): the iteration number to start with\n \"\"\"\n self._history = defaultdict(HistoryBuffer)\n self._smoothing_hints = {}\n self._latest_scalars = {}\n self._iter = start_iter\n self._current_prefix = \"\"\n\n def put_scalar(self, name, value, smoothing_hint=True):\n \"\"\"\n Add a scalar `value` to the `HistoryBuffer` associated with `name`.\n\n Args:\n smoothing_hint (bool): a 'hint' on whether this scalar is noisy and should be\n smoothed when logged. The hint will be accessible through\n :meth:`EventStorage.smoothing_hints`. A writer may ignore the hint\n and apply custom smoothing rule.\n\n It defaults to True because most scalars we save need to be smoothed to\n provide any useful signal.\n \"\"\"\n name = self._current_prefix + name\n history = self._history[name]\n value = float(value)\n history.update(value, self._iter)\n self._latest_scalars[name] = value\n\n existing_hint = self._smoothing_hints.get(name)\n if existing_hint is not None:\n assert (\n existing_hint == smoothing_hint\n ), \"Scalar {} was put with a different smoothing_hint!\".format(name)\n else:\n self._smoothing_hints[name] = smoothing_hint\n\n def put_scalars(self, *, smoothing_hint=True, **kwargs):\n \"\"\"\n Put multiple scalars from keyword arguments.\n\n Examples:\n\n storage.put_scalars(loss=my_loss, accuracy=my_accuracy, smoothing_hint=True)\n \"\"\"\n for k, v in kwargs.items():\n self.put_scalar(k, v, smoothing_hint=smoothing_hint)\n\n def history(self, name):\n \"\"\"\n Returns:\n HistoryBuffer: the scalar history for name\n \"\"\"\n ret = self._history.get(name, None)\n if ret is None:\n raise KeyError(\"No history metric available for {}!\".format(name))\n return ret\n\n def histories(self):\n \"\"\"\n Returns:\n dict[name -> HistoryBuffer]: the HistoryBuffer for all scalars\n \"\"\"\n return self._history\n\n def latest(self):\n \"\"\"\n Returns:\n dict[name -> number]: the scalars that's added in the current iteration.\n \"\"\"\n return self._latest_scalars\n\n def latest_with_smoothing_hint(self, window_size=20):\n \"\"\"\n Similar to :meth:`latest`, but the returned values\n are either the un-smoothed original latest value,\n or a median of the given window_size,\n depend on whether the smoothing_hint is True.\n\n This provides a default behavior that other writers can use.\n \"\"\"\n result = {}\n for k, v in self._latest_scalars.items():\n result[k] = self._history[k].median(window_size) if self._smoothing_hints[k] else v\n return result\n\n def smoothing_hints(self):\n \"\"\"\n Returns:\n dict[name -> bool]: the user-provided hint on whether the scalar\n is noisy and needs smoothing.\n \"\"\"\n return self._smoothing_hints\n\n def step(self):\n \"\"\"\n User should call this function at the beginning of each iteration, to\n notify the storage of the start of a new iteration.\n The storage will then be able to associate the new data with the\n correct iteration number.\n \"\"\"\n self._iter += 1\n self._latest_scalars = {}\n\n @property\n def iter(self):\n return self._iter\n\n @property\n def iteration(self):\n # for backward compatibility\n return self._iter\n\n def __enter__(self):\n _CURRENT_STORAGE_STACK.append(self)\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n assert _CURRENT_STORAGE_STACK[-1] == self\n _CURRENT_STORAGE_STACK.pop()\n\n @contextmanager\n def name_scope(self, name):\n \"\"\"\n Yields:\n A context within which all the events added to 
this storage\n will be prefixed by the name scope.\n \"\"\"\n old_prefix = self._current_prefix\n self._current_prefix = name.rstrip(\"/\") + \"/\"\n yield\n self._current_prefix = old_prefix\n"
] |
[
[
"torch.cuda.max_memory_allocated",
"torch.utils.tensorboard.SummaryWriter",
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SGeetansh/dffml
|
[
"04647bdcadef2f7e7b59cdd8ac1e89f17ef1095b"
] |
[
"model/pytorch/dffml_model_pytorch/pytorch_base.py"
] |
[
"import os\nimport pathlib\nfrom typing import Any, Tuple, AsyncIterator, List, Type, Dict\nimport copy\nimport time\n\nimport torch\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\n\nimport numpy as np\n\nfrom dffml.record import Record\nfrom dffml.model.accuracy import Accuracy\nfrom dffml.base import config, field\nfrom dffml.feature.feature import Feature, Features\nfrom dffml.source.source import Sources, SourcesContext\nfrom dffml.model.model import ModelContext, ModelNotTrained\nfrom .utils import NumpyToTensor, PyTorchLoss, CrossEntropyLossFunction\n\n\n@config\nclass PyTorchModelConfig:\n predict: Feature = field(\"Feature name holding classification value\")\n features: Features = field(\"Features to train on\")\n directory: pathlib.Path = field(\"Directory where state should be saved\")\n classifications: List[str] = field(\n \"Options for value of classification\", default=None\n )\n clstype: Type = field(\"Data type of classifications values\", default=str)\n imageSize: int = field(\n \"Common size for all images to resize and crop to\", default=None\n )\n enableGPU: bool = field(\"Utilize GPUs for processing\", default=False)\n epochs: int = field(\n \"Number of iterations to pass over all records in a source\", default=20\n )\n batch_size: int = field(\"Batch size\", default=32)\n validation_split: float = field(\n \"Split training data for Validation\", default=0.0\n )\n patience: int = field(\n \"Early stops the training if validation loss doesn't improve after a given patience\",\n default=5,\n )\n loss: PyTorchLoss = field(\n \"Loss Functions available in PyTorch\",\n default=CrossEntropyLossFunction,\n )\n optimizer: str = field(\n \"Optimizer Algorithms available in PyTorch\", default=\"SGD\"\n )\n normalize_mean: List[float] = field(\n \"Mean values for normalizing Tensor image\", default=None\n )\n normalize_std: List[float] = field(\n \"Standard Deviation values for normalizing Tensor image\", default=None\n )\n\n def __post_init__(self):\n if self.classifications is not None:\n self.classifications = list(\n map(self.clstype, self.classifications)\n )\n\n\nclass PyTorchModelContext(ModelContext):\n def __init__(self, parent):\n super().__init__(parent)\n if self.parent.config.classifications:\n self.cids = self._mkcids(self.parent.config.classifications)\n self.classifications = self._classifications(self.cids)\n self.features = self._applicable_features()\n self.model_path = self._model_path()\n self._model = None\n self.counter = 0\n\n if self.parent.config.enableGPU and torch.cuda.is_available():\n self.device = torch.device(\"cuda:0\")\n self.logger.info(\"Using CUDA\")\n else:\n self.device = torch.device(\"cpu\")\n\n async def __aenter__(self):\n if os.path.isfile(self.model_path):\n self.logger.info(f\"Using saved model from {self.model_path}\")\n self._model = torch.load(self.model_path)\n else:\n self._model = self.createModel()\n\n self.set_model_parameters()\n\n self.criterion = self.parent.config.loss.function\n self.optimizer = getattr(optim, self.parent.config.optimizer)(\n self.model_parameters, lr=0.001\n )\n self.exp_lr_scheduler = lr_scheduler.StepLR(\n self.optimizer, step_size=5, gamma=0.1\n )\n\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n pass\n\n def set_model_parameters(self):\n self.model_parameters = self._model.parameters()\n\n def _classifications(self, cids):\n \"\"\"\n Map classifications to numeric values\n \"\"\"\n classifications = {value: key for key, value in cids.items()}\n 
self.logger.debug(\n \"classifications(%d): %r\", len(classifications), classifications\n )\n return classifications\n\n def _applicable_features(self):\n return [name for name in self.parent.config.features.names()]\n\n def _model_path(self):\n if self.parent.config.directory is None:\n return None\n if not os.path.isdir(self.parent.config.directory):\n raise NotADirectoryError(\n \"%s is not a directory\" % (self.parent.config.directory)\n )\n os.makedirs(self.parent.config.directory, exist_ok=True)\n\n return os.path.join(self.parent.config.directory, \"model.pt\")\n\n def _mkcids(self, classifications):\n \"\"\"\n Create an index, possible classification mapping and sort the list of\n classifications first.\n \"\"\"\n cids = dict(\n zip(range(0, len(classifications)), sorted(classifications))\n )\n self.logger.debug(\"cids(%d): %r\", len(cids), cids)\n return cids\n\n async def dataset_generator(self, sources: Sources):\n self.logger.debug(\"Training on features: %r\", self.features)\n x_cols: Dict[str, Any] = {feature: [] for feature in self.features}\n y_cols = []\n all_records = []\n all_sources = sources.with_features(\n self.features + [self.parent.config.predict.name]\n )\n\n async for record in all_sources:\n for feature, results in record.features(self.features).items():\n x_cols[feature].append(np.array(results))\n y_cols.append(\n self.classifications[\n record.feature(self.parent.config.predict.name)\n ]\n if self.classifications\n else record.feature(self.parent.config.predict.name)\n )\n if (len(self.features)) > 1:\n self.logger.critical(\n \"Found more than one feature to train on. Only first feature will be used\"\n )\n if not y_cols:\n raise ValueError(\"No records to train on\")\n\n y_cols = np.array(y_cols)\n for feature in x_cols:\n x_cols[feature] = np.array(x_cols[feature])\n\n self.logger.info(\"------ Record Data ------\")\n self.logger.info(\"x_cols: %d\", len(list(x_cols.values())[0]))\n self.logger.info(\"y_cols: %d\", len(y_cols))\n self.logger.info(\"-----------------------\")\n\n x_cols = x_cols[self.features[0]]\n dataset = NumpyToTensor(\n x_cols,\n y_cols,\n size=self.parent.config.imageSize,\n norm_mean=self.parent.config.normalize_mean,\n norm_std=self.parent.config.normalize_std,\n )\n\n return dataset, len(dataset)\n\n async def prediction_data_generator(self, data):\n dataset = NumpyToTensor(\n [data],\n size=self.parent.config.imageSize,\n norm_mean=self.parent.config.normalize_mean,\n norm_std=self.parent.config.normalize_std,\n )\n dataloader = torch.utils.data.DataLoader(dataset)\n return dataloader\n\n async def train(self, sources: Sources):\n dataset, size = await self.dataset_generator(sources)\n size = {\n \"Training\": size - int(self.parent.config.validation_split * size),\n \"Validation\": int(self.parent.config.validation_split * size),\n }\n\n if self.parent.config.validation_split:\n data = dict(\n zip(\n [\"Training\", \"Validation\"],\n list(\n torch.utils.data.random_split(\n dataset, [size[\"Training\"], size[\"Validation\"]]\n )\n ),\n )\n )\n self.logger.info(\n \"Data split into Training samples: {} and Validation samples: {}\".format(\n size[\"Training\"], size[\"Validation\"]\n )\n )\n dataloaders = {\n x: torch.utils.data.DataLoader(\n data[x],\n batch_size=self.parent.config.batch_size,\n shuffle=True,\n num_workers=4,\n )\n for x in [\"Training\", \"Validation\"]\n }\n else:\n dataloaders = {\n \"Training\": torch.utils.data.DataLoader(\n dataset,\n batch_size=self.parent.config.batch_size,\n shuffle=True,\n 
num_workers=4,\n )\n }\n\n since = time.time()\n\n best_model_wts = copy.deepcopy(self._model.state_dict())\n best_acc = 0.0\n\n for epoch in range(self.parent.config.epochs):\n self.logger.debug(\n \"Epoch {}/{}\".format(epoch + 1, self.parent.config.epochs)\n )\n self.logger.debug(\"-\" * 10)\n\n for phase in dataloaders.keys():\n if phase == \"Training\":\n self._model.train()\n else:\n self._model.eval()\n\n running_loss = 0.0\n running_corrects = 0\n\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(self.device)\n labels = labels.to(self.device)\n self.optimizer.zero_grad()\n\n with torch.set_grad_enabled(phase == \"Training\"):\n outputs = self._model(inputs)\n if self.classifications:\n _, preds = torch.max(outputs, 1)\n loss = self.criterion(outputs, labels)\n\n if phase == \"Training\":\n loss.backward()\n self.optimizer.step()\n\n running_loss += loss.item() * inputs.size(0)\n if self.classifications:\n running_corrects += torch.sum(preds == labels.data)\n\n if phase == \"Training\":\n self.exp_lr_scheduler.step()\n\n epoch_loss = running_loss / size[phase]\n epoch_acc = (\n running_corrects.double() / size[phase]\n if self.classifications\n else 1.0 - epoch_loss\n )\n\n self.logger.debug(\n \"{} Loss: {:.4f} Acc: {:.4f}\".format(\n phase, epoch_loss, epoch_acc\n )\n )\n\n if phase == \"Validation\":\n if epoch_acc >= best_acc:\n best_acc = epoch_acc\n best_model_wts = copy.deepcopy(\n self._model.state_dict()\n )\n self.counter = 0\n else:\n self.counter += 1\n if best_acc == 1.0:\n self.counter = self.parent.config.patience\n\n self.logger.debug(\"\")\n\n if self.counter == self.parent.config.patience:\n self.logger.info(\n f\"Early stopping: Validation Loss didn't improve for {self.counter} \"\n + \"consecutive epochs OR maximum accuracy attained.\"\n )\n break\n\n time_elapsed = time.time() - since\n self.logger.info(\n \"Training complete in {:.0f}m {:.0f}s\".format(\n time_elapsed // 60, time_elapsed % 60\n )\n )\n\n if self.parent.config.validation_split:\n self.logger.info(\n \"Best Validation Accuracy: {:4f}\".format(best_acc)\n )\n self._model.load_state_dict(best_model_wts)\n\n torch.save(self._model, self.model_path)\n\n async def accuracy(self, sources: Sources) -> Accuracy:\n if not os.path.isfile(os.path.join(self.model_path)):\n raise ModelNotTrained(\"Train model before assessing for accuracy.\")\n\n dataset, size = await self.dataset_generator(sources)\n dataloader = torch.utils.data.DataLoader(\n dataset,\n batch_size=self.parent.config.batch_size,\n shuffle=True,\n num_workers=4,\n )\n\n self._model.eval()\n\n if self.classifications:\n running_corrects = 0\n\n for inputs, labels in dataloader:\n inputs = inputs.to(self.device)\n labels = labels.to(self.device)\n\n with torch.set_grad_enabled(False):\n outputs = self._model(inputs)\n _, preds = torch.max(outputs, 1)\n\n running_corrects += torch.sum(preds == labels.data)\n acc = running_corrects.double() / size\n else:\n running_loss = 0.0\n\n for inputs, labels in dataloader:\n inputs = inputs.to(self.device)\n labels = labels.to(self.device)\n\n with torch.set_grad_enabled(False):\n outputs = self._model(inputs)\n loss = self.criterion(outputs, labels)\n\n running_loss += loss.item() * inputs.size(0)\n\n total_loss = running_loss / size\n acc = 1.0 - total_loss\n\n return Accuracy(acc)\n\n async def predict(\n self, sources: SourcesContext\n ) -> AsyncIterator[Tuple[Record, Any, float]]:\n \"\"\"\n Uses trained data to make a prediction about the quality of a record.\n \"\"\"\n if not 
os.path.isfile(os.path.join(self.model_path)):\n raise ModelNotTrained(\"Train model before prediction.\")\n\n self._model.eval()\n async for record in sources.with_features(self.features):\n feature_data = record.features(self.features)[self.features[0]]\n predict = await self.prediction_data_generator(feature_data)\n target = self.parent.config.predict.name\n\n with torch.no_grad():\n for val in predict:\n val = val.to(self.device)\n output = self._model(val)\n\n if self.classifications:\n prob = torch.nn.functional.softmax(output, dim=1)\n confidence, prediction_value = prob.topk(1, dim=1)\n record.predicted(\n target,\n self.cids[prediction_value.item()],\n confidence,\n )\n else:\n confidence = 1.0 - self.criterion(val, output).item()\n record.predicted(target, output, confidence)\n\n yield record\n"
] |
[
[
"torch.optim.lr_scheduler.StepLR",
"torch.nn.functional.softmax",
"torch.max",
"torch.load",
"torch.utils.data.DataLoader",
"torch.sum",
"torch.utils.data.random_split",
"torch.set_grad_enabled",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"numpy.array",
"torch.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
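The `train()` method in the record above combines a two-phase (Training/Validation) loop with best-weight checkpointing and a patience counter for early stopping. Below is a minimal standalone sketch of that pattern; the model, data, and the `patience`/epoch values are toy stand-ins, not the record's actual config.

```python
import copy

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset, random_split

# Toy model and data standing in for the record's configured network/sources.
model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

dataset = TensorDataset(torch.randn(100, 4), torch.randint(0, 2, (100,)))
train_set, val_set = random_split(dataset, [80, 20])
loaders = {
    "Training": DataLoader(train_set, batch_size=16, shuffle=True),
    "Validation": DataLoader(val_set, batch_size=16),
}

# `patience` and the epoch count are invented for this sketch.
best_acc, best_wts, counter, patience = 0.0, copy.deepcopy(model.state_dict()), 0, 5
for epoch in range(20):
    for phase, loader in loaders.items():
        model.train(phase == "Training")  # eval mode during validation
        correct = seen = 0
        for x, y in loader:
            optimizer.zero_grad()
            with torch.set_grad_enabled(phase == "Training"):
                out = model(x)
                loss = criterion(out, y)
                if phase == "Training":
                    loss.backward()
                    optimizer.step()
            correct += (out.argmax(1) == y).sum().item()
            seen += y.size(0)
        acc = correct / seen
        if phase == "Validation":
            if acc >= best_acc:
                # Checkpoint a deep copy so later epochs cannot mutate it.
                best_acc, best_wts, counter = acc, copy.deepcopy(model.state_dict()), 0
            else:
                counter += 1  # one more epoch without improvement
    if counter >= patience:  # early stopping, as in train() above
        break
model.load_state_dict(best_wts)  # restore the best checkpoint
```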
ElielGez/Malicious_URL_zdm_new_features_lee_eliel
|
[
"04935b5b1b280906d684be36977151d89a09b4c6"
] |
[
"Tools/collect_entropy.py"
] |
[
"import pandas as pd\nfrom collections import defaultdict\nfrom urllib.parse import urlparse\nimport math\n\n\ndf = pd.read_csv('Final_newData_withFeatures.csv')\n\nurls = df['0']\n\nentropies = []\n\nfor index, url in enumerate(urls):\n domain=\"\"\n if url[:4] == 'http':\n domain = urlparse(url).netloc\n else:\n domain = urlparse('http://'+url).netloc\n \n entropy = 0\n str_len = len(domain)\n chars = defaultdict(int)\n for char in domain:\n chars[char] += 1\n for char in domain:\n pj = (chars[char]/str_len)\n entropy += pj*math.log(pj,2)\n entropies.append((-1)*entropy)\n\ndf['6'] = pd.Series(entropies)\n\n#df.drop('Unnamed: 0', inplace=True, axis=1)\n#df=df[df['length'] != -1]\ndf.to_csv('superFinal.csv')"
] |
[
[
"pandas.read_csv",
"pandas.Series"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
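The `collect_entropy.py` record above computes the Shannon entropy of each URL's domain as a feature. A compact sketch of the same computation using `collections.Counter`; the sample URLs are invented.

```python
import math
from collections import Counter
from urllib.parse import urlparse

def domain_entropy(url: str) -> float:
    if not url.startswith("http"):
        url = "http://" + url
    domain = urlparse(url).netloc
    if not domain:
        return 0.0
    n = len(domain)
    counts = Counter(domain)
    # Shannon entropy: each *distinct* character contributes one p*log2(p) term.
    return -sum((c / n) * math.log2(c / n) for c in counts.values())

print(domain_entropy("example.com"))    # repetitive domain, lower entropy
print(domain_entropy("x9f2-qz7b.net"))  # varied characters, higher entropy
```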
gayhub-wpp/alpha_pipe_oldversion
|
[
"877047442ae939df3f15611d48ea2df1476a940c"
] |
[
"alpha_pipe/analyzer/utils.py"
] |
[
"from numpy import sqrt, mean\nfrom collections import Iterable\nfrom functools import wraps\nimport six\nimport pandas as pd\nimport numpy as np\nimport re\nimport warnings\n\nfrom IPython.display import display\nfrom pandas.tseries.offsets import CustomBusinessDay, Day, BusinessDay\nfrom scipy.stats import mode\n\n\nclass NonMatchingTimezoneError(Exception):\n pass\n\n\nclass MaxLossExceededError(Exception):\n pass\n\n\ndef rethrow(exception, additional_message):\n \"\"\"\n Re-raise the last exception that was active in the current scope\n without losing the stacktrace but adding an additional message.\n This is hacky because it has to be compatible with both python 2/3\n \"\"\"\n e = exception\n m = additional_message\n if not e.args:\n e.args = (m,)\n else:\n e.args = (e.args[0] + m,) + e.args[1:]\n raise e\n\n\ndef non_unique_bin_edges_error(func):\n \"\"\"\n Give user a more informative error in case it is not possible\n to properly calculate quantiles on the input dataframe (factor)\n \"\"\"\n message = \"\"\"\n\n An error occurred while computing bins/quantiles on the input provided.\n This usually happens when the input contains too many identical\n values and they span more than one quantile. The quantiles are choosen\n to have the same number of records each, but the same value cannot span\n multiple quantiles. Possible workarounds are:\n 1 - Decrease the number of quantiles\n 2 - Specify a custom quantiles range, e.g. [0, .50, .75, 1.] to get unequal\n number of records per quantile\n 3 - Use 'bins' option instead of 'quantiles', 'bins' chooses the\n buckets to be evenly spaced according to the values themselves, while\n 'quantiles' forces the buckets to have the same number of records.\n 4 - for factors with discrete values use the 'bins' option with custom\n ranges and create a range for each discrete value\n Please see utils.get_clean_factor_and_forward_returns documentation for\n full documentation of 'bins' and 'quantiles' options.\n\n\"\"\"\n\n def dec(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except ValueError as e:\n if 'Bin edges must be unique' in str(e):\n rethrow(e, message)\n raise\n return dec\n\n\n@non_unique_bin_edges_error\ndef quantize_factor(factor_data,\n quantiles=5,\n bins=None,\n by_group=False,\n no_raise=False,\n zero_aware=False):\n \"\"\"\n Computes period wise factor quantiles.\n\n Parameters\n ----------\n factor_data : pd.DataFrame - MultiIndex\n A MultiIndex DataFrame indexed by date (level 0) and asset (level 1),\n containing the values for a single alpha factor, forward returns for\n each period, the factor quantile/bin that factor value belongs to, and\n (optionally) the group the asset belongs to.\n\n - See full explanation in utils.get_clean_factor_and_forward_returns\n\n quantiles : int or sequence[float]\n Number of equal-sized quantile buckets to use in factor bucketing.\n Alternately sequence of quantiles, allowing non-equal-sized buckets\n e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]\n Only one of 'quantiles' or 'bins' can be not-None\n bins : int or sequence[float]\n Number of equal-width (valuewise) bins to use in factor bucketing.\n Alternately sequence of bin edges allowing for non-uniform bin width\n e.g. 
[-4, -2, -0.5, 0, 10]\n Only one of 'quantiles' or 'bins' can be not-None\n by_group : bool, optional\n If True, compute quantile buckets separately for each group.\n no_raise: bool, optional\n If True, no exceptions are thrown and the values for which the\n exception would have been thrown are set to np.NaN\n zero_aware : bool, optional\n If True, compute quantile buckets separately for positive and negative\n signal values. This is useful if your signal is centered and zero is\n the separation between long and short signals, respectively.\n\n Returns\n -------\n factor_quantile : pd.Series\n Factor quantiles indexed by date and asset.\n \"\"\"\n if not ((quantiles is not None and bins is None) or\n (quantiles is None and bins is not None)):\n raise ValueError('Either quantiles or bins should be provided')\n\n if zero_aware and not (isinstance(quantiles, int)\n or isinstance(bins, int)):\n msg = (\"zero_aware should only be True when quantiles or bins is an\"\n \" integer\")\n raise ValueError(msg)\n\n def quantile_calc(x, _quantiles, _bins, _zero_aware, _no_raise):\n try:\n if _quantiles is not None and _bins is None and not _zero_aware:\n return pd.qcut(x, _quantiles, labels=False) + 1\n elif _quantiles is not None and _bins is None and _zero_aware:\n pos_quantiles = pd.qcut(x[x >= 0], _quantiles // 2,\n labels=False) + _quantiles // 2 + 1\n neg_quantiles = pd.qcut(x[x < 0], _quantiles // 2,\n labels=False) + 1\n return pd.concat([pos_quantiles, neg_quantiles]).sort_index()\n elif _bins is not None and _quantiles is None and not _zero_aware:\n return pd.cut(x, _bins, labels=False) + 1\n elif _bins is not None and _quantiles is None and _zero_aware:\n pos_bins = pd.cut(x[x >= 0], _bins // 2,\n labels=False) + _bins // 2 + 1\n neg_bins = pd.cut(x[x < 0], _bins // 2,\n labels=False) + 1\n return pd.concat([pos_bins, neg_bins]).sort_index()\n except Exception as e:\n if _no_raise:\n return pd.Series(index=x.index)\n raise e\n\n grouper = [factor_data.index.get_level_values('date')]\n if by_group:\n grouper.append('group')\n\n factor_quantile = factor_data.groupby(grouper)['factor'] \\\n .apply(quantile_calc, quantiles, bins, zero_aware, no_raise)\n factor_quantile.name = 'factor_quantile'\n\n return factor_quantile.dropna()\n\n\ndef infer_trading_calendar(factor_idx, prices_idx):\n \"\"\"\n Infer the trading calendar from factor and price information.\n\n Parameters\n ----------\n factor_idx : pd.DatetimeIndex\n The factor datetimes for which we are computing the forward returns\n prices_idx : pd.DatetimeIndex\n The prices datetimes associated withthe factor data\n\n Returns\n -------\n calendar : pd.DateOffset\n \"\"\"\n full_idx = factor_idx.union(prices_idx)\n\n traded_weekdays = []\n holidays = []\n\n days_of_the_week = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']\n for day, day_str in enumerate(days_of_the_week):\n\n weekday_mask = (full_idx.dayofweek == day)\n\n # drop days of the week that are not traded at all\n if not weekday_mask.any():\n continue\n traded_weekdays.append(day_str)\n\n # look for holidays\n used_weekdays = full_idx[weekday_mask].normalize()\n all_weekdays = pd.date_range(full_idx.min(), full_idx.max(),\n freq=CustomBusinessDay(weekmask=day_str)\n ).normalize()\n _holidays = all_weekdays.difference(used_weekdays)\n _holidays = [timestamp.date() for timestamp in _holidays]\n holidays.extend(_holidays)\n\n traded_weekdays = ' '.join(traded_weekdays)\n return CustomBusinessDay(weekmask=traded_weekdays, holidays=holidays)\n\n\ndef compute_forward_returns(factor,\n 
prices,\n periods=(1, 5, 10),\n filter_zscore=None,\n cumulative_returns=True):\n \"\"\"\n Finds the N period forward returns (as percent change) for each asset\n provided.\n\n Parameters\n ----------\n factor : pd.Series - MultiIndex\n A MultiIndex Series indexed by timestamp (level 0) and asset\n (level 1), containing the values for a single alpha factor.\n\n - See full explanation in utils.get_clean_factor_and_forward_returns\n\n prices : pd.DataFrame\n Pricing data to use in forward price calculation.\n Assets as columns, dates as index. Pricing data must\n span the factor analysis time period plus an additional buffer window\n that is greater than the maximum number of expected periods\n in the forward returns calculations.\n periods : sequence[int]\n periods to compute forward returns on.\n filter_zscore : int or float, optional\n Sets forward returns greater than X standard deviations\n from the the mean to nan. Set it to 'None' to avoid filtering.\n Caution: this outlier filtering incorporates lookahead bias.\n cumulative_returns : bool, optional\n If True, forward returns columns will contain cumulative returns.\n Setting this to False is useful if you want to analyze how predictive\n a factor is for a single forward day.\n\n Returns\n -------\n forward_returns : pd.DataFrame - MultiIndex\n A MultiIndex DataFrame indexed by timestamp (level 0) and asset\n (level 1), containing the forward returns for assets.\n Forward returns column names follow the format accepted by\n pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc).\n 'date' index freq property (forward_returns.index.levels[0].freq)\n will be set to a trading calendar (pandas DateOffset) inferred\n from the input data (see infer_trading_calendar for more details).\n \"\"\"\n\n factor_dateindex = factor.index.levels[0]\n if factor_dateindex.tz != prices.index.tz:\n raise NonMatchingTimezoneError(\"The timezone of 'factor' is not the \"\n \"same as the timezone of 'prices'. See \"\n \"the pandas methods tz_localize and \"\n \"tz_convert.\")\n\n freq = infer_trading_calendar(factor_dateindex, prices.index)\n\n factor_dateindex = factor_dateindex.intersection(prices.index)\n\n if len(factor_dateindex) == 0:\n raise ValueError(\"Factor and prices indices don't match: make sure \"\n \"they have the same convention in terms of datetimes \"\n \"and symbol-names\")\n\n # chop prices down to only the assets we care about (= unique assets in\n # `factor`). we could modify `prices` in place, but that might confuse\n # the caller.\n prices = prices.filter(items=factor.index.levels[1])\n\n raw_values_dict = {}\n column_list = []\n\n for period in sorted(periods):\n if cumulative_returns:\n returns = prices.pct_change(period)\n else:\n returns = prices.pct_change()\n\n forward_returns = \\\n returns.shift(-period).reindex(factor_dateindex)\n\n if filter_zscore is not None:\n mask = abs(\n forward_returns - forward_returns.mean()\n ) > (filter_zscore * forward_returns.std())\n forward_returns[mask] = np.nan\n\n #\n # Find the period length, which will be the column name. 
We'll test\n # several entries in order to find out the most likely period length\n # (in case the user passed inconsinstent data)\n #\n days_diffs = []\n for i in range(30):\n if i >= len(forward_returns.index):\n break\n p_idx = prices.index.get_loc(forward_returns.index[i])\n if p_idx is None or p_idx < 0 or (\n p_idx + period) >= len(prices.index):\n continue\n start = prices.index[p_idx]\n end = prices.index[p_idx + period]\n period_len = diff_custom_calendar_timedeltas(start, end, freq)\n days_diffs.append(period_len.components.days)\n\n delta_days = period_len.components.days - mode(days_diffs).mode[0]\n period_len -= pd.Timedelta(days=delta_days)\n label = timedelta_to_string(period_len)\n\n column_list.append(label)\n\n raw_values_dict[label] = np.concatenate(forward_returns.values)\n\n df = pd.DataFrame.from_dict(raw_values_dict)\n df.set_index(\n pd.MultiIndex.from_product(\n [factor_dateindex, prices.columns],\n names=['date', 'asset']\n ),\n inplace=True\n )\n df = df.reindex(factor.index)\n\n # now set the columns correctly\n df = df[column_list]\n\n df.index.levels[0].freq = freq\n df.index.set_names(['date', 'asset'], inplace=True)\n\n return df\n\n\ndef backshift_returns_series(series, N):\n \"\"\"Shift a multi-indexed series backwards by N observations in\n the first level.\n\n This can be used to convert backward-looking returns into a\n forward-returns series.\n \"\"\"\n ix = series.index\n dates, sids = ix.levels\n date_labels, sid_labels = map(np.array, ix.labels)\n\n # Output date labels will contain the all but the last N dates.\n new_dates = dates[:-N]\n\n # Output data will remove the first M rows, where M is the index of the\n # last record with one of the first N dates.\n cutoff = date_labels.searchsorted(N)\n new_date_labels = date_labels[cutoff:] - N\n new_sid_labels = sid_labels[cutoff:]\n new_values = series.values[cutoff:]\n\n assert new_date_labels[0] == 0\n\n new_index = pd.MultiIndex(\n levels=[new_dates, sids],\n labels=[new_date_labels, new_sid_labels],\n sortorder=1,\n names=ix.names,\n )\n\n return pd.Series(data=new_values, index=new_index)\n\n\ndef demean_forward_returns(factor_data, grouper=None):\n \"\"\"\n Convert forward returns to returns relative to mean\n period wise all-universe or group returns.\n group-wise normalization incorporates the assumption of a\n group neutral portfolio constraint and thus allows allows the\n factor to be evaluated across groups.\n\n For example, if AAPL 5 period return is 0.1% and mean 5 period\n return for the Technology stocks in our universe was 0.5% in the\n same period, the group adjusted 5 period return for AAPL in this\n period is -0.4%.\n\n Parameters\n ----------\n factor_data : pd.DataFrame - MultiIndex\n Forward returns indexed by date and asset.\n Separate column for each forward return window.\n grouper : list\n If True, demean according to group.\n\n Returns\n -------\n adjusted_forward_returns : pd.DataFrame - MultiIndex\n DataFrame of the same format as the input, but with each\n security's returns normalized by group.\n \"\"\"\n\n factor_data = factor_data.copy()\n # from IPython.display import display\n # display(factor_data)\n if not grouper:\n grouper = factor_data.index.get_level_values('date')\n\n cols = get_forward_returns_columns(factor_data.columns)\n\n # factor_data[cols] = factor_data.groupby(grouper)[cols] \\\n # .transform(lambda x: x - x.mean())\n factor_data[cols] = factor_data.groupby(\n grouper, as_index=False\n )[cols.append(pd.Index(['weights']))].apply(\n lambda x: 
x[cols].subtract(\n np.average(x[cols], axis=0,\n weights=x['weights'].fillna(0.0).values),\n axis=1\n )\n )\n\n return factor_data\n\n\ndef print_table(table, name=None, fmt=None):\n \"\"\"\n Pretty print a pandas DataFrame.\n\n Uses HTML output if running inside Jupyter Notebook, otherwise\n formatted text output.\n\n Parameters\n ----------\n table : pd.Series or pd.DataFrame\n Table to pretty-print.\n name : str, optional\n Table name to display in upper left corner.\n fmt : str, optional\n Formatter to use for displaying table elements.\n E.g. '{0:.2f}%' for displaying 100 as '100.00%'.\n Restores original setting after displaying.\n \"\"\"\n if isinstance(table, pd.Series):\n table = pd.DataFrame(table)\n\n if isinstance(table, pd.DataFrame):\n table.columns.name = name\n\n prev_option = pd.get_option('display.float_format')\n if fmt is not None:\n pd.set_option('display.float_format', lambda x: fmt.format(x))\n\n display(table)\n\n if fmt is not None:\n pd.set_option('display.float_format', prev_option)\n\n\ndef get_clean_factor(factor,\n forward_returns,\n groupby=None,\n binning_by_group=False,\n quantiles=5,\n bins=None,\n groupby_labels=None,\n max_loss=0.35,\n zero_aware=False):\n \"\"\"\n Formats the factor data, forward return data, and group mappings into a\n DataFrame that contains aligned MultiIndex indices of timestamp and asset.\n The returned data will be formatted to be suitable for Alphalens functions.\n\n It is safe to skip a call to this function and still make use of Alphalens\n functionalities as long as the factor data conforms to the format returned\n from get_clean_factor_and_forward_returns and documented here\n\n Parameters\n ----------\n factor : pd.Series - MultiIndex\n A MultiIndex Series indexed by timestamp (level 0) and asset\n (level 1), containing the values for a single alpha factor.\n ::\n -----------------------------------\n date | asset |\n -----------------------------------\n | AAPL | 0.5\n -----------------------\n | BA | -1.1\n -----------------------\n 2014-01-01 | CMG | 1.7\n -----------------------\n | DAL | -0.1\n -----------------------\n | LULU | 2.7\n -----------------------\n\n forward_returns : pd.DataFrame - MultiIndex\n A MultiIndex DataFrame indexed by timestamp (level 0) and asset\n (level 1), containing the forward returns for assets.\n Forward returns column names must follow the format accepted by\n pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc).\n 'date' index freq property must be set to a trading calendar\n (pandas DateOffset), see infer_trading_calendar for more details.\n This information is currently used only in cumulative returns\n computation\n ::\n ---------------------------------------\n | | 1D | 5D | 10D\n ---------------------------------------\n date | asset | | |\n ---------------------------------------\n | AAPL | 0.09|-0.01|-0.079\n ----------------------------\n | BA | 0.02| 0.06| 0.020\n ----------------------------\n 2014-01-01 | CMG | 0.03| 0.09| 0.036\n ----------------------------\n | DAL |-0.02|-0.06|-0.029\n ----------------------------\n | LULU |-0.03| 0.05|-0.009\n ----------------------------\n\n groupby : pd.Series - MultiIndex or dict\n Either A MultiIndex Series indexed by date and asset,\n containing the period wise group codes for each asset, or\n a dict of asset to group mappings. 
If a dict is passed,\n it is assumed that group mappings are unchanged for the\n entire time period of the passed factor data.\n binning_by_group : bool\n If True, compute quantile buckets separately for each group.\n This is useful when the factor values range vary considerably\n across gorups so that it is wise to make the binning group relative.\n You should probably enable this if the factor is intended\n to be analyzed for a group neutral portfolio\n quantiles : int or sequence[float]\n Number of equal-sized quantile buckets to use in factor bucketing.\n Alternately sequence of quantiles, allowing non-equal-sized buckets\n e.g. [0, .10, .5, .90, 1.] or [.05, .5, .95]\n Only one of 'quantiles' or 'bins' can be not-None\n bins : int or sequence[float]\n Number of equal-width (valuewise) bins to use in factor bucketing.\n Alternately sequence of bin edges allowing for non-uniform bin width\n e.g. [-4, -2, -0.5, 0, 10]\n Chooses the buckets to be evenly spaced according to the values\n themselves. Useful when the factor contains discrete values.\n Only one of 'quantiles' or 'bins' can be not-None\n groupby_labels : dict\n A dictionary keyed by group code with values corresponding\n to the display name for each group.\n max_loss : float, optional\n Maximum percentage (0.00 to 1.00) of factor data dropping allowed,\n computed comparing the number of items in the input factor index and\n the number of items in the output DataFrame index.\n Factor data can be partially dropped due to being flawed itself\n (e.g. NaNs), not having provided enough price data to compute\n forward returns for all factor values, or because it is not possible\n to perform binning.\n Set max_loss=0 to avoid Exceptions suppression.\n zero_aware : bool, optional\n If True, compute quantile buckets separately for positive and negative\n signal values. This is useful if your signal is centered and zero is\n the separation between long and short signals, respectively.\n 'quantiles' is None.\n\n Returns\n -------\n merged_data : pd.DataFrame - MultiIndex\n A MultiIndex Series indexed by date (level 0) and asset (level 1),\n containing the values for a single alpha factor, forward returns for\n each period, the factor quantile/bin that factor value belongs to, and\n (optionally) the group the asset belongs to.\n\n - forward returns column names follow the format accepted by\n pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc)\n\n - 'date' index freq property (merged_data.index.levels[0].freq) is the\n same as that of the input forward returns data. 
This is currently\n used only in cumulative returns computation\n ::\n -------------------------------------------------------------------\n | | 1D | 5D | 10D |factor|group|factor_quantile\n -------------------------------------------------------------------\n date | asset | | | | | |\n -------------------------------------------------------------------\n | AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3\n --------------------------------------------------------\n | BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5\n --------------------------------------------------------\n 2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1\n --------------------------------------------------------\n | DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5\n --------------------------------------------------------\n | LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2\n --------------------------------------------------------\n \"\"\"\n\n initial_amount = float(len(factor.index))\n\n factor_copy = factor.copy()\n factor_copy.index = factor_copy.index.rename(['date', 'asset'])\n factor_copy = factor_copy[np.isfinite(factor_copy)]\n\n merged_data = forward_returns.copy()\n merged_data['factor'] = factor_copy\n\n if groupby is not None:\n if isinstance(groupby, dict):\n diff = set(factor_copy.index.get_level_values(\n 'asset')) - set(groupby.keys())\n if len(diff) > 0:\n raise KeyError(\n \"Assets {} not in group mapping\".format(\n list(diff)))\n\n ss = pd.Series(groupby)\n groupby = pd.Series(index=factor_copy.index,\n data=ss[factor_copy.index.get_level_values(\n 'asset')].values)\n\n if groupby_labels is not None:\n diff = set(groupby.values) - set(groupby_labels.keys())\n if len(diff) > 0:\n raise KeyError(\n \"groups {} not in passed group names\".format(\n list(diff)))\n\n sn = pd.Series(groupby_labels)\n groupby = pd.Series(index=groupby.index,\n data=sn[groupby.values].values)\n\n merged_data['group'] = groupby.astype('category')\n\n merged_data = merged_data.dropna()\n\n fwdret_amount = float(len(merged_data.index))\n\n no_raise = False if max_loss == 0 else True\n quantile_data = quantize_factor(\n merged_data,\n quantiles,\n bins,\n binning_by_group,\n no_raise,\n zero_aware\n )\n\n merged_data['factor_quantile'] = quantile_data\n\n merged_data = merged_data.dropna()\n\n binning_amount = float(len(merged_data.index))\n\n tot_loss = (initial_amount - binning_amount) / initial_amount\n fwdret_loss = (initial_amount - fwdret_amount) / initial_amount\n bin_loss = tot_loss - fwdret_loss\n\n print(\"Dropped %.1f%% entries from factor data: %.1f%% in forward \"\n \"returns computation and %.1f%% in binning phase \"\n \"(set max_loss=0 to see potentially suppressed Exceptions).\" %\n (tot_loss * 100, fwdret_loss * 100, bin_loss * 100))\n\n if tot_loss > max_loss:\n message = (\"max_loss (%.1f%%) exceeded %.1f%%, consider increasing it.\"\n % (max_loss * 100, tot_loss * 100))\n raise MaxLossExceededError(message)\n else:\n print(\"max_loss is %.1f%%, not exceeded: OK!\" % (max_loss * 100))\n\n return merged_data\n\n\ndef get_clean_factor_and_forward_returns(factor,\n prices,\n groupby=None,\n binning_by_group=False,\n quantiles=5,\n bins=None,\n periods=(1, 5, 10),\n filter_zscore=20,\n groupby_labels=None,\n max_loss=0.35,\n zero_aware=False,\n cumulative_returns=True):\n \"\"\"\n Formats the factor data, pricing data, and group mappings into a DataFrame\n that contains aligned MultiIndex indices of timestamp and asset. 
The\n returned data will be formatted to be suitable for Alphalens functions.\n\n It is safe to skip a call to this function and still make use of Alphalens\n functionalities as long as the factor data conforms to the format returned\n from get_clean_factor_and_forward_returns and documented here\n\n Parameters\n ----------\n factor : pd.Series - MultiIndex\n A MultiIndex Series indexed by timestamp (level 0) and asset\n (level 1), containing the values for a single alpha factor.\n ::\n -----------------------------------\n date | asset |\n -----------------------------------\n | AAPL | 0.5\n -----------------------\n | BA | -1.1\n -----------------------\n 2014-01-01 | CMG | 1.7\n -----------------------\n | DAL | -0.1\n -----------------------\n | LULU | 2.7\n -----------------------\n\n prices : pd.DataFrame\n A wide form Pandas DataFrame indexed by timestamp with assets\n in the columns.\n Pricing data must span the factor analysis time period plus an\n additional buffer window that is greater than the maximum number\n of expected periods in the forward returns calculations.\n It is important to pass the correct pricing data in depending on\n what time of period your signal was generated so to avoid lookahead\n bias, or delayed calculations.\n 'Prices' must contain at least an entry for each timestamp/asset\n combination in 'factor'. This entry should reflect the buy price\n for the assets and usually it is the next available price after the\n factor is computed but it can also be a later price if the factor is\n meant to be traded later (e.g. if the factor is computed at market\n open but traded 1 hour after market open the price information should\n be 1 hour after market open).\n 'Prices' must also contain entries for timestamps following each\n timestamp/asset combination in 'factor', as many more timestamps\n as the maximum value in 'periods'. The asset price after 'period'\n timestamps will be considered the sell price for that asset when\n computing 'period' forward returns.\n ::\n ----------------------------------------------------\n | AAPL | BA | CMG | DAL | LULU |\n ----------------------------------------------------\n Date | | | | | |\n ----------------------------------------------------\n 2014-01-01 |605.12| 24.58| 11.72| 54.43 | 37.14 |\n ----------------------------------------------------\n 2014-01-02 |604.35| 22.23| 12.21| 52.78 | 33.63 |\n ----------------------------------------------------\n 2014-01-03 |607.94| 21.68| 14.36| 53.94 | 29.37 |\n ----------------------------------------------------\n\n groupby : pd.Series - MultiIndex or dict\n Either A MultiIndex Series indexed by date and asset,\n containing the period wise group codes for each asset, or\n a dict of asset to group mappings. If a dict is passed,\n it is assumed that group mappings are unchanged for the\n entire time period of the passed factor data.\n binning_by_group : bool\n If True, compute quantile buckets separately for each group.\n This is useful when the factor values range vary considerably\n across gorups so that it is wise to make the binning group relative.\n You should probably enable this if the factor is intended\n to be analyzed for a group neutral portfolio\n quantiles : int or sequence[float]\n Number of equal-sized quantile buckets to use in factor bucketing.\n Alternately sequence of quantiles, allowing non-equal-sized buckets\n e.g. [0, .10, .5, .90, 1.] 
or [.05, .5, .95]\n Only one of 'quantiles' or 'bins' can be not-None\n bins : int or sequence[float]\n Number of equal-width (valuewise) bins to use in factor bucketing.\n Alternately sequence of bin edges allowing for non-uniform bin width\n e.g. [-4, -2, -0.5, 0, 10]\n Chooses the buckets to be evenly spaced according to the values\n themselves. Useful when the factor contains discrete values.\n Only one of 'quantiles' or 'bins' can be not-None\n periods : sequence[int]\n periods to compute forward returns on.\n filter_zscore : int or float, optional\n Sets forward returns greater than X standard deviations\n from the the mean to nan. Set it to 'None' to avoid filtering.\n Caution: this outlier filtering incorporates lookahead bias.\n groupby_labels : dict\n A dictionary keyed by group code with values corresponding\n to the display name for each group.\n max_loss : float, optional\n Maximum percentage (0.00 to 1.00) of factor data dropping allowed,\n computed comparing the number of items in the input factor index and\n the number of items in the output DataFrame index.\n Factor data can be partially dropped due to being flawed itself\n (e.g. NaNs), not having provided enough price data to compute\n forward returns for all factor values, or because it is not possible\n to perform binning.\n Set max_loss=0 to avoid Exceptions suppression.\n zero_aware : bool, optional\n If True, compute quantile buckets separately for positive and negative\n signal values. This is useful if your signal is centered and zero is\n the separation between long and short signals, respectively.\n cumulative_returns : bool, optional\n If True, forward returns columns will contain cumulative returns.\n Setting this to False is useful if you want to analyze how predictive\n a factor is for a single forward day.\n\n Returns\n -------\n merged_data : pd.DataFrame - MultiIndex\n A MultiIndex Series indexed by date (level 0) and asset (level 1),\n containing the values for a single alpha factor, forward returns for\n each period, the factor quantile/bin that factor value belongs to, and\n (optionally) the group the asset belongs to.\n - forward returns column names follow the format accepted by\n pd.Timedelta (e.g. '1D', '30m', '3h15m', '1D1h', etc)\n - 'date' index freq property (merged_data.index.levels[0].freq) will be\n set to a trading calendar (pandas DateOffset) inferred from the input\n data (see infer_trading_calendar for more details). 
This is currently\n used only in cumulative returns computation\n ::\n -------------------------------------------------------------------\n | | 1D | 5D | 10D |factor|group|factor_quantile\n -------------------------------------------------------------------\n date | asset | | | | | |\n -------------------------------------------------------------------\n | AAPL | 0.09|-0.01|-0.079| 0.5 | G1 | 3\n --------------------------------------------------------\n | BA | 0.02| 0.06| 0.020| -1.1 | G2 | 5\n --------------------------------------------------------\n 2014-01-01 | CMG | 0.03| 0.09| 0.036| 1.7 | G2 | 1\n --------------------------------------------------------\n | DAL |-0.02|-0.06|-0.029| -0.1 | G3 | 5\n --------------------------------------------------------\n | LULU |-0.03| 0.05|-0.009| 2.7 | G1 | 2\n --------------------------------------------------------\n\n See Also\n --------\n utils.get_clean_factor\n For use when forward returns are already available.\n \"\"\"\n forward_returns = compute_forward_returns(\n factor,\n prices,\n periods,\n filter_zscore,\n cumulative_returns,\n )\n\n factor_data = get_clean_factor(factor, forward_returns, groupby=groupby,\n groupby_labels=groupby_labels,\n quantiles=quantiles, bins=bins,\n binning_by_group=binning_by_group,\n max_loss=max_loss, zero_aware=zero_aware)\n\n return factor_data\n\ndef rate_of_returns(ret, period):\n return ((np.nansum(ret) + 1)**(1. / period)) - 1\n\ndef rate_of_return(period_ret, base_period):\n \"\"\"\n Convert returns to 'one_period_len' rate of returns: that is the value the\n returns would have every 'one_period_len' if they had grown at a steady\n rate\n\n Parameters\n ----------\n period_ret: pd.DataFrame\n DataFrame containing returns values with column headings representing\n the return period.\n base_period: string\n The base period length used in the conversion\n It must follow pandas.Timedelta constructor format (e.g. '1 days',\n '1D', '30m', '3h', '1D1h', etc)\n\n Returns\n -------\n pd.DataFrame\n DataFrame in same format as input but with 'one_period_len' rate of\n returns values.\n \"\"\"\n period_len = period_ret.name\n conversion_factor = (pd.Timedelta(base_period) /\n pd.Timedelta(period_len))\n return period_ret.add(1).pow(conversion_factor).sub(1)\n\n\ndef std_conversion(period_std, base_period):\n \"\"\"\n one_period_len standard deviation (or standard error) approximation\n\n Parameters\n ----------\n period_std: pd.DataFrame\n DataFrame containing standard deviation or standard error values\n with column headings representing the return period.\n base_period: string\n The base period length used in the conversion\n It must follow pandas.Timedelta constructor format (e.g. 
'1 days',\n '1D', '30m', '3h', '1D1h', etc)\n\n Returns\n -------\n pd.DataFrame\n DataFrame in same format as input but with one-period\n standard deviation/error values.\n \"\"\"\n period_len = period_std.name\n conversion_factor = (pd.Timedelta(period_len) /\n pd.Timedelta(base_period))\n return period_std / np.sqrt(conversion_factor)\n\n\ndef get_forward_returns_columns(columns):\n \"\"\"\n Return the forward returns columns.\n \"\"\"\n pattern = re.compile(r\"^(return\\(.+\\))$\", re.IGNORECASE)\n valid_columns = [(pattern.match(col) is not None) for col in columns]\n return columns[valid_columns]\n\n\ndef timedelta_to_string(timedelta):\n \"\"\"\n Utility that converts a pandas.Timedelta to a string representation\n compatible with pandas.Timedelta constructor format\n\n Parameters\n ----------\n timedelta: pd.Timedelta\n\n Returns\n -------\n string\n string representation of 'timedelta'\n \"\"\"\n c = timedelta.components\n format = ''\n if c.days != 0:\n format += '%dD' % c.days\n if c.hours > 0:\n format += '%dh' % c.hours\n if c.minutes > 0:\n format += '%dm' % c.minutes\n if c.seconds > 0:\n format += '%ds' % c.seconds\n if c.milliseconds > 0:\n format += '%dms' % c.milliseconds\n if c.microseconds > 0:\n format += '%dus' % c.microseconds\n if c.nanoseconds > 0:\n format += '%dns' % c.nanoseconds\n return format\n\n\ndef timedelta_strings_to_integers(sequence):\n \"\"\"\n Converts pandas string representations of timedeltas into integers of days.\n\n Parameters\n ----------\n sequence : iterable\n List or array of timedelta string representations, e.g. ['1D', '5D'].\n\n Returns\n -------\n sequence : list\n Integer days corresponding to the input sequence, e.g. [1, 5].\n \"\"\"\n return list(map(lambda x: pd.Timedelta(x).days, sequence))\n\n\ndef add_custom_calendar_timedelta(input, timedelta, freq):\n \"\"\"\n Add timedelta to 'input' taking into consideration custom frequency, which\n is used to deal with custom calendars, such as a trading calendar\n\n Parameters\n ----------\n input : pd.DatetimeIndex or pd.Timestamp\n timedelta : pd.Timedelta\n freq : pd.DateOffset (CustomBusinessDay, Day or BusinessDay)\n\n Returns\n -------\n pd.DatetimeIndex or pd.Timestamp\n input + timedelta\n \"\"\"\n if not isinstance(freq, (Day, BusinessDay, CustomBusinessDay)):\n raise ValueError(\"freq must be Day, BDay or CustomBusinessDay\")\n days = timedelta.components.days\n offset = timedelta - pd.Timedelta(days=days)\n return input + freq * days + offset\n\n\ndef diff_custom_calendar_timedeltas(start, end, freq):\n \"\"\"\n Compute the difference between two timestamps, taking into consideration\n custom frequency, which is used to deal with custom calendars, such as a\n trading calendar\n\n Parameters\n ----------\n start : pd.Timestamp\n end : pd.Timestamp\n freq : pd.DateOffset (CustomBusinessDay, Day or BusinessDay),\n see infer_trading_calendar\n\n Returns\n -------\n pd.Timedelta\n end - start\n \"\"\"\n if not isinstance(freq, (Day, BusinessDay, CustomBusinessDay)):\n raise ValueError(\"freq must be Day, BusinessDay or CustomBusinessDay\")\n\n weekmask = getattr(freq, 'weekmask', None)\n holidays = getattr(freq, 'holidays', None)\n\n if weekmask is None and holidays is None:\n if isinstance(freq, Day):\n weekmask = 'Mon Tue Wed Thu Fri Sat Sun'\n holidays = []\n elif isinstance(freq, BusinessDay):\n weekmask = 'Mon Tue Wed Thu Fri'\n holidays = []\n\n if weekmask is not None and holidays is not None:\n # we prefer this method as it is faster\n actual_days = 
np.busday_count(np.array(start).astype('datetime64[D]'),\n np.array(end).astype('datetime64[D]'),\n weekmask, holidays)\n else:\n # default, it is slow\n actual_days = pd.date_range(start, end, freq=freq).shape[0] - 1\n if not freq.onOffset(start):\n actual_days -= 1\n\n timediff = end - start\n delta_days = timediff.components.days - actual_days\n return timediff - pd.Timedelta(days=delta_days)\n\n\ndef convert_to_forward_returns_columns(period):\n try:\n return 'period_{:d}'.format(period)\n except ValueError:\n return period\n\n\ndef ignore_warning(message='', category=Warning, module='', lineno=0, append=False):\n \"\"\"Filter warnings.\"\"\"\n def decorator(func):\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n with warnings.catch_warnings():\n warnings.filterwarnings('ignore', message=message, category=category,\n module=module, lineno=lineno, append=append)\n return func(*args, **kwargs)\n return func_wrapper\n\n return decorator\n\n\ndef ensure_tuple(x):\n if isinstance(x, six.string_types) or not isinstance(x, Iterable):\n return (x,)\n else:\n return tuple(x)\n\n\ndef Indicators(value):\n\n # Report labels (in Chinese): total return, annualized return, volatility,\n # Sharpe ratio, max drawdown, Calmar ratio, daily win rate, profit/loss ratio\n columns = ['总收益', '年化收益', '波动率', '夏普比', '最大回撤', '卡玛比率', '日胜率', '盈亏比']\n\n def MaxBack(value):\n\n drawback = []\n\n for i, v in enumerate(value):\n\n drawback.append(max(1-value[i:]/v))\n\n MaxBack = max(drawback)\n\n return MaxBack\n\n value = [i/value[0] for i in value]\n\n AllRtn = round(value[-1]*100-100, 2)\n\n AulRtn = round(pow(value[-1], 250/len(value))*100-100, 2)\n\n value = pd.Series(value)\n Rtns = value.pct_change(1).dropna()\n\n Volity = round(sqrt(Rtns.var()*250)*100, 2)\n SpRatio = round((AulRtn-4)/Volity, 2)\n MaxBack = round(MaxBack(value)*100, 2)\n CmRatio = round(AulRtn/MaxBack, 2)\n\n R1 = [i for i in Rtns.values if i > 0]\n R2 = [i for i in Rtns.values if i < 0]\n\n WinRate = round(len(R1)/(len(R1)+len(R2))*100, 3)\n BidRatio = round(-mean(R1)/mean(R2), 3)\n\n data = [AllRtn, AulRtn, Volity, SpRatio,\n MaxBack, CmRatio, WinRate, BidRatio]\n result = pd.Series(index=columns, data=data)\n\n return result\n"
] |
[
[
"pandas.Series",
"numpy.sqrt",
"pandas.DataFrame",
"numpy.concatenate",
"numpy.mean",
"pandas.Index",
"numpy.nansum",
"pandas.cut",
"pandas.set_option",
"pandas.concat",
"pandas.MultiIndex",
"pandas.Timedelta",
"pandas.MultiIndex.from_product",
"pandas.date_range",
"pandas.DataFrame.from_dict",
"numpy.array",
"pandas.tseries.offsets.CustomBusinessDay",
"numpy.isfinite",
"scipy.stats.mode",
"pandas.get_option",
"pandas.qcut"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
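`quantize_factor` in the record above buckets factor values into quantiles within each date cross-section via `pd.qcut`. A minimal illustration of that per-date bucketing on a toy MultiIndex factor; the factor values are random stand-ins.

```python
import numpy as np
import pandas as pd

np.random.seed(0)  # reproducible stand-in factor values

idx = pd.MultiIndex.from_product(
    [pd.date_range("2014-01-01", periods=3), ["AAPL", "BA", "CMG", "DAL", "LULU"]],
    names=["date", "asset"],
)
factor = pd.Series(np.random.randn(len(idx)), index=idx, name="factor")

# qcut within each date cross-section, labels shifted to 1..5 as in quantize_factor
factor_quantile = factor.groupby(level="date").transform(
    lambda x: pd.qcut(x, 5, labels=False) + 1
)
print(factor_quantile.unstack())  # dates as rows, assets as columns
```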
ExesiosPB/libm
|
[
"09c2638d895a4ba69e0d7f4f0e353f27d4b7911f",
"8cb05bd9704c77e700fd2462e032cefd9a3ef475"
] |
[
"tests/test_data_factory.py",
"tests/utils/ReferenceData.py"
] |
[
"import unittest\n\nimport pandas as pd\n\nfrom scripts import FilePaths\nfrom scripts import data_factory as factory\n\n\nclass TestDataFactory(unittest.TestCase):\n\n def setUp(self):\n self.__df = pd.read_pickle(FilePaths.us_patents_random_100_pickle_name)\n self.__df = self.__df.reset_index()\n\n def test_reads_xls(self):\n df = factory.get('tests/data/USPTO-random-100.xls')\n self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))\n\n def test_reads_xlsx(self):\n df = factory.get('tests/data/USPTO-random-100.xlsx')\n self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))\n\n @unittest.skip('Unicode char fails under windows; see task #172 bug')\n def test_reads_csv(self):\n df = factory.get('tests/data/USPTO-random-100.csv')\n self.assertListEqual(list(self.__df['abstract']), list(df['abstract']))\n\n def test_reads_pickles(self):\n df = factory.get('tests/data/USPTO-random-100.pkl.bz2')\n self.assertEquals(len(df['abstract']), 100)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"import pandas as pd\n\ngenerated_date_year = 1900\n\ncold_abstracts = [\n ('1', '''A refrigerator including a main body provided with a refrigerating chamber at an upper\n section and with a freezing chamber at a lower section, an ice making tray disposed in an upper space \n of an ice making chamber ined in the refrigerating chamber, a first storage container disposed in a \n lower space of the ice making chamber to store ice falling down from the ice making tray, and a second \n storage container disposed in a freezing chamber to store ice transferred from the ice making tray. The \n main body includes a guide channel to guide, when the first storage container reaches an ice-full \n state, ice falling from the ice making tray to the second storage container in the freezing chamber. \n The size of the ice making chamber is greatly reduced while a sufficient amount of the ice may be \n stored, thus securing a larger available space in the refrigerating chamber.'''),\n ('2', '''The invention provides methods and compositions for maintaining a refrigerating chamber at a \n constant temperature. This will maintain a quantity of ice stored ready for consumption.'''),\n ('3', '''An ice piece release system that includes a chilled compartment set at a temperature below \n 0.degree. C., a warm section at a temperature above 0.degree. C., and a tray in thermal communication \n with the chilled compartment. The tray includes a plurality of ice piece-forming receptacles and a \n cavity in thermal communication with the receptacles. The ice piece release system also includes a \n primary reservoir assembly in thermal communication with the warm section and fluid communication with \n the cavity of the tray. The ice piece release system further includes a heat-exchanging fluid having a \n freezing point below that of water, and the fluid resides in the primary reservoir assembly and the\n cavity of the tray. The primary reservoir assembly is further adapted to move at least a portion of the\n heat-exchanging fluid in the reservoir assembly into the cavity.'''),\n ('4', '''A refrigerator, in particular a household refrigerator, includes an utility chamber for cooled \n goods and a control device, with which a cold air flow can be introduced into the utility chamber when\n a cooling signal is present. A defrost heating element is rendered operative by the control device to \n prevent the formation of condensate and/or ice due to the cold air flow fed into the utility chamber. \n A timing element keeps the heating element out of operation for a predetermined time interval in \n response to the generation of the cooling signal.''')\n]\n\nrandom_abstracts = [\n ('101', '''Acoustic volume indicators for determining liquid or gas volume within a container comprise a \n contactor to vibrate a container wall, a detector to receive vibration data from the container wall, \n a processor to convert vibration data to frequency information and compare the frequency information to \n characteristic container frequency vs. volume data to obtain the measured volume, and an indicator for \n displaying the measured volume. The processor may comprise a microprocessor disposed within a housing \n having lights that each represent a particular volume. The microprocessor is calibrated to provide an \n output signal to a light that indicates the container volume. 
The processor may comprise a computer and \n computer program that converts the data to frequency information, analyzes the frequency information to \n identify a peak frequency, compares the peak frequency to the characteristic frequency vs. volume data \n to determine the measured volume, and displays the measured volume on a video monitor. '''),\n ('102', '''A single-module deployable bolted flange connection apparatus ( 10 ) makes up standard flange \n joints ( 24, 32 ) for various pipeline tie-in situations, such as spool piece connection and \n flowline-tree connections, without the use of divers and auxiliary multiple pieces of equipment. An \n outer Flange Alignment Frame (FAF) ( 14 ), carries one or more claws ( 38 ) for grabbing the pipe/spool\n to provide flange alignment. The claws are suspended and driven by a novel arrangement of five \n hydraulic rams ( 412 - 420 ) A crash-resistant inner frame ( 148 ) houses complete connection tooling \n ( 150, 152 etc.) The tooling performs the final alignment steps, inserts the gasket and studs, applies \n the required tension, and connects the nuts. Studs and nuts are stored separately from the tooling in\n an indexed carou, to permit multiple operations, reverse operations (disconnection), and re-work of \n failed steps, all without external intervention. '''),\n ('103', '''A passenger seat with increased knee space for an aft-seated passenger, including a seat base for \n being attached to a supporting deck and at least one seat frame including a seat back and seat bottom\n carried by the seat base. At least one arm rest assembly is carried by the seat frame and including an\n arm rest mounted for pivotal movement about a pivot member between a use position with an upper support\n surface in a horizontal position for supporting a forearm of a passenger seated in the seat, and a \n stowed position wherein the upper support surface of the arm rest is perpendicular to the use position. \n The arm rest pivot member is mounted on the seat frame at a point forward of a plane ined by the seat \n back carried by the seat and above a point ined by the seat bottom for allowing the knee of an \n aft-seated passenger to occupy space behind the pivot member of the arm rest. '''),\n ('104', '''A bag having two accesses, an inlet and an outlet, both containing closing devices for releasing \n or stopping the flow of a liquid that flows into or out of the bag. The inlet has a filtering element \n for retaining particles possibly produced by the coring phenomenon which can occur when the spike of \n the inlet ruptures the plug of the bottle. Also provided is a safety device used for permanently \n attaching the bottle to the inlet. '''),\n ('105', '''An x-ray tube assembly is provided comprising a tube casing assembly including a plurality of \n vertical mount posts. An insulator plate is mounted to the plurality of vertical mount posts such that \n the insulator plate can translate vertically on the posts. A cathode assembly is mounted to the \n insulator plate and generates both an eccentric moment and a vertical expansion in response to a \n cathode power load. A semi-compressible element is positioned between at least one of the vertical \n mount posts and the insulator plate. The semi-compressible element becomes incompressible at a cathode \n power threshold such that the vertical expansion is translated into a correction moment countering the \n eccentric moment. 
''')\n]\n\n\ndef patents_to_df(_patents, _generated_date_year):\n frames = []\n for patent in _patents:\n df = pd.DataFrame(\n {'patent_id': patent[0],\n 'application_id': \"blah\",\n 'related_document_ids': [[1, 2]],\n 'abstract': patent[1],\n 'inventor_names': [[3, 4]],\n 'inventor_countries': [[5, 6]],\n 'inventor_cities': [[7]],\n 'invention_title': \"blah\",\n 'claim1': \"blah\",\n 'classifications_cpc': [[8]],\n 'applicant_cities': [[9]],\n 'applicant_countries': [[12]],\n 'applicant_organisation': [[14]],\n 'application_date': \"blah\",\n 'publication_date': pd.Timestamp(f'{_generated_date_year}-11-04 00:00:00'),\n 'patents_cited': [[16]]\n })\n _generated_date_year += 1\n frames.append(df)\n\n combined_df = pd.concat(frames)\n combined_df.set_index('patent_id', inplace=True, drop=False, verify_integrity=True)\n return combined_df\n\n\ncold_df = patents_to_df(cold_abstracts, generated_date_year)\nrandom_df = patents_to_df(random_abstracts, generated_date_year)\n"
] |
[
[
"pandas.read_pickle"
],
[
"pandas.concat",
"pandas.Timestamp"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
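The tests in the record above exercise `data_factory.get`, which reads xls/xlsx/csv/pickle inputs through one entry point. The factory itself is not included in the record, so the following is only one plausible shape for that dispatch, not the project's actual implementation.

```python
import pandas as pd

# Hypothetical reader table; the suffixes come from the test file names above.
_READERS = {
    ".csv": pd.read_csv,
    ".xls": pd.read_excel,
    ".xlsx": pd.read_excel,
    ".pkl.bz2": pd.read_pickle,  # pandas infers bz2 compression from the name
}

def get(path: str) -> pd.DataFrame:
    """Dispatch to a pandas reader based on the file extension."""
    for suffix, reader in _READERS.items():
        if path.endswith(suffix):
            return reader(path)
    raise ValueError("Unsupported data file: %s" % path)
```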
Zims/scraping-flask-sample
|
[
"083a4cf142d26bd40c807b718dcbabae2efd3cb2"
] |
[
"scrapers/city24_scraper.py"
] |
[
"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nimport time\nfrom datetime import datetime, timezone, timedelta\nimport pytz\n\ntz=pytz.timezone(\"Europe/Riga\")\ntime_now = datetime.now(tz)\nformat = \"%Y-%m-%d-%T\"\ntime_now = time_now.strftime(format)\ntime_now = time_now\n\ndef refresh_time_24():\n tz=pytz.timezone(\"Europe/Riga\")\n time_now = datetime.now(tz)\n format = \"%Y-%m-%d-%T\"\n time_now = time_now.strftime(format)\n return time_now\n\n\ndef parse_city24_scraper():\n def parse_page_city24(page=0):\n for row in rows:\n d = {}\n try:\n d[\"address\"] = row.find(\"a\", {\"class\": \"addressLink\"}).find(\"span\").text.split(\",\")[0]\n except:\n d[\"address\"] = None\n\n try:\n d[\"istabas\"] = int(row.find(\"div\", {\"class\": \"column\"}).find(\"ol\").find_all(\"li\")[1].find(\"strong\").text)\n except:\n d[\"istabas\"] = None\n\n try:\n d[\"platiba\"] = float(row.find(\"div\", {\"class\": \"column\"}).find(\"ol\").find_all(\"li\")[0].find(\"strong\").text.split(\" \")[0])\n except:\n d[\"platiba\"] = None\n\n try:\n d[\"stavs\"] = row.find(\"div\", {\"class\": \"column\"}).find(\"ol\").find_all(\"li\")[2].find(\"strong\").text\n except:\n d[\"stavs\"] = None\n\n\n try:\n d[\"price_m2\"] = float(row.find(\"div\", {\"class\": \"price_sqrm\"}).text.replace(\" \", \"\").replace(\"EUR/m²\", \"\").replace(\",\", \".\"))\n except:\n d[\"price_m2\"] = None\n\n try:\n d[\"price\"] = int(row.find(\"div\", {\"class\": \"price\"}).find(\"div\").text.replace(\" EUR\", \"\").replace(\" \", \"\").strip())\n except:\n d[\"price\"] = None\n\n try:\n d[\"links\"] = row.find(\"a\", href=True)[\"href\"]\n except:\n d[\"links\"] = None\n\n try:\n d[\"vieta\"] = row.find(\"a\", {\"class\": \"addressLink\"}).find(\"span\").text.split(\",\")[1]\n except:\n d[\"vieta\"] = None\n \n\n # try:\n # d[\"promo\"] = row.find_all(\"div\", {\"class\": \"column\"})[1].find(\"div\", {\"class\": \"promo\"}).find(\"span\").text\n # except:\n # d[\"promo\"] = None\n d_list.append(d)\n refresh_time_24()\n\n headers = {'User-agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:61.0) Gecko/20100101 Firefox/61.0'}\n\n d_list = []\n\n# TODO set range to (0, 9)\n for page in range(0, 1):\n url = f\"https://www.city24.lv/real-estate-search/apartments-for-sale/R%C4%ABga-%C4%80genskalns/id=25875-city/pg={page}\"\n print(f\"Processing page nr: {page} ...\")\n print(url)\n\n response = requests.get(url, headers=headers)\n content = response.text\n\n soup = BeautifulSoup(content, \"html.parser\")\n print(content)\n # write content to file\n with open(f\"city24_scraper_{page}.html\", \"w\") as f:\n f.write(content)\n \n # table = soup.find(\"div\", {\"id\": \"list-container\"})\n # rows = table.find_all(\"li\", {\"class\": \"new result regular\"})\n\n time.sleep(0.5)\n\n# TODO uncoment next line\n # parse_page_city24(page)\n\n\n # create file\n df = pd.DataFrame(d_list)\n # print(df)\n # real filtered file\n # import pandas as pd\n # Create a Pandas Excel writer using XlsxWriter as the engine.\n\n writer = pd.ExcelWriter(f\"output/{refresh_time_24()}_city24.xlsx\", engine='xlsxwriter')\n\n\n # Convert the dataframe to an XlsxWriter Excel object. 
We also turn off the\n # index column at the left of the output dataframe.\n df.to_excel(writer, sheet_name='Sludinajumi')\n# .dropna()\n # Get the xlsxwriter workbook and worksheet objects.\n workbook = writer.book\n worksheet = writer.sheets['Sludinajumi']\n\n # Get the dimensions of the dataframe.\n (max_row, max_col) = df.shape\n\n # Make the columns wider for clarity.\n worksheet.set_column(0, max_col - 1, 12)\n\n # Set the autofilter.\n worksheet.autofilter(0, 0, max_row, max_col)\n\n # Close the Pandas Excel writer and output the Excel file.\n writer.save()\n\n# print(\"Done!\")\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
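The scraper record above wraps every field lookup in try/except so a missing element yields `None` instead of crashing the run (note that its row-selector lines are still commented out, so `rows` is undefined until they are restored). A self-contained miniature of that guarded-extraction pattern; the HTML sample and class names are made up for illustration.

```python
from bs4 import BeautifulSoup

html = """
<li class="result">
  <a class="addressLink" href="/object/1"><span>Nometņu iela 9, Āgenskalns</span></a>
  <div class="price"><div>85 000 EUR</div></div>
</li>
"""
row = BeautifulSoup(html, "html.parser").find("li", {"class": "result"})

d = {}
try:
    d["address"] = row.find("a", {"class": "addressLink"}).find("span").text.split(",")[0]
except AttributeError:  # element missing: record the gap instead of crashing
    d["address"] = None
try:
    d["price"] = int(
        row.find("div", {"class": "price"}).find("div").text
        .replace(" EUR", "").replace(" ", "")
    )
except (AttributeError, ValueError):
    d["price"] = None

print(d)  # {'address': 'Nometņu iela 9', 'price': 85000}
```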
raijinspecial/imgclsmob
|
[
"c5d3ab207a6304f1343e4394f0467bdc7403a72a",
"c87c0942420876941868c016211073dec4392e4d",
"c5d3ab207a6304f1343e4394f0467bdc7403a72a",
"c5d3ab207a6304f1343e4394f0467bdc7403a72a",
"c5d3ab207a6304f1343e4394f0467bdc7403a72a",
"c5d3ab207a6304f1343e4394f0467bdc7403a72a",
"c5d3ab207a6304f1343e4394f0467bdc7403a72a",
"c5d3ab207a6304f1343e4394f0467bdc7403a72a",
"c5d3ab207a6304f1343e4394f0467bdc7403a72a"
] |
[
"tensorflow_/tensorflowcv/models/mnasnet.py",
"pytorch/pytorchcv/models/ibnbresnet.py",
"pytorch/pytorchcv/models/peleenet.py",
"gluon/gluoncv2/models/densenet_cifar.py",
"gluon/gluoncv2/models/resnet_cifar.py",
"train_ch_cifar.py",
"pytorch/pytorchcv/models/sharesnet.py",
"chainer_/chainercv2/models/fishnet.py",
"gluon/gluoncv2/models/xdensenet.py"
] |
[
"\"\"\"\n MnasNet, implemented in TensorFlow.\n Original paper: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,' https://arxiv.org/abs/1807.11626.\n\"\"\"\n\n__all__ = ['MnasNet', 'mnasnet']\n\nimport os\nimport tensorflow as tf\nfrom .common import conv2d, batchnorm, is_channels_first, flatten\n\n\ndef conv_block(x,\n in_channels,\n out_channels,\n kernel_size,\n strides,\n padding,\n groups,\n activate,\n training,\n data_format,\n name=\"conv_block\"):\n \"\"\"\n Standard convolution block with Batch normalization and ReLU activation.\n\n Parameters:\n ----------\n x : Tensor\n Input tensor.\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n groups : int\n Number of groups.\n activate : bool\n Whether activate the convolution block.\n training : bool, or a TensorFlow boolean scalar tensor\n Whether to return the output in training mode or in inference mode.\n data_format : str\n The ordering of the dimensions in tensors.\n name : str, default 'conv_block'\n Block name.\n\n Returns\n -------\n Tensor\n Resulted tensor.\n \"\"\"\n x = conv2d(\n x=x,\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n groups=groups,\n use_bias=False,\n data_format=data_format,\n name=name + \"/conv\")\n x = batchnorm(\n x=x,\n training=training,\n data_format=data_format,\n name=name + \"/bn\")\n if activate:\n x = tf.nn.relu(x, name=name + \"/activ\")\n return x\n\n\ndef conv1x1_block(x,\n in_channels,\n out_channels,\n activate=True,\n training=False,\n data_format=\"channels_last\",\n name=\"conv1x1_block\"):\n \"\"\"\n 1x1 version of the standard convolution block.\n\n Parameters:\n ----------\n x : Tensor\n Input tensor.\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n activate : bool, default True\n Whether activate the convolution block.\n training : bool, or a TensorFlow boolean scalar tensor, default False\n Whether to return the output in training mode or in inference mode.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n name : str, default 'conv1x1_block'\n Block name.\n\n Returns\n -------\n Tensor\n Resulted tensor.\n \"\"\"\n return conv_block(\n x=x,\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n strides=1,\n padding=0,\n groups=1,\n activate=activate,\n training=training,\n data_format=data_format,\n name=name)\n\n\ndef dwconv_block(x,\n in_channels,\n out_channels,\n kernel_size,\n strides,\n activate=True,\n training=False,\n data_format=\"channels_last\",\n name=\"dwconv_block\"):\n \"\"\"\n Depthwise version of the standard convolution block.\n\n Parameters:\n ----------\n x : Tensor\n Input tensor.\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n activate : bool, default True\n Whether activate the convolution block.\n training : bool, or a TensorFlow boolean scalar tensor, default False\n Whether to return the output in training mode or in inference mode.\n data_format : str, default 'channels_last'\n The ordering of the dimensions 
in tensors.\n name : str, default 'dwconv_block'\n Block name.\n\n Returns\n -------\n Tensor\n Resulted tensor.\n \"\"\"\n return conv_block(\n x=x,\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=(kernel_size // 2),\n groups=out_channels,\n activate=activate,\n training=training,\n data_format=data_format,\n name=name)\n\n\ndef dws_conv_block(x,\n in_channels,\n out_channels,\n training,\n data_format,\n name=\"dws_conv_block\"):\n \"\"\"\n Depthwise separable convolution block with BatchNorms and activations at each convolution layer.\n\n Parameters:\n ----------\n x : Tensor\n Input tensor.\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n training : bool, or a TensorFlow boolean scalar tensor\n Whether to return the output in training mode or in inference mode.\n data_format : str\n The ordering of the dimensions in tensors.\n name : str, default 'dws_conv_block'\n Block name.\n\n Returns\n -------\n Tensor\n Resulted tensor.\n \"\"\"\n x = dwconv_block(\n x=x,\n in_channels=in_channels,\n out_channels=in_channels,\n kernel_size=3,\n strides=1,\n training=training,\n data_format=data_format,\n name=name + \"/dw_conv\")\n x = conv1x1_block(\n x=x,\n in_channels=in_channels,\n out_channels=out_channels,\n training=training,\n data_format=data_format,\n name=name + \"/pw_conv\")\n return x\n\n\ndef mnas_unit(x,\n in_channels,\n out_channels,\n kernel_size,\n strides,\n expansion_factor,\n training,\n data_format,\n name=\"mnas_unit\"):\n \"\"\"\n So-called 'Linear Bottleneck' layer. It is used as a MobileNetV2 unit.\n\n Parameters:\n ----------\n x : Tensor\n Input tensor.\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n expansion_factor : int\n Factor for expansion of channels.\n training : bool, or a TensorFlow boolean scalar tensor\n Whether to return the output in training mode or in inference mode.\n data_format : str\n The ordering of the dimensions in tensors.\n name : str, default 'mnas_unit'\n Unit name.\n\n Returns\n -------\n Tensor\n Resulted tensor.\n \"\"\"\n residual = (in_channels == out_channels) and (strides == 1)\n mid_channels = in_channels * expansion_factor\n\n if residual:\n identity = x\n\n x = conv1x1_block(\n x=x,\n in_channels=in_channels,\n out_channels=mid_channels,\n activate=True,\n training=training,\n data_format=data_format,\n name=name + \"/conv1\")\n x = dwconv_block(\n x=x,\n in_channels=mid_channels,\n out_channels=mid_channels,\n kernel_size=kernel_size,\n strides=strides,\n activate=True,\n training=training,\n data_format=data_format,\n name=name + \"/conv2\")\n x = conv1x1_block(\n x=x,\n in_channels=mid_channels,\n out_channels=out_channels,\n activate=False,\n training=training,\n data_format=data_format,\n name=name + \"/conv3\")\n\n if residual:\n x = x + identity\n\n return x\n\n\ndef mnas_init_block(x,\n in_channels,\n out_channels_list,\n training,\n data_format,\n name=\"mnas_init_block\"):\n \"\"\"\n MnasNet specific initial block.\n\n Parameters:\n ----------\n x : Tensor\n Input tensor.\n in_channels : int\n Number of input channels.\n out_channels_list : list of 2 int\n Numbers of output channels.\n training : bool, or a TensorFlow boolean scalar tensor\n Whether to 
return the output in training mode or in inference mode.\n data_format : str\n The ordering of the dimensions in tensors.\n name : str, default 'mnas_init_block'\n Block name.\n\n Returns\n -------\n Tensor\n Resulted tensor.\n \"\"\"\n x = conv_block(\n x=x,\n in_channels=in_channels,\n out_channels=out_channels_list[0],\n kernel_size=3,\n strides=2,\n padding=1,\n groups=1,\n activate=True,\n training=training,\n data_format=data_format,\n name=name + \"/conv1\")\n x = dws_conv_block(\n x=x,\n in_channels=out_channels_list[0],\n out_channels=out_channels_list[1],\n training=training,\n data_format=data_format,\n name=name + \"/conv2\")\n return x\n\n\nclass MnasNet(object):\n \"\"\"\n MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'\n https://arxiv.org/abs/1807.11626.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : list of 2 int\n Numbers of output channels for the initial unit.\n final_block_channels : int\n Number of output channels for the final block of the feature extractor.\n kernel_sizes : list of list of int\n Kernel sizes for each unit.\n expansion_factors : list of list of int\n Expansion factors for each unit.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n classes : int, default 1000\n Number of classification classes.\n data_format : str, default 'channels_last'\n The ordering of the dimensions in tensors.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n final_block_channels,\n kernel_sizes,\n expansion_factors,\n in_channels=3,\n in_size=(224, 224),\n classes=1000,\n data_format=\"channels_last\",\n **kwargs):\n super(MnasNet, self).__init__(**kwargs)\n assert (data_format in [\"channels_last\", \"channels_first\"])\n self.channels = channels\n self.init_block_channels = init_block_channels\n self.final_block_channels = final_block_channels\n self.kernel_sizes = kernel_sizes\n self.expansion_factors = expansion_factors\n self.in_channels = in_channels\n self.in_size = in_size\n self.classes = classes\n self.data_format = data_format\n\n def __call__(self,\n x,\n training=False):\n \"\"\"\n Build a model graph.\n\n Parameters:\n ----------\n x : Tensor\n Input tensor.\n training : bool, or a TensorFlow boolean scalar tensor, default False\n Whether to return the output in training mode or in inference mode.\n\n Returns\n -------\n Tensor\n Resulted tensor.\n \"\"\"\n in_channels = self.in_channels\n x = mnas_init_block(\n x=x,\n in_channels=in_channels,\n out_channels_list=self.init_block_channels,\n training=training,\n data_format=self.data_format,\n name=\"features/init_block\")\n in_channels = self.init_block_channels[-1]\n for i, channels_per_stage in enumerate(self.channels):\n kernel_sizes_per_stage = self.kernel_sizes[i]\n expansion_factors_per_stage = self.expansion_factors[i]\n for j, out_channels in enumerate(channels_per_stage):\n kernel_size = kernel_sizes_per_stage[j]\n expansion_factor = expansion_factors_per_stage[j]\n strides = 2 if (j == 0) else 1\n x = mnas_unit(\n x=x,\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n strides=strides,\n expansion_factor=expansion_factor,\n training=training,\n data_format=self.data_format,\n name=\"features/stage{}/unit{}\".format(i + 1, j + 1))\n in_channels = out_channels\n x = conv1x1_block(\n x=x,\n in_channels=in_channels,\n 
out_channels=self.final_block_channels,\n activate=True,\n training=training,\n data_format=self.data_format,\n name=\"features/final_block\")\n # in_channels = self.final_block_channels\n x = tf.layers.average_pooling2d(\n inputs=x,\n pool_size=7,\n strides=1,\n data_format=self.data_format,\n name=\"features/final_pool\")\n\n # x = tf.layers.flatten(x)\n x = flatten(\n x=x,\n data_format=self.data_format)\n x = tf.layers.dense(\n inputs=x,\n units=self.classes,\n name=\"output\")\n\n return x\n\n\ndef get_mnasnet(model_name=None,\n pretrained=False,\n root=os.path.join('~', '.keras', 'models'),\n **kwargs):\n \"\"\"\n Create MnasNet model with specific parameters.\n\n Parameters:\n ----------\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.keras/models'\n Location for keeping the model parameters.\n\n Returns\n -------\n functor\n Functor for model graph creation with extra fields.\n \"\"\"\n\n init_block_channels = [32, 16]\n final_block_channels = 1280\n layers = [3, 3, 3, 2, 4, 1]\n downsample = [1, 1, 1, 0, 1, 0]\n channels_per_layers = [24, 40, 80, 96, 192, 320]\n expansion_factors_per_layers = [3, 3, 6, 6, 6, 6]\n kernel_sizes_per_layers = [3, 5, 5, 3, 5, 3]\n default_kernel_size = 3\n\n from functools import reduce\n channels = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],\n zip(channels_per_layers, layers, downsample), [])\n kernel_sizes = reduce(lambda x, y: x + [[y[0]] + [default_kernel_size] * (y[1] - 1)] if y[2] != 0 else x[:-1] + [\n x[-1] + [y[0]] + [default_kernel_size] * (y[1] - 1)], zip(kernel_sizes_per_layers, layers, downsample), [])\n expansion_factors = reduce(lambda x, y: x + [[y[0]] * y[1]] if y[2] != 0 else x[:-1] + [x[-1] + [y[0]] * y[1]],\n zip(expansion_factors_per_layers, layers, downsample), [])\n\n net = MnasNet(\n channels=channels,\n init_block_channels=init_block_channels,\n final_block_channels=final_block_channels,\n kernel_sizes=kernel_sizes,\n expansion_factors=expansion_factors,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import download_state_dict\n net.state_dict, net.file_path = download_state_dict(\n model_name=model_name,\n local_model_store_dir_path=root)\n else:\n net.state_dict = None\n net.file_path = None\n\n return net\n\n\ndef mnasnet(**kwargs):\n \"\"\"\n MnasNet model from 'MnasNet: Platform-Aware Neural Architecture Search for Mobile,'\n https://arxiv.org/abs/1807.11626.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.keras/models'\n Location for keeping the model parameters.\n\n Returns\n -------\n functor\n Functor for model graph creation with extra fields.\n \"\"\"\n return get_mnasnet(model_name=\"mnasnet\", **kwargs)\n\n\ndef _test():\n import numpy as np\n\n data_format = \"channels_last\"\n pretrained = False\n\n models = [\n mnasnet,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained, data_format=data_format)\n x = tf.placeholder(\n dtype=tf.float32,\n shape=(None, 3, 224, 224) if is_channels_first(data_format) else (None, 224, 224, 3),\n name=\"xx\")\n y_net = net(x)\n\n weight_count = np.sum([np.prod(v.get_shape().as_list()) for v in 
tf.trainable_variables()])\n print(\"m={}, {}\".format(model.__name__, weight_count))\n # assert (model != mnasnet or weight_count == 4308816)\n\n with tf.Session() as sess:\n if pretrained:\n from .model_store import init_variables_from_state_dict\n init_variables_from_state_dict(sess=sess, state_dict=net.state_dict)\n else:\n sess.run(tf.global_variables_initializer())\n x_value = np.zeros((1, 3, 224, 224) if is_channels_first(data_format) else (1, 224, 224, 3), np.float32)\n y = sess.run(y_net, feed_dict={x: x_value})\n assert (y.shape == (1, 1000))\n tf.reset_default_graph()\n\n\nif __name__ == \"__main__\":\n _test()\n",
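The three reduce(...) expressions in get_mnasnet above are dense; the following standalone sketch (plain loops instead of reduce, all names local to the sketch) unrolls the channel list for the default configuration. A zero in downsample folds a layer group into the previous stage, which is why the third and fourth stages mix channel counts.

layers = [3, 3, 3, 2, 4, 1]
downsample = [1, 1, 1, 0, 1, 0]
channels_per_layers = [24, 40, 80, 96, 192, 320]

channels = []
for c, l, d in zip(channels_per_layers, layers, downsample):
    if d != 0:
        channels.append([c] * l)      # downsample -> open a new stage
    else:
        channels[-1].extend([c] * l)  # no downsample -> extend the last stage

# Matches the output of the reduce expression in get_mnasnet:
assert channels == [[24, 24, 24], [40, 40, 40],
                    [80, 80, 80, 96, 96], [192, 192, 192, 192, 320]]

The kernel_sizes and expansion_factors lists are built with the same merge rule.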
"\"\"\"\n IBN(b)-ResNet, implemented in PyTorch.\n Original paper: 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'\n https://arxiv.org/abs/1807.09441.\n\"\"\"\n\n__all__ = ['IBNbResNet', 'ibnb_resnet50', 'ibnb_resnet101', 'ibnb_resnet152']\n\nimport os\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom .common import conv1x1_block\nfrom .resnet import ResBottleneck\n\n\nclass IBNbConvBlock(nn.Module):\n \"\"\"\n IBN(b)-ResNet specific convolution block with Instance normalization and ReLU activation.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n activate : bool, default True\n Whether activate the convolution block.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n dilation=1,\n groups=1,\n bias=False,\n activate=True):\n super(IBNbConvBlock, self).__init__()\n self.activate = activate\n\n self.conv = nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n self.inst_norm = nn.InstanceNorm2d(\n num_features=out_channels,\n affine=True)\n if self.activate:\n self.activ = nn.ReLU(inplace=True)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.inst_norm(x)\n if self.activate:\n x = self.activ(x)\n return x\n\n\ndef ibnb_conv7x7_block(in_channels,\n out_channels,\n stride=1,\n padding=3,\n bias=False,\n activate=True):\n \"\"\"\n 7x7 version of the IBN(b)-ResNet specific convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 3\n Padding value for convolution layer.\n bias : bool, default False\n Whether the layer uses a bias vector.\n activate : bool, default True\n Whether activate the convolution block.\n \"\"\"\n return IBNbConvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=7,\n stride=stride,\n padding=padding,\n bias=bias,\n activate=activate)\n\n\nclass IBNbResUnit(nn.Module):\n \"\"\"\n IBN(b)-ResNet unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n use_inst_norm : bool\n Whether to use instance normalization.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride,\n use_inst_norm):\n super(IBNbResUnit, self).__init__()\n self.use_inst_norm = use_inst_norm\n self.resize_identity = (in_channels != out_channels) or (stride != 1)\n\n self.body = ResBottleneck(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n conv1_stride=False)\n if self.resize_identity:\n self.identity_conv = conv1x1_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n activate=False)\n if self.use_inst_norm:\n self.inst_norm = nn.InstanceNorm2d(\n 
num_features=out_channels,\n affine=True)\n self.activ = nn.ReLU(inplace=True)\n\n def forward(self, x):\n if self.resize_identity:\n identity = self.identity_conv(x)\n else:\n identity = x\n x = self.body(x)\n x = x + identity\n if self.use_inst_norm:\n x = self.inst_norm(x)\n x = self.activ(x)\n return x\n\n\nclass IBNbResInitBlock(nn.Module):\n \"\"\"\n IBN(b)-ResNet specific initial block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels):\n super(IBNbResInitBlock, self).__init__()\n self.conv = ibnb_conv7x7_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=2)\n self.pool = nn.MaxPool2d(\n kernel_size=3,\n stride=2,\n padding=1)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.pool(x)\n return x\n\n\nclass IBNbResNet(nn.Module):\n \"\"\"\n IBN(b)-ResNet model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'\n https://arxiv.org/abs/1807.09441.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n num_classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n in_channels=3,\n in_size=(224, 224),\n num_classes=1000):\n super(IBNbResNet, self).__init__()\n self.in_size = in_size\n self.num_classes = num_classes\n\n self.features = nn.Sequential()\n self.features.add_module(\"init_block\", IBNbResInitBlock(\n in_channels=in_channels,\n out_channels=init_block_channels))\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n stage = nn.Sequential()\n for j, out_channels in enumerate(channels_per_stage):\n stride = 2 if (j == 0) and (i != 0) else 1\n use_inst_norm = (i < 2) and (j == len(channels_per_stage) - 1)\n stage.add_module(\"unit{}\".format(j + 1), IBNbResUnit(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n use_inst_norm=use_inst_norm))\n in_channels = out_channels\n self.features.add_module(\"stage{}\".format(i + 1), stage)\n self.features.add_module(\"final_pool\", nn.AvgPool2d(\n kernel_size=7,\n stride=1))\n\n self.output = nn.Linear(\n in_features=in_channels,\n out_features=num_classes)\n\n self._init_params()\n\n def _init_params(self):\n for name, module in self.named_modules():\n if isinstance(module, nn.Conv2d):\n init.kaiming_uniform_(module.weight)\n if module.bias is not None:\n init.constant_(module.bias, 0)\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.output(x)\n return x\n\n\ndef get_ibnbresnet(blocks,\n model_name=None,\n pretrained=False,\n root=os.path.join('~', '.torch', 'models'),\n **kwargs):\n \"\"\"\n Create IBN(b)-ResNet model with specific parameters.\n\n Parameters:\n ----------\n blocks : int\n Number of blocks.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n\n if blocks == 50:\n layers = [3, 4, 6, 3]\n elif blocks == 101:\n layers = [3, 4, 23, 3]\n elif blocks == 152:\n layers = [3, 8, 36, 
3]\n else:\n raise ValueError(\"Unsupported IBN(b)-ResNet with number of blocks: {}\".format(blocks))\n\n init_block_channels = 64\n channels_per_layers = [256, 512, 1024, 2048]\n channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]\n\n net = IBNbResNet(\n channels=channels,\n init_block_channels=init_block_channels,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import download_model\n download_model(\n net=net,\n model_name=model_name,\n local_model_store_dir_path=root)\n\n return net\n\n\ndef ibnb_resnet50(**kwargs):\n \"\"\"\n IBN(b)-ResNet-50 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'\n https://arxiv.org/abs/1807.09441.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_ibnbresnet(blocks=50, model_name=\"ibnb_resnet50\", **kwargs)\n\n\ndef ibnb_resnet101(**kwargs):\n \"\"\"\n IBN(b)-ResNet-101 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'\n https://arxiv.org/abs/1807.09441.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_ibnbresnet(blocks=101, model_name=\"ibnb_resnet101\", **kwargs)\n\n\ndef ibnb_resnet152(**kwargs):\n \"\"\"\n IBN(b)-ResNet-152 model from 'Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net,'\n https://arxiv.org/abs/1807.09441.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_ibnbresnet(blocks=152, model_name=\"ibnb_resnet152\", **kwargs)\n\n\ndef _calc_width(net):\n import numpy as np\n net_params = filter(lambda p: p.requires_grad, net.parameters())\n weight_count = 0\n for param in net_params:\n weight_count += np.prod(param.size())\n return weight_count\n\n\ndef _test():\n import torch\n from torch.autograd import Variable\n\n pretrained = False\n\n models = [\n ibnb_resnet50,\n ibnb_resnet101,\n ibnb_resnet152,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n # net.train()\n net.eval()\n weight_count = _calc_width(net)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != ibnb_resnet50 or weight_count == 25558568)\n assert (model != ibnb_resnet101 or weight_count == 44550696)\n assert (model != ibnb_resnet152 or weight_count == 60194344)\n\n x = Variable(torch.randn(1, 3, 224, 224))\n y = net(x)\n assert (tuple(y.size()) == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n",
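A small standalone sketch of the placement rule in IBNbResNet.__init__ above (plain Python, no torch required): instance normalization after the residual addition is attached only to the last unit of the first two stages, in addition to the InstanceNorm inside the 7x7 init block.

layers = [3, 4, 6, 3]  # stage depths for ibnb_resnet50
inst_norm_units = []
for i, depth in enumerate(layers):
    for j in range(depth):
        if (i < 2) and (j == depth - 1):  # rule copied from IBNbResNet.__init__
            inst_norm_units.append("stage{}/unit{}".format(i + 1, j + 1))

assert inst_norm_units == ["stage1/unit3", "stage2/unit4"]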
"\"\"\"\n PeleeNet, implemented in PyTorch.\n Original paper: 'Pelee: A Real-Time Object Detection System on Mobile Devices,' https://arxiv.org/abs/1804.06882.\n\"\"\"\n\n__all__ = ['PeleeNet', 'peleenet']\n\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom .common import conv1x1_block, conv3x3_block, Concurrent\n\n\nclass PeleeBranch1(nn.Module):\n \"\"\"\n PeleeNet branch type 1 block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n mid_channels : int\n Number of intermediate channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the second convolution.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n mid_channels,\n stride=1):\n super(PeleeBranch1, self).__init__()\n self.conv1 = conv1x1_block(\n in_channels=in_channels,\n out_channels=mid_channels)\n self.conv2 = conv3x3_block(\n in_channels=mid_channels,\n out_channels=out_channels,\n stride=stride)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n\nclass PeleeBranch2(nn.Module):\n \"\"\"\n PeleeNet branch type 2 block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n mid_channels : int\n Number of intermediate channels.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n mid_channels):\n super(PeleeBranch2, self).__init__()\n self.conv1 = conv1x1_block(\n in_channels=in_channels,\n out_channels=mid_channels)\n self.conv2 = conv3x3_block(\n in_channels=mid_channels,\n out_channels=out_channels)\n self.conv3 = conv3x3_block(\n in_channels=out_channels,\n out_channels=out_channels)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n return x\n\n\nclass StemBlock(nn.Module):\n \"\"\"\n PeleeNet stem block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels):\n super(StemBlock, self).__init__()\n mid1_channels = out_channels // 2\n mid2_channels = out_channels * 2\n\n self.first_conv = conv3x3_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=2)\n\n self.branches = Concurrent()\n self.branches.add_module(\"branch1\", PeleeBranch1(\n in_channels=out_channels,\n out_channels=out_channels,\n mid_channels=mid1_channels,\n stride=2))\n self.branches.add_module(\"branch2\", nn.MaxPool2d(\n kernel_size=2,\n stride=2,\n padding=0))\n\n self.last_conv = conv1x1_block(\n in_channels=mid2_channels,\n out_channels=out_channels)\n\n def forward(self, x):\n x = self.first_conv(x)\n x = self.branches(x)\n x = self.last_conv(x)\n return x\n\n\nclass DenseBlock(nn.Module):\n \"\"\"\n PeleeNet dense block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n bottleneck_size : int\n Bottleneck width.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n bottleneck_size):\n super(DenseBlock, self).__init__()\n inc_channels = (out_channels - in_channels) // 2\n mid_channels = inc_channels * bottleneck_size\n\n self.branch1 = PeleeBranch1(\n in_channels=in_channels,\n out_channels=inc_channels,\n mid_channels=mid_channels)\n self.branch2 = PeleeBranch2(\n in_channels=in_channels,\n out_channels=inc_channels,\n mid_channels=mid_channels)\n\n def forward(self, x):\n x1 = self.branch1(x)\n x2 = self.branch2(x)\n x = 
torch.cat((x, x1, x2), dim=1)\n return x\n\n\nclass TransitionBlock(nn.Module):\n \"\"\"\n PeleeNet's transition block, like in DenseNet, but with an ordinary convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels):\n super(TransitionBlock, self).__init__()\n self.conv = conv1x1_block(\n in_channels=in_channels,\n out_channels=out_channels)\n self.pool = nn.AvgPool2d(\n kernel_size=2,\n stride=2,\n padding=0)\n\n def forward(self, x):\n x = self.conv(x)\n x = self.pool(x)\n return x\n\n\nclass PeleeNet(nn.Module):\n \"\"\"\n PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,'\n https://arxiv.org/abs/1804.06882.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n bottleneck_sizes : list of int\n Bottleneck sizes for each stage.\n dropout_rate : float, default 0.5\n Parameter of Dropout layer. Fraction of the input units to drop.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n num_classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n bottleneck_sizes,\n dropout_rate=0.5,\n in_channels=3,\n in_size=(224, 224),\n num_classes=1000):\n super(PeleeNet, self).__init__()\n self.in_size = in_size\n self.num_classes = num_classes\n\n self.features = nn.Sequential()\n self.features.add_module(\"init_block\", StemBlock(\n in_channels=in_channels,\n out_channels=init_block_channels))\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n bottleneck_size = bottleneck_sizes[i]\n stage = nn.Sequential()\n if i != 0:\n stage.add_module(\"trans{}\".format(i + 1), TransitionBlock(\n in_channels=in_channels,\n out_channels=in_channels))\n for j, out_channels in enumerate(channels_per_stage):\n stage.add_module(\"unit{}\".format(j + 1), DenseBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n bottleneck_size=bottleneck_size))\n in_channels = out_channels\n self.features.add_module(\"stage{}\".format(i + 1), stage)\n self.features.add_module(\"final_block\", conv1x1_block(\n in_channels=in_channels,\n out_channels=in_channels))\n self.features.add_module(\"final_pool\", nn.AvgPool2d(\n kernel_size=7,\n stride=1))\n\n self.output = nn.Sequential()\n self.output.add_module('dropout', nn.Dropout(p=dropout_rate))\n self.output.add_module('fc', nn.Linear(\n in_features=in_channels,\n out_features=num_classes))\n\n self._init_params()\n\n def _init_params(self):\n for name, module in self.named_modules():\n if isinstance(module, nn.Conv2d):\n init.kaiming_uniform_(module.weight)\n if module.bias is not None:\n init.constant_(module.bias, 0)\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.output(x)\n return x\n\n\ndef get_peleenet(model_name=None,\n pretrained=False,\n root=os.path.join('~', '.torch', 'models'),\n **kwargs):\n \"\"\"\n Create PeleeNet model with specific parameters.\n\n Parameters:\n ----------\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the 
model parameters.\n \"\"\"\n\n init_block_channels = 32\n growth_rate = 32\n layers = [3, 4, 8, 6]\n bottleneck_sizes = [1, 2, 4, 4]\n\n from functools import reduce\n channels = reduce(\n lambda xi, yi: xi + [reduce(\n lambda xj, yj: xj + [xj[-1] + yj],\n [growth_rate] * yi,\n [xi[-1][-1]])[1:]],\n layers,\n [[init_block_channels]])[1:]\n\n net = PeleeNet(\n channels=channels,\n init_block_channels=init_block_channels,\n bottleneck_sizes=bottleneck_sizes,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import download_model\n download_model(\n net=net,\n model_name=model_name,\n local_model_store_dir_path=root)\n\n return net\n\n\ndef peleenet(**kwargs):\n \"\"\"\n PeleeNet model from 'Pelee: A Real-Time Object Detection System on Mobile Devices,'\n https://arxiv.org/abs/1804.06882.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_peleenet(model_name=\"peleenet\", **kwargs)\n\n\ndef _calc_width(net):\n import numpy as np\n net_params = filter(lambda p: p.requires_grad, net.parameters())\n weight_count = 0\n for param in net_params:\n weight_count += np.prod(param.size())\n return weight_count\n\n\ndef _test():\n import torch\n from torch.autograd import Variable\n\n pretrained = False\n\n models = [\n peleenet,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n # net.train()\n net.eval()\n weight_count = _calc_width(net)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != peleenet or weight_count == 2802248)\n\n x = Variable(torch.randn(1, 3, 224, 224))\n y = net(x)\n assert (tuple(y.size()) == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n",
"\"\"\"\n DenseNet for CIFAR, implemented in Gluon.\n Original paper: 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.\n\"\"\"\n\n__all__ = ['CIFARDenseNet', 'densenet40_k12_cifar10', 'densenet40_k12_cifar100', 'densenet40_k12_bc_cifar10',\n 'densenet40_k12_bc_cifar100', 'densenet40_k24_bc_cifar10', 'densenet40_k24_bc_cifar100',\n 'densenet40_k36_bc_cifar10', 'densenet40_k36_bc_cifar100', 'densenet100_k12_cifar10',\n 'densenet100_k12_cifar100', 'densenet100_k24_cifar10', 'densenet100_k24_cifar100',\n 'densenet100_k12_bc_cifar10', 'densenet100_k12_bc_cifar10', 'densenet100_k12_bc_cifar100',\n 'densenet190_k40_bc_cifar10', 'densenet190_k40_bc_cifar100', 'densenet250_k24_bc_cifar10',\n 'densenet250_k24_bc_cifar100']\n\nimport os\nfrom mxnet import cpu\nfrom mxnet.gluon import nn, HybridBlock\nfrom .common import conv3x3, pre_conv3x3_block\nfrom .preresnet import PreResActivation\nfrom .densenet import DenseUnit, TransitionBlock\n\n\nclass DenseSimpleUnit(HybridBlock):\n \"\"\"\n DenseNet simple unit for CIFAR.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n bn_use_global_stats : bool\n Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.\n dropout_rate : bool\n Parameter of Dropout layer. Faction of the input units to drop.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n bn_use_global_stats,\n dropout_rate,\n **kwargs):\n super(DenseSimpleUnit, self).__init__(**kwargs)\n self.use_dropout = (dropout_rate != 0.0)\n inc_channels = out_channels - in_channels\n\n with self.name_scope():\n self.conv = pre_conv3x3_block(\n in_channels=in_channels,\n out_channels=inc_channels,\n bn_use_global_stats=bn_use_global_stats)\n if self.use_dropout:\n self.dropout = nn.Dropout(rate=dropout_rate)\n\n def hybrid_forward(self, F, x):\n identity = x\n x = self.conv(x)\n if self.use_dropout:\n x = self.dropout(x)\n x = F.concat(identity, x, dim=1)\n return x\n\n\nclass CIFARDenseNet(HybridBlock):\n \"\"\"\n DenseNet model for CIFAR from 'Densely Connected Convolutional Networks,' https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n bottleneck : bool\n Whether to use a bottleneck or simple block in units.\n bn_use_global_stats : bool, default False\n Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.\n Useful for fine-tuning.\n dropout_rate : float, default 0.0\n Parameter of Dropout layer. 
Fraction of the input units to drop.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (32, 32)\n Spatial size of the expected input image.\n classes : int, default 10\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n bottleneck,\n bn_use_global_stats=False,\n dropout_rate=0.0,\n in_channels=3,\n in_size=(32, 32),\n classes=10,\n **kwargs):\n super(CIFARDenseNet, self).__init__(**kwargs)\n self.in_size = in_size\n self.classes = classes\n unit_class = DenseUnit if bottleneck else DenseSimpleUnit\n\n with self.name_scope():\n self.features = nn.HybridSequential(prefix='')\n self.features.add(conv3x3(\n in_channels=in_channels,\n out_channels=init_block_channels))\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n stage = nn.HybridSequential(prefix=\"stage{}_\".format(i + 1))\n with stage.name_scope():\n if i != 0:\n stage.add(TransitionBlock(\n in_channels=in_channels,\n out_channels=(in_channels // 2),\n bn_use_global_stats=bn_use_global_stats))\n in_channels = in_channels // 2\n for j, out_channels in enumerate(channels_per_stage):\n stage.add(unit_class(\n in_channels=in_channels,\n out_channels=out_channels,\n bn_use_global_stats=bn_use_global_stats,\n dropout_rate=dropout_rate))\n in_channels = out_channels\n self.features.add(stage)\n self.features.add(PreResActivation(\n in_channels=in_channels,\n bn_use_global_stats=bn_use_global_stats))\n self.features.add(nn.AvgPool2D(\n pool_size=8,\n strides=1))\n\n self.output = nn.HybridSequential(prefix='')\n self.output.add(nn.Flatten())\n self.output.add(nn.Dense(\n units=classes,\n in_units=in_channels))\n\n def hybrid_forward(self, F, x):\n x = self.features(x)\n x = self.output(x)\n return x\n\n\ndef get_densenet_cifar(classes,\n blocks,\n growth_rate,\n bottleneck,\n model_name=None,\n pretrained=False,\n ctx=cpu(),\n root=os.path.join('~', '.mxnet', 'models'),\n **kwargs):\n \"\"\"\n Create DenseNet model for CIFAR with specific parameters.\n\n Parameters:\n ----------\n classes : int\n Number of classification classes.\n blocks : int\n Number of blocks.\n growth_rate : int\n Growth rate.\n bottleneck : bool\n Whether to use a bottleneck or simple block in units.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n assert (classes in [10, 100])\n\n if bottleneck:\n assert ((blocks - 4) % 6 == 0)\n layers = [(blocks - 4) // 6] * 3\n else:\n assert ((blocks - 4) % 3 == 0)\n layers = [(blocks - 4) // 3] * 3\n init_block_channels = 2 * growth_rate\n\n from functools import reduce\n channels = reduce(\n lambda xi, yi: xi + [reduce(\n lambda xj, yj: xj + [xj[-1] + yj],\n [growth_rate] * yi,\n [xi[-1][-1] // 2])[1:]],\n layers,\n [[init_block_channels * 2]])[1:]\n\n net = CIFARDenseNet(\n channels=channels,\n init_block_channels=init_block_channels,\n classes=classes,\n bottleneck=bottleneck,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import get_model_file\n net.load_parameters(\n filename=get_model_file(\n model_name=model_name,\n 
local_model_store_dir_path=root),\n ctx=ctx)\n\n return net\n\n\ndef densenet40_k12_cifar10(classes=10, **kwargs):\n \"\"\"\n DenseNet-40 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=False,\n model_name=\"densenet40_k12_cifar10\", **kwargs)\n\n\ndef densenet40_k12_cifar100(classes=100, **kwargs):\n \"\"\"\n DenseNet-40 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=False,\n model_name=\"densenet40_k12_cifar100\", **kwargs)\n\n\ndef densenet40_k12_bc_cifar10(classes=10, **kwargs):\n \"\"\"\n DenseNet-BC-40 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=True,\n model_name=\"densenet40_k12_bc_cifar10\", **kwargs)\n\n\ndef densenet40_k12_bc_cifar100(classes=100, **kwargs):\n \"\"\"\n DenseNet-BC-40 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=40, growth_rate=12, bottleneck=True,\n model_name=\"densenet40_k12_bc_cifar100\", **kwargs)\n\n\ndef densenet40_k24_bc_cifar10(classes=10, **kwargs):\n \"\"\"\n DenseNet-BC-40 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True,\n model_name=\"densenet40_k24_bc_cifar10\", **kwargs)\n\n\ndef 
densenet40_k24_bc_cifar100(classes=100, **kwargs):\n \"\"\"\n DenseNet-BC-40 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=40, growth_rate=24, bottleneck=True,\n model_name=\"densenet40_k24_bc_cifar100\", **kwargs)\n\n\ndef densenet40_k36_bc_cifar10(classes=10, **kwargs):\n \"\"\"\n DenseNet-BC-40 (k=36) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True,\n model_name=\"densenet40_k36_bc_cifar10\", **kwargs)\n\n\ndef densenet40_k36_bc_cifar100(classes=100, **kwargs):\n \"\"\"\n DenseNet-BC-40 (k=36) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=40, growth_rate=36, bottleneck=True,\n model_name=\"densenet40_k36_bc_cifar100\", **kwargs)\n\n\ndef densenet100_k12_cifar10(classes=10, **kwargs):\n \"\"\"\n DenseNet-100 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=False,\n model_name=\"densenet100_k12_cifar10\", **kwargs)\n\n\ndef densenet100_k12_cifar100(classes=100, **kwargs):\n \"\"\"\n DenseNet-100 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=False,\n model_name=\"densenet100_k12_cifar100\", **kwargs)\n\n\ndef densenet100_k24_cifar10(classes=10, **kwargs):\n \"\"\"\n 
DenseNet-100 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=100, growth_rate=24, bottleneck=False,\n model_name=\"densenet100_k24_cifar10\", **kwargs)\n\n\ndef densenet100_k24_cifar100(classes=100, **kwargs):\n \"\"\"\n DenseNet-100 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=100, growth_rate=24, bottleneck=False,\n model_name=\"densenet100_k24_cifar100\", **kwargs)\n\n\ndef densenet100_k12_bc_cifar10(classes=10, **kwargs):\n \"\"\"\n DenseNet-BC-100 (k=12) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=True,\n model_name=\"densenet100_k12_bc_cifar10\", **kwargs)\n\n\ndef densenet100_k12_bc_cifar100(classes=100, **kwargs):\n \"\"\"\n DenseNet-BC-100 (k=12) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=100, growth_rate=12, bottleneck=True,\n model_name=\"densenet100_k12_bc_cifar100\", **kwargs)\n\n\ndef densenet190_k40_bc_cifar10(classes=10, **kwargs):\n \"\"\"\n DenseNet-BC-190 (k=40) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=190, growth_rate=40, bottleneck=True,\n model_name=\"densenet190_k40_bc_cifar10\", **kwargs)\n\n\ndef densenet190_k40_bc_cifar100(classes=100, **kwargs):\n \"\"\"\n DenseNet-BC-190 (k=40) model for CIFAR-100 from 
'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=190, growth_rate=40, bottleneck=True,\n model_name=\"densenet190_k40_bc_cifar100\", **kwargs)\n\n\ndef densenet250_k24_bc_cifar10(classes=10, **kwargs):\n \"\"\"\n DenseNet-BC-250 (k=24) model for CIFAR-10 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=250, growth_rate=24, bottleneck=True,\n model_name=\"densenet250_k24_bc_cifar10\", **kwargs)\n\n\ndef densenet250_k24_bc_cifar100(classes=100, **kwargs):\n \"\"\"\n DenseNet-BC-250 (k=24) model for CIFAR-100 from 'Densely Connected Convolutional Networks,'\n https://arxiv.org/abs/1608.06993.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_densenet_cifar(classes=classes, blocks=250, growth_rate=24, bottleneck=True,\n model_name=\"densenet250_k24_bc_cifar100\", **kwargs)\n\n\ndef _test():\n import numpy as np\n import mxnet as mx\n\n pretrained = False\n\n models = [\n (densenet40_k12_cifar10, 10),\n (densenet40_k12_cifar100, 100),\n (densenet40_k12_bc_cifar10, 10),\n (densenet40_k12_bc_cifar100, 100),\n (densenet40_k24_bc_cifar10, 10),\n (densenet40_k24_bc_cifar100, 100),\n (densenet40_k36_bc_cifar10, 10),\n (densenet40_k36_bc_cifar100, 100),\n (densenet100_k12_cifar10, 10),\n (densenet100_k12_cifar100, 100),\n (densenet100_k24_cifar10, 10),\n (densenet100_k24_cifar100, 100),\n (densenet100_k12_bc_cifar10, 10),\n (densenet100_k12_bc_cifar100, 100),\n (densenet190_k40_bc_cifar10, 10),\n (densenet190_k40_bc_cifar100, 100),\n (densenet250_k24_bc_cifar10, 10),\n (densenet250_k24_bc_cifar100, 100),\n ]\n\n for model, classes in models:\n\n net = model(pretrained=pretrained)\n\n ctx = mx.cpu()\n if not pretrained:\n net.initialize(ctx=ctx)\n\n # net.hybridize()\n net_params = net.collect_params()\n weight_count = 0\n for param in net_params.values():\n if (param.shape is None) or (not param._differentiable):\n continue\n weight_count += np.prod(param.shape)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != densenet40_k12_cifar10 or weight_count == 599050)\n assert (model != densenet40_k12_cifar100 or weight_count == 622360)\n assert (model != densenet40_k12_bc_cifar10 or weight_count == 176122)\n assert (model != densenet40_k12_bc_cifar100 or weight_count == 188092)\n assert (model != densenet40_k24_bc_cifar10 or weight_count == 690346)\n assert (model != densenet40_k24_bc_cifar100 
or weight_count == 714196)\n assert (model != densenet40_k36_bc_cifar10 or weight_count == 1542682)\n assert (model != densenet40_k36_bc_cifar100 or weight_count == 1578412)\n assert (model != densenet100_k12_cifar10 or weight_count == 4068490)\n assert (model != densenet100_k12_cifar100 or weight_count == 4129600)\n assert (model != densenet100_k24_cifar10 or weight_count == 16114138)\n assert (model != densenet100_k24_cifar100 or weight_count == 16236268)\n assert (model != densenet100_k12_bc_cifar10 or weight_count == 769162)\n assert (model != densenet100_k12_bc_cifar100 or weight_count == 800032)\n assert (model != densenet190_k40_bc_cifar10 or weight_count == 25624430)\n assert (model != densenet190_k40_bc_cifar100 or weight_count == 25821620)\n assert (model != densenet250_k24_bc_cifar10 or weight_count == 15324406)\n assert (model != densenet250_k24_bc_cifar100 or weight_count == 15480556)\n\n x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)\n y = net(x)\n assert (y.shape == (1, classes))\n\n\nif __name__ == \"__main__\":\n _test()\n",
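The nested reduce in get_densenet_cifar can likewise be read as the standalone loops below (sketched here for densenet40_k12; names local to the sketch). The [[init_block_channels * 2]] seed exists only so that the first stage, after the // 2, starts from init_block_channels with no real halving:

blocks, growth_rate = 40, 12              # densenet40_k12 (no bottleneck)
layers = [(blocks - 4) // 3] * 3          # -> [12, 12, 12]
init_block_channels = 2 * growth_rate     # -> 24

channels = []
in_channels = init_block_channels
for i, depth in enumerate(layers):
    if i != 0:
        in_channels //= 2                 # TransitionBlock halves the width
    stage = []
    for _ in range(depth):
        in_channels += growth_rate        # each unit concatenates growth_rate maps
        stage.append(in_channels)
    channels.append(stage)

assert channels[0] == [36, 48, 60, 72, 84, 96, 108, 120, 132, 144, 156, 168]
assert channels[1][-1] == 228 and channels[2][-1] == 258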
"\"\"\"\n ResNet for CIFAR, implemented in Gluon.\n Original paper: 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.\n\"\"\"\n\n__all__ = ['CIFARResNet', 'resnet20_cifar10', 'resnet20_cifar100', 'resnet56_cifar10', 'resnet56_cifar100',\n 'resnet110_cifar10', 'resnet110_cifar100', 'resnet164bn_cifar10', 'resnet164bn_cifar100',\n 'resnet1001_cifar10', 'resnet1001_cifar100', 'resnet1202_cifar10', 'resnet1202_cifar100']\n\nimport os\nfrom mxnet import cpu\nfrom mxnet.gluon import nn, HybridBlock\nfrom .common import conv3x3_block\nfrom .resnet import ResUnit\n\n\nclass CIFARResNet(HybridBlock):\n \"\"\"\n ResNet model for CIFAR from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n bottleneck : bool\n Whether to use a bottleneck or simple block in units.\n bn_use_global_stats : bool, default False\n Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.\n Useful for fine-tuning.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (32, 32)\n Spatial size of the expected input image.\n classes : int, default 10\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n bottleneck,\n bn_use_global_stats=False,\n in_channels=3,\n in_size=(32, 32),\n classes=10,\n **kwargs):\n super(CIFARResNet, self).__init__(**kwargs)\n self.in_size = in_size\n self.classes = classes\n\n with self.name_scope():\n self.features = nn.HybridSequential(prefix='')\n self.features.add(conv3x3_block(\n in_channels=in_channels,\n out_channels=init_block_channels,\n bn_use_global_stats=bn_use_global_stats))\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n stage = nn.HybridSequential(prefix=\"stage{}_\".format(i + 1))\n with stage.name_scope():\n for j, out_channels in enumerate(channels_per_stage):\n strides = 2 if (j == 0) and (i != 0) else 1\n stage.add(ResUnit(\n in_channels=in_channels,\n out_channels=out_channels,\n strides=strides,\n bn_use_global_stats=bn_use_global_stats,\n bottleneck=bottleneck,\n conv1_stride=False))\n in_channels = out_channels\n self.features.add(stage)\n self.features.add(nn.AvgPool2D(\n pool_size=8,\n strides=1))\n\n self.output = nn.HybridSequential(prefix='')\n self.output.add(nn.Flatten())\n self.output.add(nn.Dense(\n units=classes,\n in_units=in_channels))\n\n def hybrid_forward(self, F, x):\n x = self.features(x)\n x = self.output(x)\n return x\n\n\ndef get_resnet_cifar(classes,\n blocks,\n bottleneck,\n model_name=None,\n pretrained=False,\n ctx=cpu(),\n root=os.path.join('~', '.mxnet', 'models'),\n **kwargs):\n \"\"\"\n Create ResNet model for CIFAR with specific parameters.\n\n Parameters:\n ----------\n classes : int\n Number of classification classes.\n blocks : int\n Number of blocks.\n bottleneck : bool\n Whether to use a bottleneck or simple block in units.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n assert (classes in [10, 100])\n\n if bottleneck:\n assert ((blocks - 
2) % 9 == 0)\n layers = [(blocks - 2) // 9] * 3\n else:\n assert ((blocks - 2) % 6 == 0)\n layers = [(blocks - 2) // 6] * 3\n\n channels_per_layers = [16, 32, 64]\n init_block_channels = 16\n\n channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]\n\n if bottleneck:\n channels = [[cij * 4 for cij in ci] for ci in channels]\n\n net = CIFARResNet(\n channels=channels,\n init_block_channels=init_block_channels,\n bottleneck=bottleneck,\n classes=classes,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import get_model_file\n net.load_parameters(\n filename=get_model_file(\n model_name=model_name,\n local_model_store_dir_path=root),\n ctx=ctx)\n\n return net\n\n\ndef resnet20_cifar10(classes=10, **kwargs):\n \"\"\"\n ResNet-20 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name=\"resnet20_cifar10\", **kwargs)\n\n\ndef resnet20_cifar100(classes=100, **kwargs):\n \"\"\"\n ResNet-20 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=20, bottleneck=False, model_name=\"resnet20_cifar100\", **kwargs)\n\n\ndef resnet56_cifar10(classes=10, **kwargs):\n \"\"\"\n ResNet-56 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name=\"resnet56_cifar10\", **kwargs)\n\n\ndef resnet56_cifar100(classes=100, **kwargs):\n \"\"\"\n ResNet-56 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=56, bottleneck=False, model_name=\"resnet56_cifar100\", **kwargs)\n\n\ndef resnet110_cifar10(classes=10, **kwargs):\n \"\"\"\n ResNet-110 model for CIFAR-10 from 'Deep 
Residual Learning for Image Recognition,' https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name=\"resnet110_cifar10\", **kwargs)\n\n\ndef resnet110_cifar100(classes=100, **kwargs):\n \"\"\"\n ResNet-110 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'\n https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=110, bottleneck=False, model_name=\"resnet110_cifar100\", **kwargs)\n\n\ndef resnet164bn_cifar10(classes=10, **kwargs):\n \"\"\"\n ResNet-164(BN) model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'\n https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name=\"resnet164bn_cifar10\", **kwargs)\n\n\ndef resnet164bn_cifar100(classes=100, **kwargs):\n \"\"\"\n ResNet-164(BN) model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'\n https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=164, bottleneck=True, model_name=\"resnet164bn_cifar100\", **kwargs)\n\n\ndef resnet1001_cifar10(classes=10, **kwargs):\n \"\"\"\n ResNet-1001 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'\n https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name=\"resnet1001_cifar10\", **kwargs)\n\n\ndef resnet1001_cifar100(classes=100, **kwargs):\n \"\"\"\n ResNet-1001 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'\n https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights 
for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=1001, bottleneck=True, model_name=\"resnet1001_cifar100\", **kwargs)\n\n\ndef resnet1202_cifar10(classes=10, **kwargs):\n \"\"\"\n ResNet-1202 model for CIFAR-10 from 'Deep Residual Learning for Image Recognition,'\n https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 10\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name=\"resnet1202_cifar10\", **kwargs)\n\n\ndef resnet1202_cifar100(classes=100, **kwargs):\n \"\"\"\n ResNet-1202 model for CIFAR-100 from 'Deep Residual Learning for Image Recognition,'\n https://arxiv.org/abs/1512.03385.\n\n Parameters:\n ----------\n classes : int, default 100\n Number of classification classes.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_resnet_cifar(classes=classes, blocks=1202, bottleneck=False, model_name=\"resnet1202_cifar100\", **kwargs)\n\n\ndef _test():\n import numpy as np\n import mxnet as mx\n\n pretrained = False\n\n models = [\n (resnet20_cifar10, 10),\n (resnet20_cifar100, 100),\n (resnet56_cifar10, 10),\n (resnet56_cifar100, 100),\n (resnet110_cifar10, 10),\n (resnet110_cifar100, 100),\n (resnet164bn_cifar10, 10),\n (resnet164bn_cifar100, 100),\n (resnet1001_cifar10, 10),\n (resnet1001_cifar100, 100),\n (resnet1202_cifar10, 10),\n (resnet1202_cifar100, 100),\n ]\n\n for model, classes in models:\n\n net = model(pretrained=pretrained)\n\n ctx = mx.cpu()\n if not pretrained:\n net.initialize(ctx=ctx)\n\n # net.hybridize()\n net_params = net.collect_params()\n weight_count = 0\n for param in net_params.values():\n if (param.shape is None) or (not param._differentiable):\n continue\n weight_count += np.prod(param.shape)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != resnet20_cifar10 or weight_count == 272474)\n assert (model != resnet20_cifar100 or weight_count == 278324)\n assert (model != resnet56_cifar10 or weight_count == 855770)\n assert (model != resnet56_cifar100 or weight_count == 861620)\n assert (model != resnet110_cifar10 or weight_count == 1730714)\n assert (model != resnet110_cifar100 or weight_count == 1736564)\n assert (model != resnet164bn_cifar10 or weight_count == 1704154)\n assert (model != resnet164bn_cifar100 or weight_count == 1727284)\n assert (model != resnet1001_cifar10 or weight_count == 10328602)\n assert (model != resnet1001_cifar100 or weight_count == 10351732)\n assert (model != resnet1202_cifar10 or weight_count == 19424026)\n assert (model != resnet1202_cifar100 or weight_count == 19429876)\n\n x = mx.nd.zeros((1, 3, 32, 32), ctx=ctx)\n y = net(x)\n assert (y.shape == (1, classes))\n\n\nif __name__ == \"__main__\":\n _test()\n",
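"# Editor's illustrative sketch, not part of the original file above: it re-derives\n# the depth arithmetic that get_resnet_cifar() asserts. With three stages, a basic\n# block spends 2 convs per unit (depth = 6n + 2) and a bottleneck block spends 3\n# (depth = 9n + 2); the '+ 2' counts the init conv and the final classifier.\ndef cifar_resnet_layers(blocks, bottleneck):\n    step = 9 if bottleneck else 6\n    assert (blocks - 2) % step == 0\n    return [(blocks - 2) // step] * 3\n\nassert cifar_resnet_layers(20, False) == [3, 3, 3]\nassert cifar_resnet_layers(164, True) == [18, 18, 18]\nassert cifar_resnet_layers(1202, False) == [200, 200, 200]\n",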
"import argparse\nimport numpy as np\n\nimport chainer\nfrom chainer import cuda\nfrom chainer import training\nfrom chainer.training import extensions\nfrom chainer.serializers import save_npz\n\nfrom common.logger_utils import initialize_logging\nfrom chainer_.utils import prepare_model\nfrom chainer_.cifar import add_dataset_parser_arguments\nfrom chainer_.cifar import get_data_iterators\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Train a model for image classification (Chainer/CIFAR)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n '--dataset',\n type=str,\n default=\"CIFAR10\",\n help='dataset name. options are CIFAR10 and CIFAR100')\n\n args, _ = parser.parse_known_args()\n add_dataset_parser_arguments(parser, args.dataset)\n\n parser.add_argument(\n '--model',\n type=str,\n required=True,\n help='type of model to use. see model_provider for options.')\n parser.add_argument(\n '--use-pretrained',\n action='store_true',\n help='enable using pretrained model from gluon.')\n parser.add_argument(\n '--resume',\n type=str,\n default='',\n help='resume from previously saved parameters if not None')\n parser.add_argument(\n '--resume-state',\n type=str,\n default='',\n help='resume from previously saved optimizer state if not None')\n\n parser.add_argument(\n '--num-gpus',\n type=int,\n default=0,\n help='number of gpus to use.')\n parser.add_argument(\n '-j',\n '--num-data-workers',\n dest='num_workers',\n default=4,\n type=int,\n help='number of preprocessing workers')\n\n parser.add_argument(\n '--batch-size',\n type=int,\n default=512,\n help='training batch size per device (CPU/GPU).')\n parser.add_argument(\n '--num-epochs',\n type=int,\n default=120,\n help='number of training epochs.')\n parser.add_argument(\n '--start-epoch',\n type=int,\n default=1,\n help='starting epoch for resuming, default is 1 for new training')\n parser.add_argument(\n '--attempt',\n type=int,\n default=1,\n help='current number of training')\n\n parser.add_argument(\n '--optimizer-name',\n type=str,\n default='nag',\n help='optimizer name')\n parser.add_argument(\n '--lr',\n type=float,\n default=0.1,\n help='learning rate. default is 0.1')\n parser.add_argument(\n '--lr-mode',\n type=str,\n default='cosine',\n help='learning rate scheduler mode. options are step, poly and cosine')\n parser.add_argument(\n '--lr-decay',\n type=float,\n default=0.1,\n help='decay rate of learning rate. default is 0.1')\n parser.add_argument(\n '--lr-decay-period',\n type=int,\n default=0,\n help='interval for periodic learning rate decays. default is 0 to disable.')\n parser.add_argument(\n '--lr-decay-epoch',\n type=str,\n default='40,60',\n help='epoches at which learning rate decays. default is 40,60.')\n parser.add_argument(\n '--target-lr',\n type=float,\n default=1e-8,\n help='ending learning rate; default is 1e-8')\n parser.add_argument(\n '--momentum',\n type=float,\n default=0.9,\n help='momentum value for optimizer; default is 0.9')\n parser.add_argument(\n '--wd',\n type=float,\n default=0.0001,\n help='weight decay rate. 
default is 0.0001.')\n\n parser.add_argument(\n '--log-interval',\n type=int,\n default=50,\n help='number of batches to wait before logging.')\n parser.add_argument(\n '--save-interval',\n type=int,\n default=4,\n help='saving parameters epoch interval, best model will always be saved')\n parser.add_argument(\n '--save-dir',\n type=str,\n default='',\n help='directory of saved models and log-files')\n parser.add_argument(\n '--logging-file-name',\n type=str,\n default='train.log',\n help='filename of training log')\n\n parser.add_argument(\n '--seed',\n type=int,\n default=-1,\n help='Random seed to be fixed')\n parser.add_argument(\n '--log-packages',\n type=str,\n default='mxnet',\n help='list of python packages for logging')\n parser.add_argument(\n '--log-pip-packages',\n type=str,\n default='mxnet-cu92, gluoncv',\n help='list of pip packages for logging')\n args = parser.parse_args()\n return args\n\n\ndef init_rand(seed):\n if seed <= 0:\n seed = np.random.randint(10000)\n return seed\n\n\ndef prepare_trainer(net,\n optimizer_name,\n lr,\n momentum,\n num_epochs,\n train_iter,\n val_iter,\n logging_dir_path,\n num_gpus=0):\n if optimizer_name == \"sgd\":\n optimizer = chainer.optimizers.MomentumSGD(lr=lr, momentum=momentum)\n elif optimizer_name == \"nag\":\n optimizer = chainer.optimizers.NesterovAG(lr=lr, momentum=momentum)\n else:\n raise Exception('Unsupported optimizer: {}'.format(optimizer_name))\n optimizer.setup(net)\n\n # devices = tuple(range(num_gpus)) if num_gpus > 0 else (-1, )\n devices = (0,) if num_gpus > 0 else (-1,)\n\n updater = training.updaters.StandardUpdater(\n iterator=train_iter,\n optimizer=optimizer,\n device=devices[0])\n trainer = training.Trainer(\n updater=updater,\n stop_trigger=(num_epochs, 'epoch'),\n out=logging_dir_path)\n\n val_interval = 100000, 'iteration'\n log_interval = 1000, 'iteration'\n\n trainer.extend(\n extension=extensions.Evaluator(\n val_iter,\n net,\n device=devices[0]),\n trigger=val_interval)\n trainer.extend(extensions.dump_graph('main/loss'))\n trainer.extend(extensions.snapshot(), trigger=val_interval)\n trainer.extend(\n extensions.snapshot_object(\n net,\n 'model_iter_{.updater.iteration}'),\n trigger=val_interval)\n trainer.extend(extensions.LogReport(trigger=log_interval))\n trainer.extend(extensions.observe_lr(), trigger=log_interval)\n trainer.extend(\n extensions.PrintReport([\n 'epoch', 'iteration', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy',\n 'lr']),\n trigger=log_interval)\n trainer.extend(extensions.ProgressBar(update_interval=10))\n\n return trainer\n\n\ndef save_params(file_stem,\n net,\n trainer):\n save_npz(\n file=file_stem + '.npz',\n obj=net)\n save_npz(\n file=file_stem + '.states',\n obj=trainer)\n\n\ndef main():\n args = parse_args()\n args.seed = init_rand(seed=args.seed)\n\n _, log_file_exist = initialize_logging(\n logging_dir_path=args.save_dir,\n logging_file_name=args.logging_file_name,\n script_args=args,\n log_packages=args.log_packages,\n log_pip_packages=args.log_pip_packages)\n\n num_gpus = args.num_gpus\n if num_gpus > 0:\n cuda.get_device(0).use()\n batch_size = args.batch_size\n\n net = prepare_model(\n model_name=args.model,\n use_pretrained=args.use_pretrained,\n pretrained_model_file_path=args.resume.strip(),\n num_gpus=num_gpus)\n\n train_iter, val_iter = get_data_iterators(\n batch_size=batch_size,\n num_workers=args.num_workers)\n\n trainer = prepare_trainer(\n net=net,\n optimizer_name=args.optimizer_name,\n lr=args.lr,\n momentum=args.momentum,\n 
num_epochs=args.num_epochs,\n train_iter=train_iter,\n val_iter=val_iter,\n logging_dir_path=args.save_dir,\n num_gpus=num_gpus)\n\n # if args.save_dir and args.save_interval:\n # lp_saver = TrainLogParamSaver(\n # checkpoint_file_name_prefix='imagenet_{}'.format(args.model),\n # last_checkpoint_file_name_suffix=\"last\",\n # best_checkpoint_file_name_suffix=None,\n # last_checkpoint_dir_path=args.save_dir,\n # best_checkpoint_dir_path=None,\n # last_checkpoint_file_count=2,\n # best_checkpoint_file_count=2,\n # checkpoint_file_save_callback=save_params,\n # checkpoint_file_exts=['.npz', '.states'],\n # save_interval=args.save_interval,\n # num_epochs=args.num_epochs,\n # param_names=['Val.Top1', 'Train.Top1', 'Val.Top5', 'Train.Loss', 'LR'],\n # acc_ind=2,\n # # bigger=[True],\n # # mask=None,\n # score_log_file_path=os.path.join(args.save_dir, 'score.log'),\n # score_log_attempt_value=args.attempt,\n # best_map_log_file_path=os.path.join(args.save_dir, 'best_map.log'))\n # else:\n # lp_saver = None\n\n trainer.run()\n\n\nif __name__ == '__main__':\n main()\n",
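"# Editor's note, not part of the original file above: a hedged example of invoking\n# the training script. The file name 'train_ch_cifar.py' is hypothetical; the flags\n# come from parse_args() above, and the model name is assumed to be registered in\n# the model provider used by prepare_model().\n#\n#   python train_ch_cifar.py --model resnet20_cifar10 --dataset CIFAR10 \\\n#       --num-gpus 1 --batch-size 128 --num-epochs 200 --lr 0.1 \\\n#       --save-dir ./checkpoints --seed 42\n",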
"\"\"\"\n ShaResNet, implemented in PyTorch.\n Original paper: 'ShaResNet: reducing residual network parameter number by sharing weights,'\n https://arxiv.org/abs/1702.08782.\n\"\"\"\n\n__all__ = ['ShaResNet', 'sharesnet18', 'sharesnet34', 'sharesnet50', 'sharesnet50b', 'sharesnet101', 'sharesnet101b',\n 'sharesnet152', 'sharesnet152b']\n\nimport os\nfrom inspect import isfunction\nimport torch.nn as nn\nimport torch.nn.init as init\nfrom .common import conv1x1_block, conv3x3_block\nfrom .resnet import ResInitBlock\n\n\nclass ShaConvBlock(nn.Module):\n \"\"\"\n Shared convolution block with Batch normalization and ReLU/ReLU6 activation.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n activation : function or str or None, default nn.ReLU(inplace=True)\n Activation function or name of activation function.\n activate : bool, default True\n Whether activate the convolution block.\n shared_conv : Module, default None\n Shared convolution layer.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n dilation=1,\n groups=1,\n bias=False,\n activation=(lambda: nn.ReLU(inplace=True)),\n activate=True,\n shared_conv=None):\n super(ShaConvBlock, self).__init__()\n self.activate = activate\n\n if shared_conv is None:\n self.conv = nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias)\n else:\n self.conv = shared_conv\n self.bn = nn.BatchNorm2d(num_features=out_channels)\n if self.activate:\n assert (activation is not None)\n if isfunction(activation):\n self.activ = activation()\n elif isinstance(activation, str):\n if activation == \"relu\":\n self.activ = nn.ReLU(inplace=True)\n elif activation == \"relu6\":\n self.activ = nn.ReLU6(inplace=True)\n else:\n raise NotImplementedError()\n else:\n self.activ = activation\n\n def forward(self, x):\n x = self.conv(x)\n x = self.bn(x)\n if self.activate:\n x = self.activ(x)\n return x\n\n\ndef sha_conv3x3_block(in_channels,\n out_channels,\n stride=1,\n padding=1,\n dilation=1,\n groups=1,\n bias=False,\n activation=(lambda: nn.ReLU(inplace=True)),\n activate=True,\n shared_conv=None):\n \"\"\"\n 3x3 version of the shared convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 1\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n bias : bool, default False\n Whether the layer uses a bias vector.\n activation : function or str or None, default nn.ReLU(inplace=True)\n Activation function or name of activation function.\n activate : bool, default True\n Whether activate the convolution block.\n shared_conv : Module, default None\n Shared convolution layer.\n 
\"\"\"\n return ShaConvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias,\n activation=activation,\n activate=activate,\n shared_conv=shared_conv)\n\n\nclass ShaResBlock(nn.Module):\n \"\"\"\n Simple ShaResNet block for residual path in ShaResNet unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n shared_conv : Module, default None\n Shared convolution layer.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride,\n shared_conv=None):\n super(ShaResBlock, self).__init__()\n self.conv1 = conv3x3_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride)\n self.conv2 = sha_conv3x3_block(\n in_channels=out_channels,\n out_channels=out_channels,\n activation=None,\n activate=False,\n shared_conv=shared_conv)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n return x\n\n\nclass ShaResBottleneck(nn.Module):\n \"\"\"\n ShaResNet bottleneck block for residual path in ShaResNet unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n bottleneck_factor : int, default 4\n Bottleneck factor.\n conv1_stride : bool, default False\n Whether to use stride in the first or the second convolution layer of the block.\n shared_conv : Module, default None\n Shared convolution layer.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride,\n conv1_stride=False,\n bottleneck_factor=4,\n shared_conv=None):\n super(ShaResBottleneck, self).__init__()\n assert (conv1_stride or not ((stride > 1) and (shared_conv is not None)))\n mid_channels = out_channels // bottleneck_factor\n\n self.conv1 = conv1x1_block(\n in_channels=in_channels,\n out_channels=mid_channels,\n stride=(stride if conv1_stride else 1))\n self.conv2 = sha_conv3x3_block(\n in_channels=mid_channels,\n out_channels=mid_channels,\n stride=(1 if conv1_stride else stride),\n shared_conv=shared_conv)\n self.conv3 = conv1x1_block(\n in_channels=mid_channels,\n out_channels=out_channels,\n activation=None,\n activate=False)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n return x\n\n\nclass ShaResUnit(nn.Module):\n \"\"\"\n ShaResNet unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Strides of the convolution.\n bottleneck : bool\n Whether to use a bottleneck or simple block in units.\n conv1_stride : bool\n Whether to use stride in the first or the second convolution layer of the block.\n shared_conv : Module, default None\n Shared convolution layer.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride,\n bottleneck,\n conv1_stride,\n shared_conv=None):\n super(ShaResUnit, self).__init__()\n self.resize_identity = (in_channels != out_channels) or (stride != 1)\n\n if bottleneck:\n self.body = ShaResBottleneck(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n conv1_stride=conv1_stride,\n shared_conv=shared_conv)\n else:\n self.body = ShaResBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n shared_conv=shared_conv)\n if self.resize_identity:\n 
self.identity_conv = conv1x1_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n activate=False)\n self.activ = nn.ReLU(inplace=True)\n\n def forward(self, x):\n if self.resize_identity:\n identity = self.identity_conv(x)\n else:\n identity = x\n x = self.body(x)\n x = x + identity\n x = self.activ(x)\n return x\n\n\nclass ShaResNet(nn.Module):\n \"\"\"\n ShaResNet model from 'ShaResNet: reducing residual network parameter number by sharing weights,'\n https://arxiv.org/abs/1702.08782.\n\n Parameters:\n ----------\n channels : list of list of int\n Number of output channels for each unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n bottleneck : bool\n Whether to use a bottleneck or simple block in units.\n conv1_stride : bool\n Whether to use stride in the first or the second convolution layer in units.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n num_classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n channels,\n init_block_channels,\n bottleneck,\n conv1_stride,\n in_channels=3,\n in_size=(224, 224),\n num_classes=1000):\n super(ShaResNet, self).__init__()\n self.in_size = in_size\n self.num_classes = num_classes\n\n self.features = nn.Sequential()\n self.features.add_module(\"init_block\", ResInitBlock(\n in_channels=in_channels,\n out_channels=init_block_channels))\n in_channels = init_block_channels\n for i, channels_per_stage in enumerate(channels):\n stage = nn.Sequential()\n shared_conv = None\n for j, out_channels in enumerate(channels_per_stage):\n stride = 2 if (j == 0) and (i != 0) else 1\n unit = ShaResUnit(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n bottleneck=bottleneck,\n conv1_stride=conv1_stride,\n shared_conv=shared_conv)\n if (shared_conv is None) and not (bottleneck and not conv1_stride and stride > 1):\n shared_conv = unit.body.conv2.conv\n stage.add_module(\"unit{}\".format(j + 1), unit)\n in_channels = out_channels\n self.features.add_module(\"stage{}\".format(i + 1), stage)\n self.features.add_module(\"final_pool\", nn.AvgPool2d(\n kernel_size=7,\n stride=1))\n\n self.output = nn.Linear(\n in_features=in_channels,\n out_features=num_classes)\n\n self._init_params()\n\n def _init_params(self):\n for name, module in self.named_modules():\n if isinstance(module, nn.Conv2d):\n init.kaiming_uniform_(module.weight)\n if module.bias is not None:\n init.constant_(module.bias, 0)\n\n def forward(self, x):\n x = self.features(x)\n x = x.view(x.size(0), -1)\n x = self.output(x)\n return x\n\n\ndef get_sharesnet(blocks,\n conv1_stride=True,\n model_name=None,\n pretrained=False,\n root=os.path.join('~', '.torch', 'models'),\n **kwargs):\n \"\"\"\n Create ShaResNet model with specific parameters.\n\n Parameters:\n ----------\n blocks : int\n Number of blocks.\n conv1_stride : bool, default True\n Whether to use stride in the first or the second convolution layer in units.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n\n if blocks == 18:\n layers = [2, 2, 2, 2]\n elif blocks == 34:\n layers = [3, 4, 6, 3]\n elif blocks == 50:\n layers = [3, 4, 6, 3]\n elif blocks == 101:\n layers = [3, 4, 23, 3]\n elif blocks == 
152:\n layers = [3, 8, 36, 3]\n elif blocks == 200:\n layers = [3, 24, 36, 3]\n else:\n raise ValueError(\"Unsupported ShaResNet with number of blocks: {}\".format(blocks))\n\n init_block_channels = 64\n\n if blocks < 50:\n channels_per_layers = [64, 128, 256, 512]\n bottleneck = False\n else:\n channels_per_layers = [256, 512, 1024, 2048]\n bottleneck = True\n\n channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]\n\n net = ShaResNet(\n channels=channels,\n init_block_channels=init_block_channels,\n bottleneck=bottleneck,\n conv1_stride=conv1_stride,\n **kwargs)\n\n if pretrained:\n if (model_name is None) or (not model_name):\n raise ValueError(\"Parameter `model_name` should be properly initialized for loading pretrained model.\")\n from .model_store import download_model\n download_model(\n net=net,\n model_name=model_name,\n local_model_store_dir_path=root)\n\n return net\n\n\ndef sharesnet18(**kwargs):\n \"\"\"\n ShaResNet-18 model from 'ShaResNet: reducing residual network parameter number by sharing weights,'\n https://arxiv.org/abs/1702.08782.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_sharesnet(blocks=18, model_name=\"sharesnet18\", **kwargs)\n\n\ndef sharesnet34(**kwargs):\n \"\"\"\n ShaResNet-34 model from 'ShaResNet: reducing residual network parameter number by sharing weights,'\n https://arxiv.org/abs/1702.08782.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_sharesnet(blocks=34, model_name=\"sharesnet34\", **kwargs)\n\n\ndef sharesnet50(**kwargs):\n \"\"\"\n ShaResNet-50 model from 'ShaResNet: reducing residual network parameter number by sharing weights,'\n https://arxiv.org/abs/1702.08782.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_sharesnet(blocks=50, model_name=\"sharesnet50\", **kwargs)\n\n\ndef sharesnet50b(**kwargs):\n \"\"\"\n ShaResNet-50b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual\n network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_sharesnet(blocks=50, conv1_stride=False, model_name=\"sharesnet50b\", **kwargs)\n\n\ndef sharesnet101(**kwargs):\n \"\"\"\n ShaResNet-101 model from 'ShaResNet: reducing residual network parameter number by sharing weights,'\n https://arxiv.org/abs/1702.08782.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_sharesnet(blocks=101, model_name=\"sharesnet101\", **kwargs)\n\n\ndef sharesnet101b(**kwargs):\n \"\"\"\n ShaResNet-101b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual\n network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782.\n\n 
Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_sharesnet(blocks=101, conv1_stride=False, model_name=\"sharesnet101b\", **kwargs)\n\n\ndef sharesnet152(**kwargs):\n \"\"\"\n ShaResNet-152 model from 'ShaResNet: reducing residual network parameter number by sharing weights,'\n https://arxiv.org/abs/1702.08782.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_sharesnet(blocks=152, model_name=\"sharesnet152\", **kwargs)\n\n\ndef sharesnet152b(**kwargs):\n \"\"\"\n ShaResNet-152b model with stride at the second convolution in bottleneck block from 'ShaResNet: reducing residual\n network parameter number by sharing weights,' https://arxiv.org/abs/1702.08782.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.torch/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_sharesnet(blocks=152, conv1_stride=False, model_name=\"sharesnet152b\", **kwargs)\n\n\ndef _calc_width(net):\n import numpy as np\n net_params = filter(lambda p: p.requires_grad, net.parameters())\n weight_count = 0\n for param in net_params:\n weight_count += np.prod(param.size())\n return weight_count\n\n\ndef _test():\n import torch\n from torch.autograd import Variable\n\n pretrained = False\n\n models = [\n sharesnet18,\n sharesnet34,\n sharesnet50,\n sharesnet50b,\n sharesnet101,\n sharesnet101b,\n sharesnet152,\n sharesnet152b,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n # net.train()\n net.eval()\n weight_count = _calc_width(net)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != sharesnet18 or weight_count == 8556072)\n assert (model != sharesnet34 or weight_count == 13613864)\n assert (model != sharesnet50 or weight_count == 17373224)\n assert (model != sharesnet50b or weight_count == 20469800)\n assert (model != sharesnet101 or weight_count == 26338344)\n assert (model != sharesnet101b or weight_count == 29434920)\n assert (model != sharesnet152 or weight_count == 33724456)\n assert (model != sharesnet152b or weight_count == 36821032)\n\n x = Variable(torch.randn(1, 3, 224, 224))\n y = net(x)\n assert (tuple(y.size()) == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n",
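"# Editor's illustrative sketch, not part of the original file above: a minimal\n# demonstration of the weight sharing used by ShaResNet, where units of a stage\n# reuse one 3x3 convolution (see how ShaResUnit receives shared_conv). Counting\n# parameters by object identity shows the shared conv is stored only once.\nimport torch.nn as nn\n\nshared = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=False)\nblock_a = nn.Sequential(shared, nn.BatchNorm2d(64))\nblock_b = nn.Sequential(shared, nn.BatchNorm2d(64))  # same conv instance\n\nunique = {id(p): p.numel() for m in (block_a, block_b) for p in m.parameters()}\nassert sum(unique.values()) == 64 * 64 * 3 * 3 + 2 * (64 + 64)  # conv counted once\n",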
"\"\"\"\n FishNet, implemented in Chainer.\n Original paper: 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'\n http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.\n\"\"\"\n\n__all__ = ['FishNet', 'fishnet99', 'fishnet150']\n\nimport os\nimport chainer.functions as F\nimport chainer.links as L\nfrom chainer import Chain\nfrom functools import partial\nfrom chainer.serializers import load_npz\nfrom .common import pre_conv1x1_block, pre_conv3x3_block, conv1x1, SesquialteralHourglass, SimpleSequential\nfrom .preresnet import PreResActivation\nfrom .senet import SEInitBlock\n\n\ndef channel_squeeze(x,\n groups):\n \"\"\"\n Channel squeeze operation.\n\n Parameters:\n ----------\n x : chainer.Variable or numpy.ndarray or cupy.ndarray\n Input variable.\n groups : int\n Number of groups.\n\n Returns\n -------\n chainer.Variable or numpy.ndarray or cupy.ndarray\n Resulted variable.\n \"\"\"\n batch, channels, height, width = x.shape\n channels_per_group = channels // groups\n x = F.reshape(x, shape=(batch, channels_per_group, groups, height, width))\n x = F.sum(x, axis=2)\n return x\n\n\nclass ChannelSqueeze(Chain):\n \"\"\"\n Channel squeeze layer. This is a wrapper over the same operation. It is designed to save the number of groups.\n\n Parameters:\n ----------\n channels : int\n Number of channels.\n groups : int\n Number of groups.\n \"\"\"\n def __init__(self,\n channels,\n groups):\n super(ChannelSqueeze, self).__init__()\n assert (channels % groups == 0)\n self.groups = groups\n\n def __call__(self, x):\n return channel_squeeze(x, self.groups)\n\n\nclass InterpolationBlock(Chain):\n \"\"\"\n Interpolation block.\n\n Parameters:\n ----------\n scale_factor : int\n Multiplier for spatial size.\n \"\"\"\n def __init__(self,\n scale_factor):\n super(InterpolationBlock, self).__init__()\n self.scale_factor = scale_factor\n\n def __call__(self, x):\n return F.unpooling_2d(\n x=x,\n ksize=self.scale_factor,\n cover_all=False)\n\n\nclass PreSEAttBlock(Chain):\n \"\"\"\n FishNet specific Squeeze-and-Excitation attention block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n reduction : int, default 16\n Squeeze reduction value.\n \"\"\"\n\n def __init__(self,\n in_channels,\n out_channels,\n reduction=16):\n super(PreSEAttBlock, self).__init__()\n mid_cannels = out_channels // reduction\n\n with self.init_scope():\n self.bn = L.BatchNormalization(\n size=in_channels,\n eps=1e-5)\n self.conv1 = conv1x1(\n in_channels=in_channels,\n out_channels=mid_cannels,\n use_bias=True)\n self.conv2 = conv1x1(\n in_channels=mid_cannels,\n out_channels=out_channels,\n use_bias=True)\n\n def __call__(self, x):\n x = self.bn(x)\n x = F.relu(x)\n x = F.average_pooling_2d(x, ksize=x.shape[2:])\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.sigmoid(x)\n return x\n\n\nclass FishBottleneck(Chain):\n \"\"\"\n FishNet bottleneck block for residual unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int\n Stride of the convolution.\n dilate : int or tuple/list of 2 int\n Dilation value for convolution layer.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride,\n dilate):\n super(FishBottleneck, self).__init__()\n mid_channels = out_channels // 4\n\n with self.init_scope():\n self.conv1 = pre_conv1x1_block(\n 
in_channels=in_channels,\n out_channels=mid_channels)\n self.conv2 = pre_conv3x3_block(\n in_channels=mid_channels,\n out_channels=mid_channels,\n stride=stride,\n pad=dilate,\n dilate=dilate)\n self.conv3 = pre_conv1x1_block(\n in_channels=mid_channels,\n out_channels=out_channels)\n\n def __call__(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n return x\n\n\nclass FishBlock(Chain):\n \"\"\"\n FishNet block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n stride : int or tuple/list of 2 int, default 1\n Stride of the convolution.\n dilate : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n squeeze : bool, default False\n Whether to use a channel squeeze operation.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n stride=1,\n dilate=1,\n squeeze=False):\n super(FishBlock, self).__init__()\n self.squeeze = squeeze\n self.resize_identity = (in_channels != out_channels) or (stride != 1)\n\n with self.init_scope():\n self.body = FishBottleneck(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride,\n dilate=dilate)\n if self.squeeze:\n assert (in_channels // 2 == out_channels)\n self.c_squeeze = ChannelSqueeze(\n channels=in_channels,\n groups=2)\n elif self.resize_identity:\n self.identity_conv = pre_conv1x1_block(\n in_channels=in_channels,\n out_channels=out_channels,\n stride=stride)\n\n def __call__(self, x):\n if self.squeeze:\n identity = self.c_squeeze(x)\n elif self.resize_identity:\n identity = self.identity_conv(x)\n else:\n identity = x\n x = self.body(x)\n x = x + identity\n return x\n\n\nclass DownUnit(Chain):\n \"\"\"\n FishNet down unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels_list : list of int\n Number of output channels for each block.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels_list):\n super(DownUnit, self).__init__()\n with self.init_scope():\n self.blocks = SimpleSequential()\n with self.blocks.init_scope():\n for i, out_channels in enumerate(out_channels_list):\n setattr(self.blocks, \"block{}\".format(i + 1), FishBlock(\n in_channels=in_channels,\n out_channels=out_channels))\n in_channels = out_channels\n self.pool = partial(\n F.max_pooling_2d,\n ksize=2,\n stride=2,\n cover_all=False)\n\n def __call__(self, x):\n x = self.blocks(x)\n x = self.pool(x)\n return x\n\n\nclass UpUnit(Chain):\n \"\"\"\n FishNet up unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels_list : list of int\n Number of output channels for each block.\n dilate : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels_list,\n dilate=1):\n super(UpUnit, self).__init__()\n with self.init_scope():\n self.blocks = SimpleSequential()\n with self.blocks.init_scope():\n for i, out_channels in enumerate(out_channels_list):\n squeeze = (dilate > 1) and (i == 0)\n setattr(self.blocks, \"block{}\".format(i + 1), FishBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n dilate=dilate,\n squeeze=squeeze))\n in_channels = out_channels\n self.upsample = InterpolationBlock(scale_factor=2)\n\n def __call__(self, x):\n x = self.blocks(x)\n x = self.upsample(x)\n return x\n\n\nclass SkipUnit(Chain):\n \"\"\"\n FishNet skip connection unit.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels_list : list of int\n 
Number of output channels for each block.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels_list):\n super(SkipUnit, self).__init__()\n with self.init_scope():\n self.blocks = SimpleSequential()\n with self.blocks.init_scope():\n for i, out_channels in enumerate(out_channels_list):\n setattr(self.blocks, \"block{}\".format(i + 1), FishBlock(\n in_channels=in_channels,\n out_channels=out_channels))\n in_channels = out_channels\n\n def __call__(self, x):\n x = self.blocks(x)\n return x\n\n\nclass SkipAttUnit(Chain):\n \"\"\"\n FishNet skip connection unit with attention block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels_list : list of int\n Number of output channels for each block.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels_list):\n super(SkipAttUnit, self).__init__()\n mid_channels1 = in_channels // 2\n mid_channels2 = 2 * in_channels\n\n with self.init_scope():\n self.conv1 = pre_conv1x1_block(\n in_channels=in_channels,\n out_channels=mid_channels1)\n self.conv2 = pre_conv1x1_block(\n in_channels=mid_channels1,\n out_channels=mid_channels2,\n use_bias=True)\n in_channels = mid_channels2\n\n self.se = PreSEAttBlock(\n in_channels=mid_channels2,\n out_channels=out_channels_list[-1])\n\n self.blocks = SimpleSequential()\n with self.blocks.init_scope():\n for i, out_channels in enumerate(out_channels_list):\n setattr(self.blocks, \"block{}\".format(i + 1), FishBlock(\n in_channels=in_channels,\n out_channels=out_channels))\n in_channels = out_channels\n\n def __call__(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n w = self.se(x)\n x = self.blocks(x)\n x = x * w + w\n return x\n\n\nclass FishFinalBlock(Chain):\n \"\"\"\n FishNet final block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n \"\"\"\n def __init__(self,\n in_channels):\n super(FishFinalBlock, self).__init__()\n mid_channels = in_channels // 2\n\n with self.init_scope():\n self.conv1 = pre_conv1x1_block(\n in_channels=in_channels,\n out_channels=mid_channels)\n self.preactiv = PreResActivation(\n in_channels=mid_channels)\n\n def __call__(self, x):\n x = self.conv1(x)\n x = self.preactiv(x)\n return x\n\n\nclass FishNet(Chain):\n \"\"\"\n FishNet model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'\n http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.\n\n Parameters:\n ----------\n direct_channels : list of list of list of int\n Number of output channels for each unit along the straight path.\n skip_channels : list of list of list of int\n Number of output channels for each skip connection unit.\n init_block_channels : int\n Number of output channels for the initial unit.\n in_channels : int, default 3\n Number of input channels.\n in_size : tuple of two ints, default (224, 224)\n Spatial size of the expected input image.\n classes : int, default 1000\n Number of classification classes.\n \"\"\"\n def __init__(self,\n direct_channels,\n skip_channels,\n init_block_channels,\n in_channels=3,\n in_size=(224, 224),\n classes=1000):\n super(FishNet, self).__init__()\n self.in_size = in_size\n self.classes = classes\n\n depth = len(direct_channels[0])\n down1_channels = direct_channels[0]\n up_channels = direct_channels[1]\n down2_channels = direct_channels[2]\n skip1_channels = skip_channels[0]\n skip2_channels = skip_channels[1]\n\n with self.init_scope():\n self.features = SimpleSequential()\n with self.features.init_scope():\n 
setattr(self.features, \"init_block\", SEInitBlock(\n in_channels=in_channels,\n out_channels=init_block_channels))\n in_channels = init_block_channels\n\n down1_seq = SimpleSequential()\n skip1_seq = SimpleSequential()\n for i in range(depth + 1):\n skip1_channels_list = skip1_channels[i]\n if i < depth:\n with skip1_seq.init_scope():\n setattr(skip1_seq, \"unit{}\".format(i + 1), SkipUnit(\n in_channels=in_channels,\n out_channels_list=skip1_channels_list))\n down1_channels_list = down1_channels[i]\n with down1_seq.init_scope():\n setattr(down1_seq, \"unit{}\".format(i + 1), DownUnit(\n in_channels=in_channels,\n out_channels_list=down1_channels_list))\n in_channels = down1_channels_list[-1]\n else:\n with skip1_seq.init_scope():\n setattr(skip1_seq, \"unit{}\".format(i + 1), SkipAttUnit(\n in_channels=in_channels,\n out_channels_list=skip1_channels_list))\n in_channels = skip1_channels_list[-1]\n\n up_seq = SimpleSequential()\n skip2_seq = SimpleSequential()\n for i in range(depth + 1):\n skip2_channels_list = skip2_channels[i]\n if i > 0:\n in_channels += skip1_channels[depth - i][-1]\n if i < depth:\n with skip2_seq.init_scope():\n setattr(skip2_seq, \"unit{}\".format(i + 1), SkipUnit(\n in_channels=in_channels,\n out_channels_list=skip2_channels_list))\n up_channels_list = up_channels[i]\n dilate = 2 ** i\n with up_seq.init_scope():\n setattr(up_seq, \"unit{}\".format(i + 1), UpUnit(\n in_channels=in_channels,\n out_channels_list=up_channels_list,\n dilate=dilate))\n in_channels = up_channels_list[-1]\n else:\n with skip2_seq.init_scope():\n setattr(skip2_seq, \"unit{}\".format(i + 1), F.identity)\n\n down2_seq = SimpleSequential()\n with down2_seq.init_scope():\n for i in range(depth):\n down2_channels_list = down2_channels[i]\n setattr(down2_seq, \"unit{}\".format(i + 1), DownUnit(\n in_channels=in_channels,\n out_channels_list=down2_channels_list))\n in_channels = down2_channels_list[-1] + skip2_channels[depth - 1 - i][-1]\n\n setattr(self.features, \"hg\", SesquialteralHourglass(\n down1_seq=down1_seq,\n skip1_seq=skip1_seq,\n up_seq=up_seq,\n skip2_seq=skip2_seq,\n down2_seq=down2_seq))\n setattr(self.features, \"final_block\", FishFinalBlock(in_channels=in_channels))\n in_channels = in_channels // 2\n setattr(self.features, \"final_pool\", partial(\n F.average_pooling_2d,\n ksize=7,\n stride=1))\n\n self.output = SimpleSequential()\n with self.output.init_scope():\n setattr(self.output, 'final_conv', conv1x1(\n in_channels=in_channels,\n out_channels=classes,\n use_bias=True))\n setattr(self.output, 'final_flatten', partial(\n F.reshape,\n shape=(-1, classes)))\n\n def __call__(self, x):\n x = self.features(x)\n x = self.output(x)\n return x\n\n\ndef get_fishnet(blocks,\n model_name=None,\n pretrained=False,\n root=os.path.join('~', '.chainer', 'models'),\n **kwargs):\n \"\"\"\n Create FishNet model with specific parameters.\n\n Parameters:\n ----------\n blocks : int\n Number of blocks.\n model_name : str or None, default None\n Model name for loading pretrained model.\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n\n if blocks == 99:\n direct_layers = [[2, 2, 6], [1, 1, 1], [1, 2, 2]]\n skip_layers = [[1, 1, 1, 2], [4, 1, 1, 0]]\n elif blocks == 150:\n direct_layers = [[2, 4, 8], [2, 2, 2], [2, 2, 4]]\n skip_layers = [[2, 2, 2, 4], [4, 2, 2, 0]]\n else:\n raise ValueError(\"Unsupported FishNet with number of blocks: {}\".format(blocks))\n\n 
direct_channels_per_layers = [[128, 256, 512], [512, 384, 256], [320, 832, 1600]]\n    skip_channels_per_layers = [[64, 128, 256, 512], [512, 768, 512, 0]]\n\n    direct_channels = [[[ci] * li for (ci, li) in zip(ci_list, li_list)]\n                       for (ci_list, li_list) in zip(direct_channels_per_layers, direct_layers)]\n    skip_channels = [[[ci] * li for (ci, li) in zip(ci_list, li_list)]\n                     for (ci_list, li_list) in zip(skip_channels_per_layers, skip_layers)]\n\n    init_block_channels = 64\n\n    net = FishNet(\n        direct_channels=direct_channels,\n        skip_channels=skip_channels,\n        init_block_channels=init_block_channels,\n        **kwargs)\n\n    if pretrained:\n        if (model_name is None) or (not model_name):\n            raise ValueError(\"Parameter \`model_name\` should be properly initialized for loading pretrained model.\")\n        from .model_store import get_model_file\n        load_npz(\n            file=get_model_file(\n                model_name=model_name,\n                local_model_store_dir_path=root),\n            obj=net)\n\n    return net\n\n\ndef fishnet99(**kwargs):\n    \"\"\"\n    FishNet-99 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'\n    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.\n\n    Parameters:\n    ----------\n    pretrained : bool, default False\n        Whether to load the pretrained weights for model.\n    root : str, default '~/.chainer/models'\n        Location for keeping the model parameters.\n    \"\"\"\n    return get_fishnet(blocks=99, model_name=\"fishnet99\", **kwargs)\n\n\ndef fishnet150(**kwargs):\n    \"\"\"\n    FishNet-150 model from 'FishNet: A Versatile Backbone for Image, Region, and Pixel Level Prediction,'\n    http://papers.nips.cc/paper/7356-fishnet-a-versatile-backbone-for-image-region-and-pixel-level-prediction.pdf.\n\n    Parameters:\n    ----------\n    pretrained : bool, default False\n        Whether to load the pretrained weights for model.\n    root : str, default '~/.chainer/models'\n        Location for keeping the model parameters.\n    \"\"\"\n    return get_fishnet(blocks=150, model_name=\"fishnet150\", **kwargs)\n\n\ndef _test():\n    import numpy as np\n    import chainer\n\n    chainer.global_config.train = False\n\n    pretrained = False\n\n    models = [\n        fishnet99,\n        fishnet150,\n    ]\n\n    for model in models:\n\n        net = model(pretrained=pretrained)\n        weight_count = net.count_params()\n        print(\"m={}, {}\".format(model.__name__, weight_count))\n        assert (model != fishnet99 or weight_count == 16628904)\n        assert (model != fishnet150 or weight_count == 24959400)\n\n        x = np.zeros((1, 3, 224, 224), np.float32)\n        y = net(x)\n        assert (y.shape == (1, 1000))\n\n\nif __name__ == \"__main__\":\n    _test()\n",
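"# Editor's illustrative sketch, not part of the original file above: what\n# channel_squeeze() computes. Reshaping to (batch, channels//groups, groups, H, W)\n# and summing over axis 2 adds each consecutive run of `groups` input channels\n# into one output channel.\nimport numpy as np\n\nx = np.arange(4, dtype=np.float32).reshape(1, 4, 1, 1)  # channels [0, 1, 2, 3]\ny = x.reshape(1, 2, 2, 1, 1).sum(axis=2)\nassert y.ravel().tolist() == [1.0, 5.0]  # 0 + 1 and 2 + 3\n",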
"\"\"\"\n X-DenseNet, implemented in Gluon.\n Original paper: 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'\n https://arxiv.org/abs/1711.08757.\n\"\"\"\n\n__all__ = ['XDenseNet', 'xdensenet121_2', 'xdensenet161_2', 'xdensenet169_2', 'xdensenet201_2', 'pre_xconv3x3_block',\n 'XDenseUnit']\n\nimport os\nimport mxnet as mx\nfrom mxnet import cpu\nfrom mxnet.gluon import nn, HybridBlock\nfrom .preresnet import PreResInitBlock, PreResActivation\nfrom .densenet import TransitionBlock\n\n\[email protected]\nclass XMaskInit(mx.init.Initializer):\n \"\"\"\n Returns an initializer performing \"X-Net\" initialization for masks.\n\n Parameters:\n ----------\n expand_ratio : int\n Ratio of expansion.\n \"\"\"\n def __init__(self,\n expand_ratio,\n **kwargs):\n super(XMaskInit, self).__init__(**kwargs)\n assert (expand_ratio > 0)\n self.expand_ratio = expand_ratio\n\n def _init_weight(self, _, arr):\n shape = arr.shape\n expand_size = max(shape[1] // self.expand_ratio, 1)\n shape1_arange = mx.nd.arange(shape[1], ctx=arr.context)\n arr[:] = 0\n for i in range(shape[0]):\n jj = mx.nd.random.shuffle(shape1_arange)[:expand_size]\n arr[i, jj, :, :] = 1\n\n\nclass XConv2D(nn.Conv2D):\n \"\"\"\n X-Convolution layer.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n groups : int, default 1\n Number of groups.\n expand_ratio : int, default 2\n Ratio of expansion.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n groups=1,\n expand_ratio=2,\n **kwargs):\n super(XConv2D, self).__init__(\n in_channels=in_channels,\n channels=out_channels,\n kernel_size=kernel_size,\n groups=groups,\n **kwargs)\n if isinstance(kernel_size, int):\n kernel_size = (kernel_size, kernel_size)\n grouped_in_channels = in_channels // groups\n self.mask = self.params.get(\n name=\"mask\",\n grad_req=\"null\",\n shape=(out_channels, grouped_in_channels, kernel_size[0], kernel_size[1]),\n init=XMaskInit(expand_ratio=expand_ratio),\n differentiable=False)\n\n def hybrid_forward(self, F, x, weight, bias=None, mask=None):\n masked_weight = weight * mask\n return super(XConv2D, self).hybrid_forward(F, x, weight=masked_weight, bias=bias)\n\n\nclass PreXConvBlock(HybridBlock):\n \"\"\"\n X-Convolution block with Batch normalization and ReLU pre-activation.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n kernel_size : int or tuple/list of 2 int\n Convolution window size.\n strides : int or tuple/list of 2 int\n Strides of the convolution.\n padding : int or tuple/list of 2 int\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n use_bias : bool, default False\n Whether the layer uses a bias vector.\n bn_use_global_stats : bool, default False\n Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.\n return_preact : bool, default False\n Whether return pre-activation. 
It's used by PreResNet.\n activate : bool, default True\n Whether activate the convolution block.\n expand_ratio : int, default 2\n Ratio of expansion.\n \"\"\"\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size,\n strides,\n padding,\n dilation=1,\n groups=1,\n use_bias=False,\n bn_use_global_stats=False,\n return_preact=False,\n activate=True,\n expand_ratio=2,\n **kwargs):\n super(PreXConvBlock, self).__init__(**kwargs)\n self.return_preact = return_preact\n self.activate = activate\n\n with self.name_scope():\n self.bn = nn.BatchNorm(\n in_channels=in_channels,\n use_global_stats=bn_use_global_stats)\n if self.activate:\n self.activ = nn.Activation(\"relu\")\n self.conv = XConv2D(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n strides=strides,\n padding=padding,\n dilation=dilation,\n groups=groups,\n use_bias=use_bias,\n expand_ratio=expand_ratio)\n\n def hybrid_forward(self, F, x):\n x = self.bn(x)\n if self.activate:\n x = self.activ(x)\n if self.return_preact:\n x_pre_activ = x\n x = self.conv(x)\n if self.return_preact:\n return x, x_pre_activ\n else:\n return x\n\n\ndef pre_xconv1x1_block(in_channels,\n out_channels,\n strides=1,\n use_bias=False,\n bn_use_global_stats=False,\n return_preact=False,\n activate=True,\n expand_ratio=2):\n \"\"\"\n 1x1 version of the pre-activated x-convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n strides : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n use_bias : bool, default False\n Whether the layer uses a bias vector.\n bn_use_global_stats : bool, default False\n Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.\n return_preact : bool, default False\n Whether return pre-activation.\n activate : bool, default True\n Whether activate the convolution block.\n expand_ratio : int, default 2\n Ratio of expansion.\n \"\"\"\n return PreXConvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n strides=strides,\n padding=0,\n use_bias=use_bias,\n bn_use_global_stats=bn_use_global_stats,\n return_preact=return_preact,\n activate=activate,\n expand_ratio=expand_ratio)\n\n\ndef pre_xconv3x3_block(in_channels,\n out_channels,\n strides=1,\n padding=1,\n dilation=1,\n groups=1,\n bn_use_global_stats=False,\n return_preact=False,\n activate=True,\n expand_ratio=2):\n \"\"\"\n 3x3 version of the pre-activated x-convolution block.\n\n Parameters:\n ----------\n in_channels : int\n Number of input channels.\n out_channels : int\n Number of output channels.\n strides : int or tuple/list of 2 int, default 1\n Strides of the convolution.\n padding : int or tuple/list of 2 int, default 1\n Padding value for convolution layer.\n dilation : int or tuple/list of 2 int, default 1\n Dilation value for convolution layer.\n groups : int, default 1\n Number of groups.\n bn_use_global_stats : bool, default False\n Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.\n return_preact : bool, default False\n Whether return pre-activation.\n activate : bool, default True\n Whether activate the convolution block.\n expand_ratio : int, default 2\n Ratio of expansion.\n \"\"\"\n return PreXConvBlock(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=3,\n strides=strides,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bn_use_global_stats=bn_use_global_stats,\n 
return_preact=return_preact,\n        activate=activate,\n        expand_ratio=expand_ratio)\n\n\nclass XDenseUnit(HybridBlock):\n    \"\"\"\n    X-DenseNet unit.\n\n    Parameters:\n    ----------\n    in_channels : int\n        Number of input channels.\n    out_channels : int\n        Number of output channels.\n    bn_use_global_stats : bool\n        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.\n    dropout_rate : float\n        Parameter of Dropout layer. Fraction of the input units to drop.\n    expand_ratio : int\n        Ratio of expansion.\n    \"\"\"\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 bn_use_global_stats,\n                 dropout_rate,\n                 expand_ratio,\n                 **kwargs):\n        super(XDenseUnit, self).__init__(**kwargs)\n        self.use_dropout = (dropout_rate != 0.0)\n        bn_size = 4\n        inc_channels = out_channels - in_channels\n        mid_channels = inc_channels * bn_size\n\n        with self.name_scope():\n            self.conv1 = pre_xconv1x1_block(\n                in_channels=in_channels,\n                out_channels=mid_channels,\n                bn_use_global_stats=bn_use_global_stats,\n                expand_ratio=expand_ratio)\n            self.conv2 = pre_xconv3x3_block(\n                in_channels=mid_channels,\n                out_channels=inc_channels,\n                bn_use_global_stats=bn_use_global_stats,\n                expand_ratio=expand_ratio)\n            if self.use_dropout:\n                self.dropout = nn.Dropout(rate=dropout_rate)\n\n    def hybrid_forward(self, F, x):\n        identity = x\n        x = self.conv1(x)\n        x = self.conv2(x)\n        if self.use_dropout:\n            x = self.dropout(x)\n        x = F.concat(identity, x, dim=1)\n        return x\n\n\nclass XDenseNet(HybridBlock):\n    \"\"\"\n    X-DenseNet model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'\n    https://arxiv.org/abs/1711.08757.\n\n    Parameters:\n    ----------\n    channels : list of list of int\n        Number of output channels for each unit.\n    init_block_channels : int\n        Number of output channels for the initial unit.\n    bn_use_global_stats : bool, default False\n        Whether global moving statistics is used instead of local batch-norm for BatchNorm layers.\n        Useful for fine-tuning.\n    dropout_rate : float, default 0.0\n        Parameter of Dropout layer. Fraction of the input units to drop.\n    expand_ratio : int, default 2\n        Ratio of expansion.\n    in_channels : int, default 3\n        Number of input channels.\n    in_size : tuple of two ints, default (224, 224)\n        Spatial size of the expected input image.\n    classes : int, default 1000\n        Number of classification classes.\n    \"\"\"\n    def __init__(self,\n                 channels,\n                 init_block_channels,\n                 bn_use_global_stats=False,\n                 dropout_rate=0.0,\n                 expand_ratio=2,\n                 in_channels=3,\n                 in_size=(224, 224),\n                 classes=1000,\n                 **kwargs):\n        super(XDenseNet, self).__init__(**kwargs)\n        self.in_size = in_size\n        self.classes = classes\n\n        with self.name_scope():\n            self.features = nn.HybridSequential(prefix='')\n            self.features.add(PreResInitBlock(\n                in_channels=in_channels,\n                out_channels=init_block_channels,\n                bn_use_global_stats=bn_use_global_stats))\n            in_channels = init_block_channels\n            for i, channels_per_stage in enumerate(channels):\n                stage = nn.HybridSequential(prefix=\"stage{}_\".format(i + 1))\n                with stage.name_scope():\n                    if i != 0:\n                        stage.add(TransitionBlock(\n                            in_channels=in_channels,\n                            out_channels=(in_channels // 2),\n                            bn_use_global_stats=bn_use_global_stats))\n                        in_channels = in_channels // 2\n                    for j, out_channels in enumerate(channels_per_stage):\n                        stage.add(XDenseUnit(\n                            in_channels=in_channels,\n                            out_channels=out_channels,\n                            bn_use_global_stats=bn_use_global_stats,\n                            dropout_rate=dropout_rate,\n                            expand_ratio=expand_ratio))\n                        in_channels = out_channels\n                self.features.add(stage)\n            self.features.add(PreResActivation(\n                in_channels=in_channels,\n                bn_use_global_stats=bn_use_global_stats))\n            self.features.add(nn.AvgPool2D(\n                pool_size=7,\n                strides=1))\n\n            self.output = nn.HybridSequential(prefix='')\n            self.output.add(nn.Flatten())\n            self.output.add(nn.Dense(\n                units=classes,\n                in_units=in_channels))\n\n    def hybrid_forward(self, F, x):\n        x = self.features(x)\n        x = self.output(x)\n        return x\n\n\ndef get_xdensenet(blocks,\n                  expand_ratio=2,\n                  model_name=None,\n                  pretrained=False,\n                  ctx=cpu(),\n                  root=os.path.join('~', '.mxnet', 'models'),\n                  **kwargs):\n    \"\"\"\n    Create X-DenseNet model with specific parameters.\n\n    Parameters:\n    ----------\n    blocks : int\n        Number of blocks.\n    expand_ratio : int, default 2\n        Ratio of expansion.\n    model_name : str or None, default None\n        Model name for loading pretrained model.\n    pretrained : bool, default False\n        Whether to load the pretrained weights for model.\n    ctx : Context, default CPU\n        The context in which to load the pretrained weights.\n    root : str, default '~/.mxnet/models'\n        Location for keeping the model parameters.\n    \"\"\"\n\n    if blocks == 121:\n        init_block_channels = 64\n        growth_rate = 32\n        layers = [6, 12, 24, 16]\n    elif blocks == 161:\n        init_block_channels = 96\n        growth_rate = 48\n        layers = [6, 12, 36, 24]\n    elif blocks == 169:\n        init_block_channels = 64\n        growth_rate = 32\n        layers = [6, 12, 32, 32]\n    elif blocks == 201:\n        init_block_channels = 64\n        growth_rate = 32\n        layers = [6, 12, 48, 32]\n    else:\n        raise ValueError(\"Unsupported X-DenseNet version with number of layers {}\".format(blocks))\n\n    from functools import reduce\n    # Per-stage unit widths: each stage starts at half the previous stage's final\n    # width (transition block) and then grows by growth_rate per unit.\n    channels = reduce(\n        lambda xi, yi: xi + [reduce(\n            lambda xj, yj: xj + [xj[-1] + yj],\n            [growth_rate] * yi,\n            [xi[-1][-1] // 2])[1:]],\n        layers,\n        [[init_block_channels * 2]])[1:]\n\n    net = XDenseNet(\n        channels=channels,\n        init_block_channels=init_block_channels,\n        expand_ratio=expand_ratio,\n        **kwargs)\n\n    if pretrained:\n        if (model_name is None) or (not model_name):\n            raise ValueError(\"Parameter \`model_name\` should be properly initialized for 
loading pretrained model.\")\n from .model_store import get_model_file\n net.load_parameters(\n filename=get_model_file(\n model_name=model_name,\n local_model_store_dir_path=root),\n ctx=ctx)\n\n return net\n\n\ndef xdensenet121_2(**kwargs):\n \"\"\"\n X-DenseNet-121-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'\n https://arxiv.org/abs/1711.08757.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_xdensenet(blocks=121, model_name=\"xdensenet121_2\", **kwargs)\n\n\ndef xdensenet161_2(**kwargs):\n \"\"\"\n X-DenseNet-161-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'\n https://arxiv.org/abs/1711.08757.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_xdensenet(blocks=161, model_name=\"xdensenet161_2\", **kwargs)\n\n\ndef xdensenet169_2(**kwargs):\n \"\"\"\n X-DenseNet-169-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'\n https://arxiv.org/abs/1711.08757.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_xdensenet(blocks=169, model_name=\"xdensenet169_2\", **kwargs)\n\n\ndef xdensenet201_2(**kwargs):\n \"\"\"\n X-DenseNet-201-2 model from 'Deep Expander Networks: Efficient Deep Networks from Graph Theory,'\n https://arxiv.org/abs/1711.08757.\n\n Parameters:\n ----------\n pretrained : bool, default False\n Whether to load the pretrained weights for model.\n ctx : Context, default CPU\n The context in which to load the pretrained weights.\n root : str, default '~/.mxnet/models'\n Location for keeping the model parameters.\n \"\"\"\n return get_xdensenet(blocks=201, model_name=\"xdensenet201_2\", **kwargs)\n\n\ndef _test():\n import numpy as np\n import mxnet as mx\n\n pretrained = False\n\n models = [\n xdensenet121_2,\n xdensenet161_2,\n xdensenet169_2,\n xdensenet201_2,\n ]\n\n for model in models:\n\n net = model(pretrained=pretrained)\n\n ctx = mx.cpu()\n if not pretrained:\n net.initialize(ctx=ctx)\n\n # net.hybridize()\n net_params = net.collect_params()\n weight_count = 0\n for param in net_params.values():\n if (param.shape is None) or (not param._differentiable):\n continue\n weight_count += np.prod(param.shape)\n print(\"m={}, {}\".format(model.__name__, weight_count))\n assert (model != xdensenet121_2 or weight_count == 7978856)\n assert (model != xdensenet161_2 or weight_count == 28681000)\n assert (model != xdensenet169_2 or weight_count == 14149480)\n assert (model != xdensenet201_2 or weight_count == 20013928)\n\n x = mx.nd.zeros((1, 3, 224, 224), ctx=ctx)\n y = net(x)\n assert (y.shape == (1, 1000))\n\n\nif __name__ == \"__main__\":\n _test()\n"
] |
[
[
"tensorflow.nn.relu",
"tensorflow.layers.dense",
"tensorflow.global_variables_initializer",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.layers.average_pooling2d",
"tensorflow.trainable_variables"
],
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.init.kaiming_uniform_",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.InstanceNorm2d",
"torch.nn.ReLU"
],
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.init.kaiming_uniform_",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.Linear"
],
[
"numpy.prod"
],
[
"numpy.prod"
],
[
"numpy.random.randint"
],
[
"torch.nn.Sequential",
"torch.nn.ReLU6",
"torch.nn.init.constant_",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.init.kaiming_uniform_",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
],
[
"numpy.zeros"
],
[
"numpy.prod"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
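The nested functools.reduce in get_xdensenet above computes the per-unit output channels for each DenseNet stage but is hard to read. As a minimal sketch (assuming the DenseNet-121 configuration from the code: growth_rate=32, init_block_channels=64, layers=[6, 12, 24, 16]), the same schedule can be written as an explicit loop; dense_channels is a hypothetical helper name, not part of the repository.

# Explicit-loop equivalent of the nested reduce in get_xdensenet (sketch).
def dense_channels(init_block_channels, growth_rate, layers):
    channels = []
    in_ch = init_block_channels * 2  # seeded at 2x so the first stage starts at init_block_channels
    for num_units in layers:
        in_ch //= 2                  # mirrors the channel-halving transition before each stage
        stage = []
        for _ in range(num_units):
            in_ch += growth_rate     # every XDenseUnit concatenates growth_rate new feature maps
            stage.append(in_ch)
        channels.append(stage)
    return channels

print(dense_channels(64, 32, [6, 12, 24, 16])[0])       # [96, 128, 160, 192, 224, 256]
print(dense_channels(64, 32, [6, 12, 24, 16])[-1][-1])  # 1024, the classifier input width

Note that XDenseNet.__init__ only inserts a TransitionBlock for i != 0, while the schedule halves before every stage; the 2x seed is what compensates for the missing first transition.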
JTZ18/auto-tinder
|
[
"c1d0795a402fa6c10d625d3b283597eb6b25c06b"
] |
[
"person_detector.py"
] |
[
"import numpy as np\nimport tensorflow as tf\nfrom object_detection.utils import ops as utils_ops\nfrom PIL import Image\n\n\n\nPERSON_CLASS = 1\nSCORE_THRESHOLD = 0.5\n\ndef run_inference_for_single_image(image, sess):\n ops = tf.compat.v1.get_default_graph().get_operations()\n all_tensor_names = {output.name for op in ops for output in op.outputs}\n tensor_dict = {}\n for key in [\n 'num_detections', 'detection_boxes', 'detection_scores',\n 'detection_classes', 'detection_masks'\n ]:\n tensor_name = key + ':0'\n if tensor_name in all_tensor_names:\n tensor_dict[key] = tf.compat.v1.get_default_graph().get_tensor_by_name(\n tensor_name)\n if 'detection_masks' in tensor_dict:\n # The following processing is only for single image\n detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])\n detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])\n # Reframe is required to translate mask from box coordinates to image coordinates and fit the image size.\n real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)\n detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])\n detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])\n detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(\n detection_masks, detection_boxes, image.shape[1], image.shape[2])\n detection_masks_reframed = tf.cast(\n tf.greater(detection_masks_reframed, 0.5), tf.uint8)\n # Follow the convention by adding back the batch dimension\n tensor_dict['detection_masks'] = tf.expand_dims(\n detection_masks_reframed, 0)\n image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')\n\n # Run inference\n output_dict = sess.run(tensor_dict,\n feed_dict={image_tensor: image})\n\n # all outputs are float32 numpy arrays, so convert types as appropriate\n output_dict['num_detections'] = int(output_dict['num_detections'][0])\n output_dict['detection_classes'] = output_dict[\n 'detection_classes'][0].astype(np.int64)\n output_dict['detection_boxes'] = output_dict['detection_boxes'][0]\n output_dict['detection_scores'] = output_dict['detection_scores'][0]\n if 'detection_masks' in output_dict:\n output_dict['detection_masks'] = output_dict['detection_masks'][0]\n return output_dict\n\n\ndef open_graph():\n detection_graph = tf.Graph()\n with detection_graph.as_default():\n od_graph_def = tf.compat.v1.GraphDef()\n with tf.compat.v2.io.gfile.GFile('ssd_mobilenet_v1_coco_2017_11_17/frozen_inference_graph.pb', 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n return detection_graph\n\n\ndef load_image_into_numpy_array(image):\n (im_width, im_height) = image.size\n return np.array(image.getdata()).reshape(\n (im_height, im_width, 3)).astype(np.uint8)\n\n\ndef get_person(image_path, sess):\n img = Image.open(image_path)\n image_np = load_image_into_numpy_array(img)\n image_np_expanded = np.expand_dims(image_np, axis=0)\n output_dict = run_inference_for_single_image(image_np_expanded, sess)\n\n persons_coordinates = []\n for i in range(len(output_dict[\"detection_boxes\"])):\n score = output_dict[\"detection_scores\"][i]\n classtype = output_dict[\"detection_classes\"][i]\n if score > SCORE_THRESHOLD and classtype == PERSON_CLASS:\n persons_coordinates.append(output_dict[\"detection_boxes\"][i])\n\n w, h = img.size\n for person_coordinate in persons_coordinates:\n cropped_img = img.crop((\n int(w * person_coordinate[1]),\n int(h * 
person_coordinate[0]),\n int(w * person_coordinate[3]),\n int(h * person_coordinate[2]),\n ))\n return cropped_img\n return None"
] |
[
[
"tensorflow.Graph",
"numpy.expand_dims",
"tensorflow.compat.v1.get_default_graph",
"tensorflow.import_graph_def",
"tensorflow.greater",
"tensorflow.slice",
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow.cast",
"tensorflow.squeeze",
"tensorflow.expand_dims",
"tensorflow.compat.v1.GraphDef"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
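In get_person above, the TF Object Detection API returns detection_boxes in normalized [ymin, xmin, ymax, xmax] order, while PIL's Image.crop expects pixel (left, upper, right, lower). A minimal sketch of that conversion; box_to_crop is an illustrative helper, not from the repository.

def box_to_crop(box, width, height):
    # box is (ymin, xmin, ymax, xmax), each value in [0, 1]
    ymin, xmin, ymax, xmax = box
    return (int(width * xmin), int(height * ymin),
            int(width * xmax), int(height * ymax))

print(box_to_crop((0.1, 0.2, 0.9, 0.8), 640, 480))  # (128, 48, 512, 432)

This is exactly the index shuffle in the img.crop call: coordinates 1 and 3 are scaled by the width, coordinates 0 and 2 by the height.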
kknet/imalookalike
|
[
"ca0917bbc24b74d560ebe5b83dd882a6683abddf"
] |
[
"benchmark/net/facenet_predictor.py"
] |
[
"import tensorflow as tf\nimport benchmark.net.facenet.facenet as facenet\nfrom benchmark.net.facenet import detect_face\nimport cv2\nimport numpy as np\n\n\n# some constants kept as default from facenet\nminsize = 20\nthreshold = [0.6, 0.7, 0.7]\nfactor = 0.709\nmargin = 44\ninput_image_size = 160\n\nsess = None\nimages_placeholder = None\nembeddings = None\nphase_train_placeholder = None\nembedding_size = None\npnet = None\nrnet = None\nonet = None\n\n\ndef init_model(model_path):\n global sess\n global images_placeholder\n global embeddings\n global phase_train_placeholder\n global embedding_size\n global pnet\n global rnet\n global onet\n\n facenet.load_model(model_path)\n\n sess = tf.Session()\n\n images_placeholder = tf.get_default_graph().get_tensor_by_name(\"input:0\")\n embeddings = tf.get_default_graph().get_tensor_by_name(\"embeddings:0\")\n phase_train_placeholder = tf.get_default_graph().get_tensor_by_name(\"phase_train:0\")\n embedding_size = embeddings.get_shape()[1]\n pnet, rnet, onet = detect_face.create_mtcnn(sess, 'facenet\\\\align')\n\n\ndef get_cropped_image(img):\n img_size = np.asarray(img.shape)[0:2]\n bounding_boxes, points = detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)\n if not len(bounding_boxes) == 0:\n for face in bounding_boxes:\n if face[4] > 0.50:\n det = np.squeeze(face[0:4])\n bb = np.zeros(4, dtype=np.int32)\n bb[0] = np.maximum(det[0] - margin / 2, 0)\n bb[1] = np.maximum(det[1] - margin / 2, 0)\n bb[2] = np.minimum(det[2] + margin / 2, img_size[1])\n bb[3] = np.minimum(det[3] + margin / 2, img_size[0])\n cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]\n return cropped\n\n\ndef predict_embeddings(imgs):\n prewhiteneds = []\n for img in imgs:\n cropped = get_cropped_image(img)\n if cropped is None:\n print(\"not detected\")\n cropped = img\n resized = cv2.resize(cropped, (input_image_size, input_image_size), interpolation=cv2.INTER_CUBIC)\n prewhitened = facenet.prewhiten(resized)\n prewhiteneds.append(prewhitened)\n reshaped = np.array(prewhiteneds).reshape((-1, input_image_size, input_image_size, 3))\n feed_dict = {images_placeholder: reshaped, phase_train_placeholder: False}\n embedding = sess.run(embeddings, feed_dict=feed_dict)\n return embedding\n\n\n"
] |
[
[
"numpy.maximum",
"numpy.minimum",
"numpy.asarray",
"numpy.squeeze",
"tensorflow.Session",
"tensorflow.get_default_graph",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
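get_cropped_image above grows each MTCNN box by margin/2 per side and clamps it to the frame with np.maximum / np.minimum. A small standalone sketch with made-up numbers:

import numpy as np

det = np.array([10.0, 5.0, 150.0, 200.0])  # x1, y1, x2, y2 from the detector (toy values)
margin = 44
img_h, img_w = 180, 160                     # img_size is (height, width)

bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)      # left edge, clamped at 0
bb[1] = np.maximum(det[1] - margin / 2, 0)      # top edge, clamped at 0
bb[2] = np.minimum(det[2] + margin / 2, img_w)  # right edge, clamped at image width
bb[3] = np.minimum(det[3] + margin / 2, img_h)  # bottom edge, clamped at image height
print(bb)  # [  0   0 160 180]

Without the clamp, faces near the border would yield negative slice indices after the margin is added, and numpy would silently wrap those around to the opposite side of the image.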
hanranCode/mega.pytorch
|
[
"28c8a184372aa57a942576a944b3526590bc1ace",
"28c8a184372aa57a942576a944b3526590bc1ace"
] |
[
"mega_core/modeling/roi_heads/box_head/inference.py",
"tests/test_box_coder.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom mega_core.structures.bounding_box import BoxList\nfrom mega_core.structures.boxlist_ops import boxlist_nms\nfrom mega_core.structures.boxlist_ops import cat_boxlist\nfrom mega_core.modeling.box_coder import BoxCoder\n\n\nclass PostProcessor(nn.Module):\n \"\"\"\n From a set of classification scores, box regression and proposals,\n computes the post-processed boxes, and applies NMS to obtain the\n final results\n \"\"\"\n\n def __init__(\n self,\n score_thresh=0.05,\n nms=0.5,\n detections_per_img=100,\n box_coder=None,\n cls_agnostic_bbox_reg=False,\n bbox_aug_enabled=False\n ):\n \"\"\"\n Arguments:\n score_thresh (float)\n nms (float)\n detections_per_img (int)\n box_coder (BoxCoder)\n \"\"\"\n super(PostProcessor, self).__init__()\n self.score_thresh = score_thresh\n self.nms = nms\n self.detections_per_img = detections_per_img\n if box_coder is None:\n box_coder = BoxCoder(weights=(10., 10., 5., 5.))\n self.box_coder = box_coder\n self.cls_agnostic_bbox_reg = cls_agnostic_bbox_reg\n self.bbox_aug_enabled = bbox_aug_enabled\n\n def forward(self, x, boxes):\n \"\"\"\n Arguments:\n x (tuple[tensor, tensor]): x contains the class logits\n and the box_regression from the model.\n boxes (list[BoxList]): bounding boxes that are used as\n reference, one for each image\n\n Returns:\n results (list[BoxList]): one BoxList for each image, containing\n the extra fields labels and scores\n \"\"\"\n class_logits, box_regression = x\n class_prob = F.softmax(class_logits, -1)\n\n # TODO think about a representation of batch of boxes\n image_shapes = [box.size for box in boxes]\n boxes_per_image = [len(box) for box in boxes]\n concat_boxes = torch.cat([a.bbox for a in boxes], dim=0)\n\n if self.cls_agnostic_bbox_reg:\n box_regression = box_regression[:, -4:]\n proposals = self.box_coder.decode(\n box_regression.view(sum(boxes_per_image), -1), concat_boxes\n )\n if self.cls_agnostic_bbox_reg:\n proposals = proposals.repeat(1, class_prob.shape[1])\n\n num_classes = class_prob.shape[1]\n\n proposals = proposals.split(boxes_per_image, dim=0)\n class_prob = class_prob.split(boxes_per_image, dim=0)\n\n results = []\n for prob, boxes_per_img, image_shape in zip(\n class_prob, proposals, image_shapes\n ):\n boxlist = self.prepare_boxlist(boxes_per_img, prob, image_shape)\n boxlist = boxlist.clip_to_image(remove_empty=False)\n if not self.bbox_aug_enabled: # If bbox aug is enabled, we will do it later\n boxlist = self.filter_results(boxlist, num_classes)\n results.append(boxlist)\n return results\n\n def prepare_boxlist(self, boxes, scores, image_shape):\n \"\"\"\n Returns BoxList from `boxes` and adds probability scores information\n as an extra field\n `boxes` has shape (#detections, 4 * #classes), where each row represents\n a list of predicted bounding boxes for each of the object classes in the\n dataset (including the background class). The detections in each row\n originate from the same object proposal.\n `scores` has shape (#detection, #classes), where each row represents a list\n of object detection confidence scores for each of the object classes in the\n dataset (including the background class). 
`scores[i, j]`` corresponds to the\n box at `boxes[i, j * 4:(j + 1) * 4]`.\n \"\"\"\n boxes = boxes.reshape(-1, 4)\n scores = scores.reshape(-1)\n boxlist = BoxList(boxes, image_shape, mode=\"xyxy\")\n boxlist.add_field(\"scores\", scores)\n return boxlist\n\n def filter_results(self, boxlist, num_classes):\n \"\"\"Returns bounding-box detection results by thresholding on scores and\n applying non-maximum suppression (NMS).\n \"\"\"\n # unwrap the boxlist to avoid additional overhead.\n # if we had multi-class NMS, we could perform this directly on the boxlist\n boxes = boxlist.bbox.reshape(-1, num_classes * 4)\n scores = boxlist.get_field(\"scores\").reshape(-1, num_classes)\n\n device = scores.device\n result = []\n # Apply threshold on detection probabilities and apply NMS\n # Skip j = 0, because it's the background class\n inds_all = scores > self.score_thresh\n for j in range(1, num_classes):\n inds = inds_all[:, j].nonzero().squeeze(1)\n scores_j = scores[inds, j]\n boxes_j = boxes[inds, j * 4 : (j + 1) * 4]\n boxlist_for_class = BoxList(boxes_j, boxlist.size, mode=\"xyxy\")\n boxlist_for_class.add_field(\"scores\", scores_j)\n boxlist_for_class = boxlist_nms(\n boxlist_for_class, self.nms\n )\n num_labels = len(boxlist_for_class)\n boxlist_for_class.add_field(\n \"labels\", torch.full((num_labels,), j, dtype=torch.int64, device=device)\n )\n result.append(boxlist_for_class)\n\n result = cat_boxlist(result)\n number_of_detections = len(result)\n\n # Limit to max_per_image detections **over all classes**\n if number_of_detections > self.detections_per_img > 0:\n cls_scores = result.get_field(\"scores\")\n image_thresh, _ = torch.kthvalue(\n cls_scores.cpu(), number_of_detections - self.detections_per_img + 1\n )\n keep = cls_scores >= image_thresh.item()\n keep = torch.nonzero(keep).squeeze(1)\n result = result[keep]\n return result\n\n\ndef make_roi_box_post_processor(cfg):\n use_fpn = cfg.MODEL.ROI_HEADS.USE_FPN\n\n bbox_reg_weights = cfg.MODEL.ROI_HEADS.BBOX_REG_WEIGHTS\n box_coder = BoxCoder(weights=bbox_reg_weights)\n\n score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH\n nms_thresh = cfg.MODEL.ROI_HEADS.NMS\n detections_per_img = cfg.MODEL.ROI_HEADS.DETECTIONS_PER_IMG\n cls_agnostic_bbox_reg = cfg.MODEL.CLS_AGNOSTIC_BBOX_REG\n bbox_aug_enabled = cfg.TEST.BBOX_AUG.ENABLED\n\n postprocessor = PostProcessor(\n score_thresh,\n nms_thresh,\n detections_per_img,\n box_coder,\n cls_agnostic_bbox_reg,\n bbox_aug_enabled\n )\n return postprocessor\n",
"# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n\nimport unittest\n\nimport numpy as np\nimport torch\nfrom mega_core.modeling.box_coder import BoxCoder\n\n\nclass TestBoxCoder(unittest.TestCase):\n def test_box_decoder(self):\n \"\"\" Match unit test UtilsBoxesTest.TestBboxTransformRandom in\n caffe2/operators/generate_proposals_op_util_boxes_test.cc\n \"\"\"\n box_coder = BoxCoder(weights=(1.0, 1.0, 1.0, 1.0))\n bbox = torch.from_numpy(\n np.array(\n [\n 175.62031555,\n 20.91103172,\n 253.352005,\n 155.0145874,\n 169.24636841,\n 4.85241556,\n 228.8605957,\n 105.02092743,\n 181.77426147,\n 199.82876587,\n 192.88427734,\n 214.0255127,\n 174.36262512,\n 186.75761414,\n 296.19091797,\n 231.27906799,\n 22.73153877,\n 92.02596283,\n 135.5695343,\n 208.80291748,\n ]\n )\n .astype(np.float32)\n .reshape(-1, 4)\n )\n\n deltas = torch.from_numpy(\n np.array(\n [\n 0.47861834,\n 0.13992102,\n 0.14961673,\n 0.71495209,\n 0.29915856,\n -0.35664671,\n 0.89018666,\n 0.70815367,\n -0.03852064,\n 0.44466892,\n 0.49492538,\n 0.71409376,\n 0.28052918,\n 0.02184832,\n 0.65289006,\n 1.05060139,\n -0.38172557,\n -0.08533806,\n -0.60335309,\n 0.79052375,\n ]\n )\n .astype(np.float32)\n .reshape(-1, 4)\n )\n\n gt_bbox = (\n np.array(\n [\n 206.949539,\n -30.715202,\n 297.387665,\n 244.448486,\n 143.871216,\n -83.342888,\n 290.502289,\n 121.053398,\n 177.430283,\n 198.666245,\n 196.295273,\n 228.703079,\n 152.251892,\n 145.431564,\n 387.215454,\n 274.594238,\n 5.062420,\n 11.040955,\n 66.328903,\n 269.686218,\n ]\n )\n .astype(np.float32)\n .reshape(-1, 4)\n )\n\n results = box_coder.decode(deltas, bbox)\n\n np.testing.assert_allclose(results.detach().numpy(), gt_bbox, atol=1e-4)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"torch.nn.functional.softmax",
"torch.nonzero",
"torch.full",
"torch.cat"
],
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
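filter_results in PostProcessor above caps the per-image detections with torch.kthvalue rather than a full sort: the (n - cap + 1)-th smallest score is the lowest score that survives. A toy sketch (scores chosen to be exact in float32):

import torch

scores = torch.tensor([0.5, 0.25, 0.75, 0.125, 0.625])
detections_per_img = 3
n = scores.numel()

image_thresh, _ = torch.kthvalue(scores, n - detections_per_img + 1)
keep = torch.nonzero(scores >= image_thresh.item()).squeeze(1)
print(image_thresh.item(), keep.tolist())  # 0.5 [0, 2, 4]

Ties at the threshold can keep slightly more than detections_per_img boxes, which matches the >= comparison in the original code.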
jschneidewind/pyH2A
|
[
"66f3943916ebdf5a4bae1706b74e5cef6131460e",
"66f3943916ebdf5a4bae1706b74e5cef6131460e"
] |
[
"Code/Plugins/Replacement_Plugin.py",
"Code/find_nearest.py"
] |
[
"from input_modification import insert, process_input, sum_all_tables\nimport numpy as np\nimport find_nearest as fn\n\nclass Replacement_Plugin:\n\t'''\n\t______________\n\tRequired Input\n\t______________\n\n\t# Planned Replacement\n\tName | Frequency (years) | Cost ($) | Path (optional)\n\t--- | --- | ---\n\tstr | num | num\n\n\tprocess_input() is used, meaning that if a path is specified, the corresponding value at that path is retrieved and \n\tmultiplied by \"Cost ($)\" to obtain the actual replacement cost.\n\n\t# Unplanned Replacement [...]\n\tName | Value\n\t--- | ---\n\tstr | num\n\n\tsum_all_tables() processed\n\n\t______________\n\tOutput\n\t______________\n\n\tInsertion of \"Summed Total\" for each sum_all_tables() processed table\n\n\tReplacement > Total > Value\n\n\t'''\n\tdef __init__(replace, self, print_info):\n\t\treplace.initialize_yearly_costs(self)\n\t\treplace.calculate_planned_replacement(self)\n\t\treplace.unplanned_replacement(self, print_info)\n\n\t\tyearly_inflated = replace.yearly * self.inflation_correction * self.inflation_factor\n\n\t\tinsert(self, 'Replacement', 'Total', 'Value', yearly_inflated, __name__, print_info = print_info)\n\n\tdef initialize_yearly_costs(replace, self):\n\n\t\treplace.yearly = np.zeros(len(self.inflation_factor))\n\n\tdef calculate_planned_replacement(replace, self):\n\n\t\tfor key in self.inp['Planned Replacement']:\n\t\t\tplanned_replacement = Planned_Replacement(self.inp['Planned Replacement'][key], key, self)\n\t\t\treplace.yearly[planned_replacement.years_idx] += planned_replacement.cost\n\n\tdef unplanned_replacement(replace, self, print_info):\n\n\t\treplace.unplanned = sum_all_tables(self.inp, 'Unplanned Replacement', 'Value', insert_total = True, class_object = self, print_info = print_info)\n\t\treplace.yearly += replace.unplanned\n\nclass Planned_Replacement:\n\t'''Replacement costs are billed annually, replacements which are performed at a non-integer rate are corrected using non_interger_correction\n\t'''\n\n\tdef __init__(planned, dictionary, key, self):\n\t\tplanned.calculate_yearly_cost(dictionary, key, self)\n\t\t\n\tdef calculate_yearly_cost(planned, dictionary, key, self):\n\n\t\treplacement_frequency = int(np.ceil(dictionary['Frequency (years)']))\n\t\tnon_integer_correction = replacement_frequency / dictionary['Frequency (years)']\n\n\t\traw_replacement_cost = process_input(self.inp, 'Planned Replacement', key, 'Cost ($)')\n\t\tinitial_replacement_year_idx = fn.find_nearest(self.plant_years, replacement_frequency)[0]\n\n\t\tplanned.cost = raw_replacement_cost * non_integer_correction * self.combined_inflator\n\t\tplanned.years = self.plant_years[initial_replacement_year_idx:][0::replacement_frequency]\n\t\tplanned.years_idx = fn.find_nearest(self.plant_years, planned.years)",
"import numpy as np\nimport math as math\n\ndef find_nearest(array, values):\n \n if array.ndim != 1:\n array_1d = array[:,0]\n else:\n array_1d = array\n\n values = np.atleast_1d(values)\n hits = []\n\n for i in range(len(values)):\n\n idx = np.searchsorted(array_1d, values[i], side= \"left\")\n if idx > 0 and (idx == len(array_1d) or math.fabs(values[i] - array_1d[idx-1]) < math.fabs(values[i] - array_1d[idx])):\n hits.append(idx-1)\n else:\n hits.append(idx)\n\n return(hits)"
] |
[
[
"numpy.ceil"
],
[
"numpy.atleast_1d",
"numpy.searchsorted"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
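find_nearest above relies on np.searchsorted, so it assumes its input array is sorted (true for plant_years), and Planned_Replacement then slices the year grid with a stride to get the billing years. A minimal sketch of that slicing, assuming integer plant years starting at 0:

import numpy as np

plant_years = np.arange(0, 20)
replacement_frequency = 3

first_idx = np.searchsorted(plant_years, replacement_frequency, side="left")
replacement_years = plant_years[first_idx:][0::replacement_frequency]
print(replacement_years)  # [ 3  6  9 12 15 18]

searchsorted returns the left insertion point; find_nearest then compares the two neighbouring entries with math.fabs to decide which one is actually closer.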
ishine/qa_match
|
[
"f1ede11a3e799edfb5e90d5b4396b304d2365778",
"1edfcb3a87233522eba81b7ab47e05e5acec1e59",
"f1ede11a3e799edfb5e90d5b4396b304d2365778"
] |
[
"sptm/run_classifier.py",
"run_bi_lstm.py",
"dec_mining/print_sen_embedding.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nfinetune on pretrained model with trainset and devset\n\"\"\"\n\nimport sys\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport models\nimport utils\n\n\ndef evaluate(sess, full_tensors, args, model):\n total_num = 0\n right_num = 0\n for batch_data in utils.gen_batchs(full_tensors, args.batch_size, is_shuffle=False):\n softmax_re = sess.run(model.softmax_op,\n feed_dict={model.ph_dropout_rate: 0,\n model.ph_tokens: batch_data[0],\n model.ph_labels: batch_data[1],\n model.ph_length: batch_data[2],\n model.ph_input_mask: batch_data[3]})\n pred_re = np.argmax(softmax_re, axis=1)\n total_num += len(pred_re)\n right_num += np.sum(pred_re == batch_data[1])\n acc = 1.0 * right_num / (total_num + 1e-5)\n\n tf.logging.info(\"dev total num: \" + str(total_num) + \", right num: \" + str(right_num) + \", acc: \" + str(acc))\n return acc\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--train_file\", type=str, default=\"\", help=\"Input train file.\")\n parser.add_argument(\"--dev_file\", type=str, default=\"\", help=\"Input dev file.\")\n parser.add_argument(\"--vocab_file\", type=str, default=\"\", help=\"Input vocab file.\")\n parser.add_argument(\"--output_id2label_file\", type=str, default=\"./id2label\",\n help=\"File containing (id, class label) map.\")\n parser.add_argument(\"--model_save_dir\", type=str, default=\"\",\n help=\"Specified the directory in which the model should stored.\")\n parser.add_argument(\"--lstm_dim\", type=int, default=500, help=\"Dimension of LSTM cell.\")\n parser.add_argument(\"--embedding_dim\", type=int, default=1000, help=\"Dimension of word embedding.\")\n parser.add_argument(\"--opt_type\", type=str, default='adam', help=\"Type of optimizer.\")\n parser.add_argument(\"--batch_size\", type=int, default=32, help=\"Batch size.\")\n parser.add_argument(\"--epoch\", type=int, default=20, help=\"Epoch.\")\n parser.add_argument(\"--learning_rate\", type=float, default=1e-4, help=\"Learning rate.\")\n parser.add_argument(\"--dropout_rate\", type=float, default=0.1, help=\"Dropout rate\")\n parser.add_argument(\"--seed\", type=int, default=1, help=\"Random seed value.\")\n parser.add_argument(\"--print_step\", type=int, default=1000, help=\"Print log every x step.\")\n parser.add_argument(\"--init_checkpoint\", type=str, default='',\n help=\"Initial checkpoint (usually from a pre-trained model).\")\n parser.add_argument(\"--max_len\", type=int, default=100, help=\"Max seqence length.\")\n parser.add_argument(\"--layer_num\", type=int, default=2, help=\"LSTM layer num.\")\n\n parser.add_argument(\"--representation_type\", type=str, default=\"lstm\",\n help=\"representation type include:lstm, transformer\")\n\n # transformer args\n parser.add_argument(\"--initializer_range\", type=float, default=\"0.02\", help=\"Embedding initialization range\")\n parser.add_argument(\"--max_position_embeddings\", type=int, default=512, help=\"max position num\")\n parser.add_argument(\"--hidden_size\", type=int, default=768, help=\"hidden size\")\n parser.add_argument(\"--num_hidden_layers\", type=int, default=12, help=\"num hidden layer\")\n parser.add_argument(\"--num_attention_heads\", type=int, default=12, help=\"num attention heads\")\n parser.add_argument(\"--intermediate_size\", type=int, default=3072, help=\"intermediate_size\")\n\n args = parser.parse_args()\n\n np.random.seed(args.seed)\n tf.set_random_seed(args.seed)\n 
tf.logging.info(str(args))\n if not os.path.exists(args.model_save_dir):\n os.mkdir(args.model_save_dir)\n\n tf.logging.info(\"load training sens\")\n train_sens = utils.load_training_data(args.train_file, skip_invalid=True)\n tf.logging.info(\"\\nload dev sens\")\n dev_sens = utils.load_training_data(args.dev_file, skip_invalid=True)\n\n word2id, id2word, label2id, id2label = utils.load_vocab(train_sens + dev_sens, args.vocab_file)\n fw = open(args.output_id2label_file, 'w+')\n for k, v in id2label.items():\n fw.write(str(k) + \"\\t\" + v + \"\\n\")\n fw.close()\n\n utils.gen_ids(train_sens, word2id, label2id, args.max_len)\n utils.gen_ids(dev_sens, word2id, label2id, args.max_len)\n\n train_full_tensors = utils.make_full_tensors(train_sens)\n dev_full_tensors = utils.make_full_tensors(dev_sens)\n\n other_arg_dict = {}\n other_arg_dict['token_num'] = len(word2id)\n other_arg_dict['label_num'] = len(label2id)\n model = models.create_finetune_classification_training_op(args, other_arg_dict)\n\n steps_in_epoch = int(len(train_sens) // args.batch_size)\n tf.logging.info(\"batch size: \" + str(args.batch_size) + \", training sample num : \" + str(\n len(train_sens)) + \", print step : \" + str(args.print_step))\n tf.logging.info(\n \"steps_in_epoch : \" + str(steps_in_epoch) + \", epoch num :\" + str(args.epoch) + \", total steps : \" + str(\n args.epoch * steps_in_epoch))\n print_step = min(args.print_step, steps_in_epoch)\n tf.logging.info(\"eval dev every {} step\".format(print_step))\n\n save_vars = [v for v in tf.global_variables() if\n v.name.find('adam') < 0 and v.name.find('Adam') < 0 and v.name.find('ADAM') < 0]\n tf.logging.info(str(save_vars))\n tf.logging.info(str(tf.all_variables()))\n\n saver = tf.train.Saver(max_to_keep=2)\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n sess.run(tf.global_variables_initializer())\n total_loss = 0\n dev_best_so_far = 0\n for epoch in range(1, args.epoch + 1):\n tf.logging.info(\"\\n\" + \"*\" * 20 + \"epoch num :\" + str(epoch) + \"*\" * 20)\n for batch_data in utils.gen_batchs(train_full_tensors, args.batch_size, is_shuffle=True):\n _, global_step, loss = sess.run([model.train_op, model.global_step_op, model.loss_op],\n feed_dict={model.ph_dropout_rate: args.dropout_rate,\n model.ph_tokens: batch_data[0],\n model.ph_labels: batch_data[1],\n model.ph_length: batch_data[2],\n model.ph_input_mask: batch_data[3]})\n total_loss += loss\n if global_step % print_step == 0:\n tf.logging.info(\n \"\\nglobal step : \" + str(global_step) + \", avg loss so far : \" + str(total_loss / global_step))\n tf.logging.info(\"begin to eval dev set: \")\n acc = evaluate(sess, dev_full_tensors, args, model)\n if acc > dev_best_so_far:\n dev_best_so_far = acc\n tf.logging.info(\"!\" * 20 + \"best got : \" + str(acc))\n # constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, [\"scores\"])\n saver.save(sess, args.model_save_dir + '/finetune.ckpt', global_step=global_step)\n\n tf.logging.info(\"\\n----------------------eval after one epoch: \")\n tf.logging.info(\n \"global step : \" + str(global_step) + \", avg loss so far : \" + str(total_loss / global_step))\n tf.logging.info(\"begin to eval dev set: \")\n sys.stdout.flush()\n acc = evaluate(sess, dev_full_tensors, args, model)\n if acc > dev_best_so_far:\n dev_best_so_far = acc\n tf.logging.info(\"!\" * 20 + \"best got : \" + str(acc))\n saver.save(sess, args.model_save_dir + '/finetune.ckpt', 
global_step=global_step)\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n",
"# coding=utf-8\n\n\"\"\"\nrunning bi-lstm for short text classification\n\n\"\"\"\n\nimport os\nimport tensorflow as tf\nimport shutil\nfrom utils.classifier_utils import TextLoader\nfrom models.bilstm import BiLSTM\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\"train_path\", None, \"dir for train data\")\nflags.DEFINE_string(\"valid_path\", None, \"dir for valid data\")\nflags.DEFINE_string(\"map_file_path\", None, \"dir for label std question mapping\")\nflags.DEFINE_string(\"model_path\", None, \"dir for save checkpoint data\")\n# flags.DEFINE_string(\"result_file\", None, \"file for valid result\")\nflags.DEFINE_string(\"vocab_file\", None, \"file for vocab\")\nflags.DEFINE_string(\"label_file\", None, \"file for label\")\nflags.DEFINE_integer(\"embedding_size\", 256, \"size of word embedding\")\nflags.DEFINE_integer(\"num_units\", 256, \"The number of units in the LSTM cell\")\nflags.DEFINE_integer(\"vocab_size\", 256, \"The size of vocab\")\nflags.DEFINE_integer(\"label_size\", 256, \"The num of label\")\nflags.DEFINE_integer(\"batch_size\", 128, \"batch_size of train data\")\nflags.DEFINE_integer(\"seq_length\", 50, \"the length of sequence\")\nflags.DEFINE_integer(\"num_epcho\", 30, \"the epcho num\")\nflags.DEFINE_integer(\"check_every\", 100, \"the epcho num\")\nflags.DEFINE_integer(\"lstm_layers\", 2, \"the layers of lstm\")\nflags.DEFINE_float(\"lr\", 0.001, \"learning rate\")\nflags.DEFINE_float(\"dropout_keep_prob\", 0.8, \"drop_out keep prob\")\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n data_loader = TextLoader(True, FLAGS.train_path, FLAGS.map_file_path, FLAGS.batch_size, FLAGS.seq_length, None,\n None, None, 'utf8', False)\n valid_data_loader = TextLoader(False, FLAGS.valid_path, FLAGS.map_file_path, FLAGS.batch_size, FLAGS.seq_length,\n data_loader.vocab,\n data_loader.labels, data_loader.std_label_map, 'utf8', False)\n tf.logging.info(\"vocab_size: \" + str(data_loader.vocab_size))\n FLAGS.vocab_size = data_loader.vocab_size\n tf.logging.info(\"label_size: \" + str(data_loader.label_size))\n FLAGS.label_size = data_loader.label_size\n bilstm = BiLSTM(FLAGS)\n init = tf.global_variables_initializer()\n config = tf.ConfigProto(allow_soft_placement=True)\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n sess.run(init)\n idx = 0\n test_best_acc = 0\n for epcho in range(FLAGS.num_epcho): # for each epoch\n data_loader.reset_batch_pointer()\n for train_batch_num in range(data_loader.num_batches): # for each batch\n input_x, input_y, x_len, _ = data_loader.next_batch()\n feed = {bilstm.input_x: input_x, bilstm.input_y: input_y, bilstm.x_len: x_len,\n bilstm.dropout_keep_prob: FLAGS.dropout_keep_prob}\n _, global_step_op, train_loss, train_acc = sess.run(\n [bilstm.train_step, bilstm.global_step, bilstm.loss, bilstm.acc], feed_dict=feed)\n tf.logging.info(\"training...........global_step = {}, epoch = {}, current_batch = {}, \"\n \"train_loss = {:.4f}, accuracy = {:.4f}\".format(global_step_op, epcho, train_batch_num,\n train_loss, train_acc))\n idx += 1\n if idx % FLAGS.check_every == 0:\n all_num = 0\n acc_num = 0\n valid_data_loader.reset_batch_pointer()\n write_result = []\n for _ in range(valid_data_loader.num_batches):\n input_x_valid, input_y_valid, x_len_valid, _ = valid_data_loader.next_batch()\n feed = {bilstm.input_x: input_x_valid, bilstm.input_y: input_y_valid, bilstm.x_len: x_len_valid,\n bilstm.dropout_keep_prob: 1.0}\n prediction, arg_index = sess.run([bilstm.prediction, 
bilstm.arg_index], feed_dict=feed)\n all_num = all_num + len(input_y_valid)\n # write_str = \"\"\n for i, indexs in enumerate(arg_index):\n pre_label_id = indexs[0]\n real_label_id = input_y_valid[i]\n if pre_label_id == real_label_id:\n acc_num = acc_num + 1\n # if real_label_id in valid_data_loader.id_2_label:\n # write_str = valid_data_loader.id_2_label.get(real_label_id)\n # else:\n # write_str = \"__label__unknown\"\n # for index in indexs:\n # cur_label = valid_data_loader.id_2_label.get(index)\n # cur_score = prediction[i][index]\n # write_str = write_str + \" \" + cur_label + \":\" + str(cur_score)\n # write_str = write_str + \"\\n\"\n # write_result.append(write_str)\n test_acc = acc_num * 1.0 / all_num\n tf.logging.info(\n \"testing...........global_step = {}, epoch = {}, accuracy = {:.4f}, cur_best_acc = {}\".format(\n global_step_op, epcho, test_acc, test_best_acc))\n if test_best_acc < test_acc:\n test_best_acc = test_acc\n # save_model\n if not os.path.exists(FLAGS.model_path):\n os.makedirs(FLAGS.model_path)\n checkpoint_path = os.path.join(FLAGS.model_path, 'lstm_model')\n bilstm.saver.save(sess, checkpoint_path, global_step=global_step_op)\n # export model\n export_path = os.path.join(FLAGS.model_path, 'lstm_tf_serving')\n if os.path.isdir(export_path):\n shutil.rmtree(export_path)\n bilstm.export_model(export_path, sess)\n # resultfile = open(FLAGS.result_file, 'w', encoding='utf-8')\n # for pre_sen in write_result:\n # resultfile.write(pre_sen)\n tf.logging.info(\n \"has saved model and write.result...................................................................\")\n # resultfile.close()\n # save label and vocab\n vocabfile = open(FLAGS.vocab_file, 'w', encoding='utf-8')\n for key, value in data_loader.vocab.items():\n vocabfile.write(str(key) + \"\\t\" + str(value) + '\\n')\n vocabfile.close()\n labelfile = open(FLAGS.label_file, 'w', encoding='utf-8')\n for key, value in data_loader.labels.items():\n labelfile.write(str(key) + \"\\t\" + str(value) + '\\n')\n labelfile.close()\n # break\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n",
"# -*- coding: utf-8 -*-\n\"\"\"\nprint sentence embedding\n\"\"\"\nimport os\nimport sys\nimport argparse\nimport codecs\nimport tensorflow as tf\nimport numpy as np\nimport data_utils\n\n# get graph output in different way: max mean concat\ndef get_output(g, embedding_way):\n if embedding_way == \"concat\": # here may have problem, this is just for 4 layers of biLM !\n t = g.get_tensor_by_name(\"concat_4:0\")\n elif embedding_way == \"max\":\n t = g.get_tensor_by_name(\"Max:0\")\n elif embedding_way == 'mean':\n t = g.get_tensor_by_name(\"Mean:0\")\n else:\n assert False\n return {\"sen_embedding\": t}\n\n# get graph input\ndef get_input(g):\n return {\"tokens\": g.get_tensor_by_name(\"ph_tokens:0\"),\n \"length\": g.get_tensor_by_name(\"ph_length:0\"),\n \"dropout_rate\": g.get_tensor_by_name(\"ph_dropout_rate:0\")}\n\ndef gen_test_data(input_file, word2id, max_seq_len):\n sens = []\n center_size = []\n for line in codecs.open(input_file, 'r', 'utf-8'):\n # separated by slash\n ls = line.strip().split(\"/\")\n center_size.append(len(ls))\n for l in ls:\n l = l.replace(\" \", \"\")\n l = l.replace(\"\", \" \")\n fs = l.rstrip().split()\n if len(fs) > max_seq_len:\n continue\n sen = []\n for f in fs:\n if f in word2id:\n sen.append(word2id[f])\n else:\n sen.append(word2id['<UNK>'])\n sens.append(sen)\n return sens, center_size\n\nif __name__==\"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_file\", type=str, default=\"\")\n parser.add_argument(\"--vocab_file\", type=str, default=\"\")\n parser.add_argument(\"--model_path\", type=str, default=\"\")\n parser.add_argument(\"--batch_size\", type=int, default=256)\n parser.add_argument(\"--max_seq_len\", type=int, default=100)\n parser.add_argument(\"--output_file\", type=str, default=\"\")\n # sentence representation output way : max mean concat\n parser.add_argument(\"--embedding_way\", type=str, default=\"concat\")\n args = parser.parse_args()\n\n word2id, id2word = data_utils.load_vocab_file(args.vocab_file)\n sys.stderr.write(\"vocab num : \" + str(len(word2id)) + \"\\n\")\n sens, center_size = gen_test_data(args.input_file, word2id, args.max_seq_len)\n sys.stderr.write(\"sens num : \" + str(len(sens)) + \"\\n\")\n tf.logging.info(\"embedding_way : \", args.embedding_way)\n\n # limit cpu resource\n cpu_num = int(os.environ.get('CPU_NUM', 15))\n config = tf.ConfigProto(device_count={\"CPU\": cpu_num},\n inter_op_parallelism_threads = cpu_num,\n intra_op_parallelism_threads = cpu_num,\n log_device_placement=True)\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n saver = tf.train.import_meta_graph(\"{}.meta\".format(args.model_path))\n saver.restore(sess, args.model_path)\n\n graph = tf.get_default_graph()\n input_dict = get_input(graph)\n output_dict = get_output(graph, args.embedding_way)\n\n caches = []\n idx = 0\n while idx < len(sens):\n batch_sens = sens[idx:idx + args.batch_size]\n batch_tokens = []\n batch_length = []\n for sen in batch_sens:\n batch_tokens.append(sen)\n batch_length.append(len(sen))\n\n real_max_len = max([len(b) for b in batch_tokens])\n for b in batch_tokens:\n b.extend([0] * (real_max_len - len(b)))\n\n re = sess.run(output_dict['sen_embedding'],\n feed_dict={input_dict['tokens']: batch_tokens,\n input_dict['length']: batch_length,\n input_dict[\"dropout_rate\"]: 0.0})\n if len(caches) % 200 == 0:\n tf.logging.info(len(caches))\n caches.append(re)\n idx += len(batch_sens)\n\n sen_embeddings = np.concatenate(caches, 0)\n # calculate average 
embedding\n avg_centers = []\n\n idx = 0\n for size in center_size:\n avg_center_emb = np.average(sen_embeddings[idx: idx + size], axis=0)\n avg_centers.append(avg_center_emb)\n idx = idx + size\n\n np.savetxt(args.output_file, avg_centers, fmt='%.3e')\n"
] |
[
[
"tensorflow.all_variables",
"numpy.random.seed",
"tensorflow.global_variables",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"numpy.argmax",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"tensorflow.set_random_seed",
"tensorflow.train.Saver",
"numpy.sum",
"tensorflow.app.run"
],
[
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.logging.info",
"tensorflow.logging.set_verbosity",
"tensorflow.Session",
"tensorflow.app.run"
],
[
"tensorflow.ConfigProto",
"numpy.concatenate",
"tensorflow.logging.info",
"tensorflow.Session",
"numpy.savetxt",
"tensorflow.get_default_graph",
"numpy.average"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
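print_sen_embedding.py above pads each batch to the longest sentence in that batch (not to max_seq_len), and keeps the true lengths alongside so the biLM can ignore the padding. A toy sketch of that loop body:

batch_tokens = [[5, 8, 2], [7, 1], [4, 9, 6, 3]]
batch_length = [len(b) for b in batch_tokens]   # recorded before padding

real_max_len = max(len(b) for b in batch_tokens)
for b in batch_tokens:
    b.extend([0] * (real_max_len - len(b)))     # pad with token id 0

print(batch_tokens)  # [[5, 8, 2, 0], [7, 1, 0, 0], [4, 9, 6, 3]]
print(batch_length)  # [3, 2, 4]

Per-batch padding keeps the fed tensors small whenever a batch happens to contain only short sentences.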
jsturtz/env
|
[
"d523b0be3345f883a727679d58ff29efb4389d16"
] |
[
"Lib/site-packages/pandas_ml/snsaccessors/test/test_sns.py"
] |
[
"#!/usr/bin/env python\nimport pytest\n\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport numpy as np # noqa\nimport sklearn.datasets as datasets # noqa\n\nimport pandas_ml as pdml # noqa\nimport pandas_ml.util.testing as tm # noqa\n\ntry:\n import seaborn as sns # noqa\nexcept ImportError:\n pass\n\n\nclass SeabornCase(tm.PlottingTestCase):\n\n def setup_method(self):\n\n try:\n import matplotlib.pyplot # noqa\n except ImportError:\n import nose\n # matplotlib.use doesn't work on Travis\n # PYTHON=3.4 PANDAS=0.17.1 SKLEARN=0.16.1\n raise nose.SkipTest()\n\n self.iris = pdml.ModelFrame(datasets.load_iris())\n\n self.diabetes = pdml.ModelFrame(datasets.load_diabetes())\n # convert columns to str\n self.diabetes.columns = ['col{0}'.format(c) if isinstance(c, int)\n else c for c in self.diabetes.columns]\n\n\nclass TestSeabornAttrs(SeabornCase):\n\n def test_objectmapper(self):\n\n df = pdml.ModelFrame([])\n self.assertIs(df.sns.palplot, sns.palplot)\n self.assertIs(df.sns.set, sns.set)\n self.assertIs(df.sns.axes_style, sns.axes_style)\n self.assertIs(df.sns.plotting_context, sns.plotting_context)\n self.assertIs(df.sns.set_context, sns.set_context)\n self.assertIs(df.sns.set_color_codes, sns.set_color_codes)\n self.assertIs(df.sns.reset_defaults, sns.reset_defaults)\n self.assertIs(df.sns.reset_orig, sns.reset_orig)\n self.assertIs(df.sns.set_palette, sns.set_palette)\n self.assertIs(df.sns.color_palette, sns.color_palette)\n self.assertIs(df.sns.husl_palette, sns.husl_palette)\n self.assertIs(df.sns.hls_palette, sns.hls_palette)\n self.assertIs(df.sns.cubehelix_palette, sns.cubehelix_palette)\n self.assertIs(df.sns.dark_palette, sns.dark_palette)\n self.assertIs(df.sns.light_palette, sns.light_palette)\n self.assertIs(df.sns.diverging_palette, sns.diverging_palette)\n self.assertIs(df.sns.blend_palette, sns.blend_palette)\n self.assertIs(df.sns.xkcd_palette, sns.xkcd_palette)\n self.assertIs(df.sns.crayon_palette, sns.crayon_palette)\n self.assertIs(df.sns.mpl_palette, sns.mpl_palette)\n self.assertIs(df.sns.choose_colorbrewer_palette,\n sns.choose_colorbrewer_palette)\n self.assertIs(df.sns.choose_cubehelix_palette,\n sns.choose_cubehelix_palette)\n self.assertIs(df.sns.choose_light_palette,\n sns.choose_light_palette)\n\n self.assertIs(df.sns.choose_dark_palette, sns.choose_dark_palette)\n self.assertIs(df.sns.choose_diverging_palette,\n sns.choose_diverging_palette)\n self.assertIs(df.sns.despine, sns.despine)\n self.assertIs(df.sns.desaturate, sns.desaturate)\n self.assertIs(df.sns.saturate, sns.saturate)\n self.assertIs(df.sns.set_hls_values, sns.set_hls_values)\n # self.assertIs(df.sns.ci_to_errsize, sns.ci_to_errsize)\n # self.assertIs(df.sns.axlabel, sns.axlabel)\n\n\nclass TestSeabornDistribution(SeabornCase):\n\n def test_jointplot(self):\n df = self.iris\n\n jg = df.sns.jointplot(df.columns[1])\n self.assertIsInstance(jg, sns.JointGrid)\n self.assertEqual(jg.ax_joint.get_xlabel(), df.columns[1])\n self.assertEqual(jg.ax_joint.get_ylabel(), '.target')\n tm.close()\n\n jg = df.sns.jointplot(df.columns[2], df.columns[3])\n self.assertIsInstance(jg, sns.JointGrid)\n self.assertEqual(jg.ax_joint.get_xlabel(), df.columns[2])\n self.assertEqual(jg.ax_joint.get_ylabel(), df.columns[3])\n\n def test_pairplot(self):\n df = self.iris\n\n pg = df.sns.pairplot()\n self._check_axes_shape(pg.axes, axes_num=25,\n layout=(5, 5), figsize=None)\n for i in range(5):\n self.assertEqual(pg.axes[i][0].get_ylabel(), df.columns[i])\n self.assertEqual(pg.axes[-1][i].get_xlabel(), df.columns[i])\n 
tm.close()\n\n def test_distplot(self):\n return # ToDo: only fails on Travis\n\n df = self.iris\n\n ax = df.sns.distplot()\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), '.target')\n tm.close()\n\n # pass scalar (str)\n ax = df.sns.distplot(df.columns[1])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), df.columns[1])\n tm.close()\n\n # pass Series\n ax = df.sns.distplot(df[df.columns[2]])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), df.columns[2])\n\n def test_dist_error(self):\n df = pdml.ModelFrame(np.random.randn(100, 5), columns=list('abcde'))\n\n msg = \"a can't be ommitted when ModelFrame doesn't have target column\"\n with pytest.raises(ValueError, match=msg):\n df.sns.distplot()\n\n df.target = df[['a', 'b']]\n self.assertTrue(df.has_multi_targets())\n\n msg = \"a can't be ommitted when ModelFrame has multiple target columns\"\n with pytest.raises(ValueError, match=msg):\n df.sns.distplot()\n\n def test_kdeplot(self):\n df = pdml.ModelFrame(np.random.randn(100, 5), columns=list('abcde'))\n df.target = df['a']\n\n ax = df.sns.kdeplot()\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), '')\n self.assertEqual(ax.get_ylabel(), '')\n tm.close()\n\n ax = df.sns.kdeplot(data='b', data2='c')\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), 'b')\n self.assertEqual(ax.get_ylabel(), 'c')\n tm.close()\n\n ax = df.sns.kdeplot(data=df['b'], data2=df['c'])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), 'b')\n self.assertEqual(ax.get_ylabel(), 'c')\n\n def test_rugplot(self):\n df = self.iris\n\n ax = df.sns.rugplot()\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n # rugplot does not add label\n\n # pass scalar (str)\n ax = df.sns.rugplot(df.columns[1])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n\n # pass Series\n ax = df.sns.rugplot(df[df.columns[2]])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n\n def test_kde_rug_mix(self):\n import matplotlib.pyplot as plt\n\n df = pdml.ModelFrame(np.random.randn(100, 5), columns=list('abcde'))\n df.target = df['a']\n\n f, ax = plt.subplots(figsize=(6, 6))\n ax = df.sns.kdeplot('b', 'c', ax=ax)\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), 'b')\n self.assertEqual(ax.get_ylabel(), 'c')\n # plot continues, do not reset by tm.close()\n\n ax = df.sns.rugplot('b', color=\"g\", ax=ax)\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), 'b')\n self.assertEqual(ax.get_ylabel(), 'c')\n\n ax = df.sns.rugplot('c', vertical=True, ax=ax)\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), 'b')\n self.assertEqual(ax.get_ylabel(), 'c')\n\n\nclass TestSeabornRegression(SeabornCase):\n\n def test_lmplot(self):\n df = self.diabetes\n\n fg = df.sns.lmplot(df.columns[1])\n self.assertIsInstance(fg, sns.FacetGrid)\n self.assertEqual(fg.ax.get_xlabel(), df.columns[1])\n self.assertEqual(fg.ax.get_ylabel(), '.target')\n tm.close()\n\n fg = df.sns.lmplot(df.columns[1], df.columns[2])\n self.assertIsInstance(fg, sns.FacetGrid)\n self.assertEqual(fg.ax.get_xlabel(), df.columns[1])\n self.assertEqual(fg.ax.get_ylabel(), df.columns[2])\n\n def test_regression_plot(self):\n df = self.diabetes\n\n plots = ['regplot', 'residplot']\n\n for plot in plots:\n func = getattr(df.sns, plot)\n\n ax = func(df.columns[1])\n self.assertIsInstance(ax, 
matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), df.columns[1])\n self.assertEqual(ax.get_ylabel(), '.target')\n tm.close()\n\n ax = func(df.columns[1], df.columns[2])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), df.columns[1])\n self.assertEqual(ax.get_ylabel(), df.columns[2])\n tm.close()\n\n def test_interactplot(self):\n pass\n\n def test_coefplot(self):\n pass\n\n\nclass TestSeabornCategorical(SeabornCase):\n\n def test_factorplots(self):\n df = self.iris\n\n fg = df.sns.factorplot(df.columns[1])\n self.assertIsInstance(fg, sns.FacetGrid)\n self.assertEqual(fg.ax.get_xlabel(), '.target')\n self.assertEqual(fg.ax.get_ylabel(), df.columns[1])\n tm.close()\n\n fg = df.sns.factorplot(x=df.columns[1])\n self.assertIsInstance(fg, sns.FacetGrid)\n self.assertEqual(fg.ax.get_xlabel(), df.columns[1])\n self.assertEqual(fg.ax.get_ylabel(), '.target')\n tm.close()\n\n fg = df.sns.factorplot(x=df.columns[1], y=df.columns[2])\n self.assertIsInstance(fg, sns.FacetGrid)\n self.assertEqual(fg.ax.get_xlabel(), df.columns[1])\n self.assertEqual(fg.ax.get_ylabel(), df.columns[2])\n\n def test_categoricalplots(self):\n df = self.iris\n\n plots = ['boxplot', 'violinplot', 'stripplot']\n\n for plot in plots:\n func = getattr(df.sns, plot)\n ax = func(df.columns[1])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), '.target')\n self.assertEqual(ax.get_ylabel(), df.columns[1])\n tm.close()\n\n ax = func(y=df.columns[1])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), '.target')\n self.assertEqual(ax.get_ylabel(), df.columns[1])\n tm.close()\n\n ax = func(x=df.columns[1])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), df.columns[1])\n self.assertEqual(ax.get_ylabel(), '.target')\n tm.close()\n\n ax = func(x=df.columns[1], y=df.columns[2])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), df.columns[1])\n self.assertEqual(ax.get_ylabel(), df.columns[2])\n tm.close()\n\n def test_categorical_mean_plots(self):\n df = self.iris\n\n plots = ['pointplot', 'barplot']\n\n for plot in plots:\n func = getattr(df.sns, plot)\n ax = func(df.columns[1])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), '.target')\n self.assertEqual(ax.get_ylabel(), '{0}'.format(df.columns[1]))\n tm.close()\n\n ax = func(y=df.columns[1])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), '.target')\n self.assertEqual(ax.get_ylabel(), '{0}'.format(df.columns[1]))\n tm.close()\n\n ax = func(x=df.columns[1])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), df.columns[1])\n self.assertEqual(ax.get_ylabel(), '{0}'.format('.target'))\n tm.close()\n\n ax = func(x=df.columns[1], y=df.columns[2])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), df.columns[1])\n self.assertEqual(ax.get_ylabel(), '{0}'.format(df.columns[2]))\n tm.close()\n\n def test_count_plots(self):\n df = self.iris\n\n ax = df.sns.countplot()\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), '.target')\n self.assertEqual(ax.get_ylabel(), 'count')\n tm.close()\n\n return # ToDo: only fails on Travis\n\n ax = df.sns.countplot(df.columns[1])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), df.columns[1])\n self.assertEqual(ax.get_ylabel(), 'count')\n tm.close()\n\n ax = 
df.sns.countplot(x=df.columns[1])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), df.columns[1])\n self.assertEqual(ax.get_ylabel(), 'count')\n tm.close()\n\n ax = df.sns.countplot(y=df.columns[1])\n self.assertIsInstance(ax, matplotlib.axes.Axes)\n self.assertEqual(ax.get_xlabel(), 'count')\n self.assertEqual(ax.get_ylabel(), df.columns[1])\n tm.close()\n\n with tm.assertRaises(TypeError):\n # can't pass both x and y\n df.sns.countplot(x=df.columns[1], y=df.columns[2])\n\n # Matrix\n\n def test_heatmap(self):\n pass\n\n def test_clustermap(self):\n pass\n\n # Timeseries\n\n def test_tsplot(self):\n pass\n\n # AxisGrid\n\n def test_facetgrid(self):\n df = self.iris\n\n fg = df.sns.FacetGrid(df.columns[0])\n self.assertIsInstance(fg, sns.FacetGrid)\n self._check_axes_shape(fg.axes, axes_num=3, layout=(3, 1), figsize=None)\n tm.close()\n\n fg = df.sns.FacetGrid(row=df.columns[0])\n self.assertIsInstance(fg, sns.FacetGrid)\n self._check_axes_shape(fg.axes, axes_num=3, layout=(3, 1), figsize=None)\n tm.close()\n\n fg = df.sns.FacetGrid(col=df.columns[0])\n self.assertIsInstance(fg, sns.FacetGrid)\n self._check_axes_shape(fg.axes, axes_num=3, layout=(1, 3), figsize=None)\n tm.close()\n\n def test_pairgrid(self):\n df = self.iris\n\n pg = df.sns.PairGrid()\n self.assertIsInstance(pg, sns.PairGrid)\n self._check_axes_shape(pg.axes, axes_num=25, layout=(5, 5), figsize=None)\n\n def test_jointgrid(self):\n df = self.iris\n\n jg = df.sns.JointGrid(x=df.columns[1], y=df.columns[1])\n self.assertIsInstance(jg, sns.JointGrid)\n"
] |
[
[
"matplotlib.use",
"sklearn.datasets.load_iris",
"matplotlib.pyplot.subplots",
"sklearn.datasets.load_diabetes",
"numpy.random.randn"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
i-pan/kaggle-melanoma
|
[
"def5cfbbb7cbf80560b41ef93fd2d6ee6e9e7b5a",
"def5cfbbb7cbf80560b41ef93fd2d6ee6e9e7b5a",
"def5cfbbb7cbf80560b41ef93fd2d6ee6e9e7b5a",
"def5cfbbb7cbf80560b41ef93fd2d6ee6e9e7b5a"
] |
[
"src/factory/evaluate/metrics.py",
"src/check_cv_scores.py",
"src/factory/models/efficientnet.py",
"src/etl/3_combine_isic2019_cropped.py"
] |
[
"import numpy as np\n\nfrom sklearn import metrics\n\n\ndef auc(y_true, y_pred, **kwargs):\n # y_pred.shape = (N, C)\n # AUC for melanoma (class 0)\n return {'auc': metrics.roc_auc_score((y_true==0).astype('float'), y_pred[:,0])}\n\n\ndef mel_auc(y_true, y_pred, **kwargs):\n # y_pred.shape = (N, C)\n # AUC for melanoma + nevi (class 0+1)\n return {'mel_auc': metrics.roc_auc_score((y_true<=1).astype('float'), y_pred[:,0]+y_pred[:,1])}\n\n\ndef mel_f1(y_true, y_pred, **kwargs):\n # y_pred.shape = (N, C)\n # AUC for melanoma + nevi (class 0+1)\n t = (y_true <= 1).astype('float')\n p = (y_pred[:,0] + y_pred[:,1]) >= 0.5\n p = p.astype('float')\n return {'mel_f1': metrics.f1_score(t, p)}\n\n\ndef accuracy(y_true, y_pred, **kwargs):\n return {'accuracy': np.mean(y_true == np.argmax(y_pred, axis=1))}\n\n\ndef auc2(y_true, y_pred, **kwargs):\n # y_pred.shape = (N, 2)\n # AUC for melanoma (class 1)\n return {'auc2': metrics.roc_auc_score(y_true, y_pred)}\n\n\ndef arc_auc(y_true, y_pred, **kwargs):\n # y_pred.shape = (N, 2)\n # AUC for melanoma (class 1)\n return {'arc_auc': metrics.roc_auc_score(y_true, y_pred)}\n\n\ndef auc3(y_true, y_pred, **kwargs):\n # y_pred.shape = (N, 3) - includes prediction for nevus\n t = (y_true == 2).astype('float')\n p = y_pred[:,2]\n return {'auc3': metrics.roc_auc_score(t, p)}",
"import argparse\nimport numpy as np\nimport glob\nimport os, os.path as osp\n\nfrom collections import defaultdict\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('dir', type=str)\n return parser.parse_args()\n\n\ndef get_metric_from_checkpoint(fp):\n return float(fp.split('/')[-1].split('-')[-1].replace('.PTH', ''))\n\n\ndef main():\n args = parse_args()\n folds = np.sort(glob.glob(osp.join(args.dir, '*')))\n folds = list(folds)\n\n folds_dict = defaultdict(list)\n for fo in folds:\n checkpoints = glob.glob(osp.join(fo, '*.PTH'))\n if len(checkpoints) == 0:\n continue\n\n for ckpt in checkpoints:\n value = get_metric_from_checkpoint(ckpt)\n folds_dict[fo].append(value)\n\n for fo in np.sort([*folds_dict]):\n print(f'{fo.split(\"/\")[-1].upper()} : {np.max(folds_dict[fo]):.4f}')\n\n print('=====')\n print(f'CVAVG : {np.mean([np.max(v) for v in folds_dict.values()]):.4f}')\n\n\nif __name__ == '__main__':\n main()\n",
"import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom .efficientnet_utils import (\n round_filters,\n round_repeats,\n drop_connect,\n get_same_padding_conv2d,\n get_model_params,\n efficientnet_params,\n load_pretrained_weights,\n Swish,\n MemoryEfficientSwish,\n)\n\nclass MBConvBlock(nn.Module):\n \"\"\"\n Mobile Inverted Residual Bottleneck Block\n\n Args:\n block_args (namedtuple): BlockArgs, see above\n global_params (namedtuple): GlobalParam, see above\n\n Attributes:\n has_se (bool): Whether the block contains a Squeeze and Excitation layer.\n \"\"\"\n\n def __init__(self, block_args, global_params):\n super().__init__()\n self._block_args = block_args\n self._bn_mom = 1 - global_params.batch_norm_momentum\n self._bn_eps = global_params.batch_norm_epsilon\n self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)\n self.id_skip = block_args.id_skip # skip connection and drop connect\n\n # Get static or dynamic convolution depending on image size\n Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)\n\n # Expansion phase\n inp = self._block_args.input_filters # number of input channels\n oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels\n if self._block_args.expand_ratio != 1:\n self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)\n self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)\n\n # Depthwise convolution phase\n k = self._block_args.kernel_size\n s = self._block_args.stride\n self._depthwise_conv = Conv2d(\n in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise\n kernel_size=k, stride=s, bias=False)\n self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)\n\n # Squeeze and Excitation layer, if desired\n if self.has_se:\n num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))\n self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)\n self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)\n\n # Output phase\n final_oup = self._block_args.output_filters\n self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)\n self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)\n self._swish = MemoryEfficientSwish()\n\n def forward(self, inputs, drop_connect_rate=None):\n \"\"\"\n :param inputs: input tensor\n :param drop_connect_rate: drop connect rate (float, between 0 and 1)\n :return: output of block\n \"\"\"\n\n # Expansion and Depthwise Convolution\n x = inputs\n if self._block_args.expand_ratio != 1:\n x = self._swish(self._bn0(self._expand_conv(inputs)))\n x = self._swish(self._bn1(self._depthwise_conv(x)))\n\n # Squeeze and Excitation\n if self.has_se:\n x_squeezed = F.adaptive_avg_pool2d(x, 1)\n x_squeezed = self._se_expand(self._swish(self._se_reduce(x_squeezed)))\n x = torch.sigmoid(x_squeezed) * x\n\n x = self._bn2(self._project_conv(x))\n\n # Skip connection and drop connect\n input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters\n if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:\n if drop_connect_rate:\n x = drop_connect(x, p=drop_connect_rate, training=self.training)\n x = x + inputs # skip connection\n return x\n\n def set_swish(self, 
memory_efficient=True):\n \"\"\"Sets swish function as memory efficient (for training) or standard (for export)\"\"\"\n self._swish = MemoryEfficientSwish() if memory_efficient else Swish()\n\n\nclass EfficientNet(nn.Module):\n \"\"\"\n An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods\n\n Args:\n blocks_args (list): A list of BlockArgs to construct blocks\n global_params (namedtuple): A set of GlobalParams shared between blocks\n\n Example:\n model = EfficientNet.from_pretrained('efficientnet-b0')\n\n \"\"\"\n\n def __init__(self, blocks_args=None, global_params=None):\n super().__init__()\n assert isinstance(blocks_args, list), 'blocks_args should be a list'\n assert len(blocks_args) > 0, 'block args must be greater than 0'\n self._global_params = global_params\n self._blocks_args = blocks_args\n\n # Get static or dynamic convolution depending on image size\n Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)\n\n # Batch norm parameters\n bn_mom = 1 - self._global_params.batch_norm_momentum\n bn_eps = self._global_params.batch_norm_epsilon\n\n # Stem\n in_channels = 3 # rgb\n out_channels = round_filters(32, self._global_params) # number of output channels\n self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)\n self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)\n\n # Build blocks\n self._blocks = nn.ModuleList([])\n for block_args in self._blocks_args:\n\n # Update block input and output filters based on depth multiplier.\n block_args = block_args._replace(\n input_filters=round_filters(block_args.input_filters, self._global_params),\n output_filters=round_filters(block_args.output_filters, self._global_params),\n num_repeat=round_repeats(block_args.num_repeat, self._global_params)\n )\n\n # The first block needs to take care of stride and filter size increase.\n self._blocks.append(MBConvBlock(block_args, self._global_params))\n if block_args.num_repeat > 1:\n block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)\n for _ in range(block_args.num_repeat - 1):\n self._blocks.append(MBConvBlock(block_args, self._global_params))\n\n # Head\n in_channels = block_args.output_filters # output of final block\n out_channels = round_filters(1280, self._global_params)\n self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)\n self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)\n\n # Final linear layer\n self.avgpool = nn.AdaptiveAvgPool2d(1)\n self._dropout = nn.Dropout(self._global_params.dropout_rate)\n self._fc = nn.Linear(out_channels, self._global_params.num_classes)\n self._swish = MemoryEfficientSwish()\n\n def set_swish(self, memory_efficient=True):\n \"\"\"Sets swish function as memory efficient (for training) or standard (for export)\"\"\"\n self._swish = MemoryEfficientSwish() if memory_efficient else Swish()\n for block in self._blocks:\n block.set_swish(memory_efficient)\n\n\n def extract_features(self, inputs):\n \"\"\" Returns output of the final convolution layer \"\"\"\n\n # Stem\n x = self._swish(self._bn0(self._conv_stem(inputs)))\n\n # Blocks\n for idx, block in enumerate(self._blocks):\n drop_connect_rate = self._global_params.drop_connect_rate\n if drop_connect_rate:\n drop_connect_rate *= float(idx) / len(self._blocks)\n x = block(x, drop_connect_rate=drop_connect_rate)\n\n # Head\n x = self._swish(self._bn1(self._conv_head(x)))\n\n return x\n\n def forward(self, 
inputs):\n \"\"\" Calls extract_features to extract features, applies final linear layer, and returns logits. \"\"\"\n bs = inputs.size(0)\n # Convolution layers\n x = self.extract_features(inputs)\n\n # Pooling and final linear layer\n x = self.avgpool(x)\n x = x.view(bs, -1)\n x = self._dropout(x)\n x = self._fc(x)\n return x\n\n @classmethod\n def from_name(cls, model_name, override_params=None):\n cls._check_model_name_is_valid(model_name)\n blocks_args, global_params = get_model_params(model_name, override_params)\n return cls(blocks_args, global_params)\n\n @classmethod\n def from_pretrained(cls, model_name, num_classes=1000, in_channels = 3):\n model = cls.from_name(model_name, override_params={'num_classes': num_classes})\n load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000))\n if in_channels != 3:\n Conv2d = get_same_padding_conv2d(image_size = model._global_params.image_size)\n out_channels = round_filters(32, model._global_params)\n model._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)\n return model\n \n @classmethod\n def from_pretrained(cls, model_name, num_classes=1000):\n model = cls.from_name(model_name, override_params={'num_classes': num_classes})\n load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000))\n\n return model\n\n @classmethod\n def get_image_size(cls, model_name):\n cls._check_model_name_is_valid(model_name)\n _, _, res, _ = efficientnet_params(model_name)\n return res\n\n @classmethod\n def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False):\n \"\"\" Validates model name. None that pretrained weights are only available for\n the first four models (efficientnet-b{i} for i in 0,1,2,3) at the moment. \"\"\"\n num_models = 4 if also_need_pretrained_weights else 8\n valid_models = ['efficientnet-b'+str(i) for i in range(num_models)]\n if model_name not in valid_models:\n raise ValueError('model_name should be one of: ' + ', '.join(valid_models))",
"import pandas as pd\nimport os.path as osp\n\n\nx = pd.read_csv('../../data/train_with_stratified_splits.csv')\ny = pd.read_csv('../../data/isic2019/train_with_splits.csv')\ny['image'] = [osp.join('data/isic2019/ISIC_2019_Training_Input/', _) for _ in y['image']]\nx['image'] = [osp.join('data/cropped/train/', _) for _ in x['image']]\ny['label'] = y['MEL']\ny = y[['image', 'label']]\ny['isic'] = 2019\nx['isic'] = 2020\n\ncols = [c for c in x.columns if 'outer' in c or 'inner' in c]\nfor c in cols: y[c] = 888\n\n# Combine everything\ndf = pd.concat([x,y])\ndf.to_csv('../../data/complete_cropped_combined_train_with_splits.csv', index=False)\n\n# Multiply melanoma from ISIC20 by 8\ndf = pd.concat([x,y]+[x[x['target'] == 1]]*7)\ndf.to_csv('../../data/upsampled_cropped_combined_train_with_splits.csv', index=False)\n\n# Only add melanomas from ISIC2019\ndf = pd.concat([x,y[y['label'] == 1]]+[x[x['target'] == 1]]*7)\ndf.to_csv('../../data/upsampled_cropped_combined_train_with_splits.csv', index=False)\n"
] |
[
[
"sklearn.metrics.roc_auc_score",
"sklearn.metrics.f1_score",
"numpy.argmax"
],
[
"numpy.max",
"numpy.sort"
],
[
"torch.nn.Dropout",
"torch.sigmoid",
"torch.nn.ModuleList",
"torch.nn.functional.adaptive_avg_pool2d",
"torch.nn.Linear",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.BatchNorm2d"
],
[
"pandas.concat",
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
zhaoyuzhi/ChildPredictor
|
[
"ba36d9851a37522ec5a0de4eab3b973f872d885e",
"ba36d9851a37522ec5a0de4eab3b973f872d885e",
"ba36d9851a37522ec5a0de4eab3b973f872d885e",
"ba36d9851a37522ec5a0de4eab3b973f872d885e"
] |
[
"code/babymapping_1219/Models/pggan_tf_official/mapping_4.py",
"code/ProGAN/tfutil.py",
"code/babymapping_1219/Models/pggan_tf_official/base_generator.py",
"code/babyinverse/inverse.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nfrom torchvision import models\nimport torch.nn.functional as F\nimport numpy as np\nimport math\n\nimport torch\nimport torch.nn as nn\n\n\"\"\"\n Mapping network\n Input: two tensor of size (batchsize, 512, 4, 4)\n Output: a tensor of size (batchsize, 480)\n\n how to combine two tensor into one tensor is a challenge.\n\"\"\"\nclass MappingResBlock(nn.Module):\n def __init__(self, in_channels, ksize=3, padding=0, stride=1, res_weight=0.1):\n super(MappingResBlock, self).__init__()\n self.res_weight = res_weight\n # Initialize the conv scheme\n self.conv2d = nn.Sequential(\n nn.Conv2d(in_channels, in_channels, ksize, padding=padding, stride=stride),\n #nn.BatchNorm2d(in_channels),\n nn.LeakyReLU(0.2, inplace = False),\n nn.Conv2d(in_channels, in_channels, ksize, padding=padding, stride=stride)\n #nn.BatchNorm2d(in_channels),\n )\n\n def forward(self, x):\n residual = x\n out = self.conv2d(x)\n out = self.res_weight * out + residual\n return out\n\nclass MappingNet(nn.Module):\n def __init__(self, in_channels, out_channels, input_norm=False, output_norm=True):\n super(MappingNet, self).__init__()\n self.input_norm = input_norm\n self.output_norm = output_norm\n # Head block\n self.head = nn.Sequential(\n nn.Conv2d(in_channels * 4, in_channels*2, 3, stride=2, padding=1), #in: 2048,4,4 out: 1024,3,3\n nn.LeakyReLU(0.2, inplace = False),\n nn.Conv2d(in_channels*2, in_channels, 2, stride=1, padding=0) #in:1024,3,3 out:512,1,1\n )\n # Bottle neck 感觉5个resblock应该够了把...\n self.bottle = nn.Sequential(\n MappingResBlock(in_channels, 1, 0, 1),\n MappingResBlock(in_channels, 1, 0, 1),\n MappingResBlock(in_channels, 1, 0, 1),\n MappingResBlock(in_channels, 1, 0, 1),\n MappingResBlock(in_channels, 1, 0, 1),\n # MappingResBlock(in_channels),\n # MappingResBlock(in_channels),\n # MappingResBlock(in_channels)\n )\n self.final = nn.Linear(in_channels, out_channels) #in_channels=512, out_channels = 480\n\n def forward(self, x_father, x_mother):\n assert x_father.shape==x_mother.shape, 'shape of x_father and x_mother is different, x_father:{} x_mother'.format(x_father.shape, x_mother.shape)\n if self.input_norm:\n x_father = (x_father - x_father.mean(dim=[1,2,3]).reshape(x_father.shape[0],1,1,1)) / x_father.var(dim=[1,2,3]).reshape(x_father.shape[0],1,1,1)\n x_mother = (x_mother - x_mother.mean(dim=[1,2,3]).reshape(x_mother.shape[0],1,1,1)) / x_mother.var(dim=[1,2,3]).reshape(x_mother.shape[0],1,1,1)\n x = torch.cat((x_father, x_mother), dim=1) #在channel维进行合并 -> [bs, 1024, 4, 4]\n #head block \n out = self.head(x)\n # Bottle neck\n out = self.bottle(out)\n # Final conv\n out = out.reshape(out.shape[0], out.shape[1])\n out = self.final(out)\n if self.output_norm:\n out = (out - out.mean(dim=1).reshape(out.shape[0], 1)) / out.var(dim=1).reshape(out.shape[0], 1)\n \n return out #[batchsize, 512]\n\n\nif __name__ == '__main__':\n x_father = torch.randn((1,1024,4,4)).cuda()\n x_mother = torch.randn((1,1024,4,4)).cuda()\n\n net = MappingNet(512, 480).cuda()\n\n code_of_child = net(x_father, x_mother)\n print(code_of_child.shape)\n",
"# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# This work is licensed under the Creative Commons Attribution-NonCommercial\n# 4.0 International License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc/4.0/ or send a letter to\n# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.\n\nimport os\nimport sys\nimport inspect\nimport importlib\nimport imp\nimport numpy as np\nfrom collections import OrderedDict\nimport tensorflow as tf\nimport pdb\n#----------------------------------------------------------------------------\n# Convenience.\n\ndef run(*args, **kwargs): # Run the specified ops in the default session.\n return tf.get_default_session().run(*args, **kwargs)\n\ndef is_tf_expression(x):\n return isinstance(x, tf.Tensor) or isinstance(x, tf.Variable) or isinstance(x, tf.Operation)\n\ndef shape_to_list(shape):\n return [dim.value for dim in shape]\n\ndef flatten(x):\n with tf.name_scope('Flatten'):\n return tf.reshape(x, [-1])\n\ndef log2(x):\n with tf.name_scope('Log2'):\n return tf.log(x) * np.float32(1.0 / np.log(2.0))\n\ndef exp2(x):\n with tf.name_scope('Exp2'):\n return tf.exp(x * np.float32(np.log(2.0)))\n\ndef lerp(a, b, t):\n with tf.name_scope('Lerp'):\n return a + (b - a) * t\n\ndef lerp_clip(a, b, t):\n with tf.name_scope('LerpClip'):\n return a + (b - a) * tf.clip_by_value(t, 0.0, 1.0)\n\ndef absolute_name_scope(scope): # Forcefully enter the specified name scope, ignoring any surrounding scopes.\n return tf.name_scope(scope + '/')\n\n#----------------------------------------------------------------------------\n# Initialize TensorFlow graph and session using good default settings.\n\ndef init_tf(config_dict=dict()):\n if tf.get_default_session() is None:\n tf.set_random_seed(np.random.randint(1 << 31))\n create_session(config_dict, force_as_default=True)\n\n#----------------------------------------------------------------------------\n# Create tf.Session based on config dict of the form\n# {'gpu_options.allow_growth': True}\n\ndef create_session(config_dict=dict(), force_as_default=False):\n config = tf.ConfigProto()\n for key, value in config_dict.items(): #允许放置运行的子图\n fields = key.split('.')\n obj = config\n for field in fields[:-1]:\n obj = getattr(obj, field)\n setattr(obj, fields[-1], value)\n session = tf.Session(config=config)\n if force_as_default:\n session._default_session = session.as_default()\n session._default_session.enforce_nesting = False\n session._default_session.__enter__()\n return session#返回session,并且为default session\n\n#----------------------------------------------------------------------------\n# Initialize all tf.Variables that have not already been initialized.\n# Equivalent to the following, but more efficient and does not bloat the tf graph:\n# tf.variables_initializer(tf.report_unitialized_variables()).run()\n\ndef init_uninited_vars(vars=None):\n if vars is None: vars = tf.global_variables()\n test_vars = []; test_ops = []\n with tf.control_dependencies(None): # ignore surrounding control_dependencies\n for var in vars:\n assert is_tf_expression(var)\n try:\n tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/IsVariableInitialized:0'))\n except KeyError:\n # Op does not exist => variable may be uninitialized.\n test_vars.append(var)\n with absolute_name_scope(var.name.split(':')[0]):\n test_ops.append(tf.is_variable_initialized(var))\n init_vars = [var for var, inited in zip(test_vars, run(test_ops)) if not inited]\n run([var.initializer for var in 
init_vars])\n\n#----------------------------------------------------------------------------\n# Set the values of given tf.Variables.\n# Equivalent to the following, but more efficient and does not bloat the tf graph:\n# tfutil.run([tf.assign(var, value) for var, value in var_to_value_dict.items()]\n\ndef set_vars(var_to_value_dict):\n ops = []\n feed_dict = {}\n for var, value in var_to_value_dict.items():\n assert is_tf_expression(var)\n try:\n setter = tf.get_default_graph().get_tensor_by_name(var.name.replace(':0', '/setter:0')) # look for existing op\n except KeyError:\n with absolute_name_scope(var.name.split(':')[0]):\n with tf.control_dependencies(None): # ignore surrounding control_dependencies\n setter = tf.assign(var, tf.placeholder(var.dtype, var.shape, 'new_value'), name='setter') # create new setter\n ops.append(setter)\n feed_dict[setter.op.inputs[1]] = value\n run(ops, feed_dict)\n\n#----------------------------------------------------------------------------\n# Autosummary creates an identity op that internally keeps track of the input\n# values and automatically shows up in TensorBoard. The reported value\n# represents an average over input components. The average is accumulated\n# constantly over time and flushed when save_summaries() is called.\n#\n# Notes:\n# - The output tensor must be used as an input for something else in the\n# graph. Otherwise, the autosummary op will not get executed, and the average\n# value will not get accumulated.\n# - It is perfectly fine to include autosummaries with the same name in\n# several places throughout the graph, even if they are executed concurrently.\n# - It is ok to also pass in a python scalar or numpy array. In this case, it\n# is added to the average immediately.\n\n_autosummary_vars = OrderedDict() # name => [var, ...]\n_autosummary_immediate = OrderedDict() # name => update_op, update_value\n_autosummary_finalized = False\n\ndef autosummary(name, value):\n id = name.replace('/', '_')\n if is_tf_expression(value):\n with tf.name_scope('summary_' + id), tf.device(value.device):\n update_op = _create_autosummary_var(name, value)\n with tf.control_dependencies([update_op]):\n return tf.identity(value)\n else: # python scalar or numpy array\n if name not in _autosummary_immediate:\n with absolute_name_scope('Autosummary/' + id), tf.device(None), tf.control_dependencies(None):\n update_value = tf.placeholder(tf.float32)\n update_op = _create_autosummary_var(name, update_value)\n _autosummary_immediate[name] = update_op, update_value\n update_op, update_value = _autosummary_immediate[name]\n run(update_op, {update_value: np.float32(value)})\n return value\n\n# Create the necessary ops to include autosummaries in TensorBoard report.\n# Note: This should be done only once per graph.\ndef finalize_autosummaries():\n global _autosummary_finalized\n if _autosummary_finalized:\n return\n _autosummary_finalized = True\n init_uninited_vars([var for vars in _autosummary_vars.values() for var in vars])\n with tf.device(None), tf.control_dependencies(None):\n for name, vars in _autosummary_vars.items():\n id = name.replace('/', '_')\n with absolute_name_scope('Autosummary/' + id):\n sum = tf.add_n(vars)\n avg = sum[0] / sum[1]\n with tf.control_dependencies([avg]): # read before resetting\n reset_ops = [tf.assign(var, tf.zeros(2)) for var in vars]\n with tf.name_scope(None), tf.control_dependencies(reset_ops): # reset before reporting\n tf.summary.scalar(name, avg)\n\n# Internal helper for creating autosummary accumulators.\ndef 
_create_autosummary_var(name, value_expr):\n assert not _autosummary_finalized\n v = tf.cast(value_expr, tf.float32)\n if v.shape.ndims == 0:\n v = [v, np.float32(1.0)]\n elif v.shape.ndims == 1:\n v = [tf.reduce_sum(v), tf.cast(tf.shape(v)[0], tf.float32)]\n else:\n v = [tf.reduce_sum(v), tf.reduce_prod(tf.cast(tf.shape(v), tf.float32))]\n v = tf.cond(tf.is_finite(v[0]), lambda: tf.stack(v), lambda: tf.zeros(2))\n with tf.control_dependencies(None):\n var = tf.Variable(tf.zeros(2)) # [numerator, denominator]\n update_op = tf.cond(tf.is_variable_initialized(var), lambda: tf.assign_add(var, v), lambda: tf.assign(var, v))\n if name in _autosummary_vars:\n _autosummary_vars[name].append(var)\n else:\n _autosummary_vars[name] = [var]\n return update_op\n\n#----------------------------------------------------------------------------\n# Call filewriter.add_summary() with all summaries in the default graph,\n# automatically finalizing and merging them on the first call.\n\n_summary_merge_op = None\n\ndef save_summaries(filewriter, global_step=None):\n global _summary_merge_op\n if _summary_merge_op is None:\n finalize_autosummaries()\n with tf.device(None), tf.control_dependencies(None):\n _summary_merge_op = tf.summary.merge_all()\n filewriter.add_summary(_summary_merge_op.eval(), global_step)\n\n#----------------------------------------------------------------------------\n# Utilities for importing modules and objects by name.\n\ndef import_module(module_or_obj_name):\n parts = module_or_obj_name.split('.')\n parts[0] = {'np': 'numpy', 'tf': 'tensorflow'}.get(parts[0], parts[0])\n for i in range(len(parts), 0, -1):\n try:\n module = importlib.import_module('.'.join(parts[:i]))\n relative_obj_name = '.'.join(parts[i:])\n return module, relative_obj_name\n except ImportError:\n pass\n raise ImportError(module_or_obj_name)\n\ndef find_obj_in_module(module, relative_obj_name):\n \n obj = module\n for part in relative_obj_name.split('.'):\n obj = getattr(obj, part)\n\n return obj\n\ndef import_obj(obj_name):\n \n module, relative_obj_name = import_module(obj_name) # e.g. 'train', 'train_progressive_gan'\n\n return find_obj_in_module(module, relative_obj_name)\n\ndef call_func_by_name(*args, func=None, **kwargs):\n\n assert func is not None\n return import_obj(func)(*args, **kwargs) # returns train_progressive_gan(*args, **kwargs)\n\n#----------------------------------------------------------------------------\n# Wrapper for tf.train.Optimizer that automatically takes care of:\n# - Gradient averaging for multi-GPU training.\n# - Dynamic loss scaling and typecasts for FP16 training.\n# - Ignoring corrupted gradients that contain NaNs/Infs.\n# - Reporting statistics.\n# - Well-chosen default settings.\n\nclass Optimizer:\n def __init__(\n self,\n name = 'Train',\n tf_optimizer = 'tf.train.AdamOptimizer',\n learning_rate = 0.001,\n use_loss_scaling = False,\n loss_scaling_init = 64.0,\n loss_scaling_inc = 0.0005,\n loss_scaling_dec = 1.0,\n **kwargs):\n\n # Init fields.\n self.name = name\n self.learning_rate = tf.convert_to_tensor(learning_rate)\n self.id = self.name.replace('/', '.')\n self.scope = tf.get_default_graph().unique_name(self.id)\n self.optimizer_class = import_obj(tf_optimizer)\n self.optimizer_kwargs = dict(kwargs)\n self.use_loss_scaling = use_loss_scaling\n self.loss_scaling_init = loss_scaling_init\n self.loss_scaling_inc = loss_scaling_inc\n self.loss_scaling_dec = loss_scaling_dec\n self._grad_shapes = None # [shape, ...]\n self._dev_opt = OrderedDict() # device => optimizer\n self._dev_grads = 
OrderedDict() # device => [[(grad, var), ...], ...]\n self._dev_ls_var = OrderedDict() # device => variable (log2 of loss scaling factor)\n self._updates_applied = False\n\n # Register the gradients of the given loss function with respect to the given variables.\n # Intended to be called once per GPU.\n def register_gradients(self, loss, vars):\n assert not self._updates_applied\n\n # Validate arguments.\n if isinstance(vars, dict):\n vars = list(vars.values()) # allow passing in Network.trainables as vars\n assert isinstance(vars, list) and len(vars) >= 1\n assert all(is_tf_expression(expr) for expr in vars + [loss])\n if self._grad_shapes is None:\n self._grad_shapes = [shape_to_list(var.shape) for var in vars]\n assert len(vars) == len(self._grad_shapes)\n assert all(shape_to_list(var.shape) == var_shape for var, var_shape in zip(vars, self._grad_shapes))\n dev = loss.device\n assert all(var.device == dev for var in vars)\n\n # Register device and compute gradients.\n with tf.name_scope(self.id + '_grad'), tf.device(dev):\n if dev not in self._dev_opt:\n opt_name = self.scope.replace('/', '_') + '_opt%d' % len(self._dev_opt)\n self._dev_opt[dev] = self.optimizer_class(name=opt_name, learning_rate=self.learning_rate, **self.optimizer_kwargs)\n self._dev_grads[dev] = []\n loss = self.apply_loss_scaling(tf.cast(loss, tf.float32))\n grads = self._dev_opt[dev].compute_gradients(loss, vars, gate_gradients=tf.train.Optimizer.GATE_NONE) # disable gating to reduce memory usage\n grads = [(g, v) if g is not None else (tf.zeros_like(v), v) for g, v in grads] # replace disconnected gradients with zeros\n self._dev_grads[dev].append(grads)\n\n # Construct training op to update the registered variables based on their gradients.\n def apply_updates(self):\n assert not self._updates_applied\n self._updates_applied = True\n devices = list(self._dev_grads.keys())\n total_grads = sum(len(grads) for grads in self._dev_grads.values())\n assert len(devices) >= 1 and total_grads >= 1\n ops = []\n with absolute_name_scope(self.scope):\n\n # Cast gradients to FP32 and calculate partial sum within each device.\n dev_grads = OrderedDict() # device => [(grad, var), ...]\n for dev_idx, dev in enumerate(devices):\n with tf.name_scope('ProcessGrads%d' % dev_idx), tf.device(dev):\n sums = []\n for gv in zip(*self._dev_grads[dev]):\n assert all(v is gv[0][1] for g, v in gv)\n g = [tf.cast(g, tf.float32) for g, v in gv]\n g = g[0] if len(g) == 1 else tf.add_n(g)\n sums.append((g, gv[0][1]))\n dev_grads[dev] = sums\n\n # Sum gradients across devices.\n if len(devices) > 1:\n with tf.name_scope('SumAcrossGPUs'), tf.device(None):\n for var_idx, grad_shape in enumerate(self._grad_shapes):\n g = [dev_grads[dev][var_idx][0] for dev in devices]\n if np.prod(grad_shape): # nccl does not support zero-sized tensors\n g = tf.contrib.nccl.all_sum(g)\n for dev, gg in zip(devices, g):\n dev_grads[dev][var_idx] = (gg, dev_grads[dev][var_idx][1])\n\n # Apply updates separately on each device.\n for dev_idx, (dev, grads) in enumerate(dev_grads.items()):\n with tf.name_scope('ApplyGrads%d' % dev_idx), tf.device(dev):\n\n # Scale gradients as needed.\n if self.use_loss_scaling or total_grads > 1:\n with tf.name_scope('Scale'):\n coef = tf.constant(np.float32(1.0 / total_grads), name='coef')\n coef = self.undo_loss_scaling(coef)\n grads = [(g * coef, v) for g, v in grads]\n\n # Check for overflows.\n with tf.name_scope('CheckOverflow'):\n grad_ok = tf.reduce_all(tf.stack([tf.reduce_all(tf.is_finite(g)) for g, v in grads]))\n\n # Update 
weights and adjust loss scaling.\n with tf.name_scope('UpdateWeights'):\n opt = self._dev_opt[dev]\n ls_var = self.get_loss_scaling_var(dev)\n if not self.use_loss_scaling:\n ops.append(tf.cond(grad_ok, lambda: opt.apply_gradients(grads), tf.no_op))\n else:\n ops.append(tf.cond(grad_ok,\n lambda: tf.group(tf.assign_add(ls_var, self.loss_scaling_inc), opt.apply_gradients(grads)),\n lambda: tf.group(tf.assign_sub(ls_var, self.loss_scaling_dec))))\n\n # Report statistics on the last device.\n if dev == devices[-1]:\n with tf.name_scope('Statistics'):\n ops.append(autosummary(self.id + '/learning_rate', self.learning_rate))\n ops.append(autosummary(self.id + '/overflow_frequency', tf.where(grad_ok, 0, 1)))\n if self.use_loss_scaling:\n ops.append(autosummary(self.id + '/loss_scaling_log2', ls_var))\n\n # Initialize variables and group everything into a single op.\n self.reset_optimizer_state()\n init_uninited_vars(list(self._dev_ls_var.values()))\n return tf.group(*ops, name='TrainingOp')\n\n # Reset internal state of the underlying optimizer.\n def reset_optimizer_state(self):\n run([var.initializer for opt in self._dev_opt.values() for var in opt.variables()])\n\n # Get or create variable representing log2 of the current dynamic loss scaling factor.\n def get_loss_scaling_var(self, device):\n if not self.use_loss_scaling:\n return None\n if device not in self._dev_ls_var:\n with absolute_name_scope(self.scope + '/LossScalingVars'), tf.control_dependencies(None):\n self._dev_ls_var[device] = tf.Variable(np.float32(self.loss_scaling_init), name='loss_scaling_var')\n return self._dev_ls_var[device]\n\n # Apply dynamic loss scaling for the given expression.\n def apply_loss_scaling(self, value):\n assert is_tf_expression(value)\n if not self.use_loss_scaling:\n return value\n return value * exp2(self.get_loss_scaling_var(value.device))\n\n # Undo the effect of dynamic loss scaling for the given expression.\n def undo_loss_scaling(self, value):\n assert is_tf_expression(value)\n if not self.use_loss_scaling:\n return value\n return value * exp2(-self.get_loss_scaling_var(value.device))\n\n#----------------------------------------------------------------------------\n# Generic network abstraction.\n#\n# Acts as a convenience wrapper for a parameterized network construction\n# function, providing several utility methods and convenient access to\n# the inputs/outputs/weights.\n#\n# Network objects can be safely pickled and unpickled for long-term\n# archival purposes. The pickling works reliably as long as the underlying\n# network construction function is defined in a standalone Python module\n# that has no side effects or application-specific imports.\n\nnetwork_import_handlers = [] # Custom import handlers for dealing with legacy data in pickle import.\n_network_import_modules = [] # Temporary modules create during pickle import.\n\nclass Network:\n def __init__(self,\n name=None, # Network name. 
Used to select TensorFlow name and variable scopes.\n func=None, # Fully qualified name of the underlying network construction function.\n **static_kwargs): # Keyword arguments to be passed in to the network construction function.\n \n self._init_fields()\n self.name = name #'G' or 'D'\n self.static_kwargs = dict(static_kwargs)\n\n # Init build func.\n module, self._build_func_name = import_module(func)\n self._build_module_src = inspect.getsource(module)\n self._build_func = find_obj_in_module(module, self._build_func_name) # e.g. networks.G_paper or networks.D_paper\n\n # Init graph.\n self._init_graph() # build the template graph for the whole network\n self.reset_vars()\n\n def _init_fields(self):\n self.name = None # User-specified name, defaults to build func name if None.\n self.scope = None # Unique TF graph scope, derived from the user-specified name.\n self.static_kwargs = dict() # Arguments passed to the user-supplied build func.\n self.num_inputs = 0 # Number of input tensors.\n self.num_outputs = 0 # Number of output tensors.\n self.input_shapes = [[]] # Input tensor shapes (NC or NCHW), including minibatch dimension.\n self.output_shapes = [[]] # Output tensor shapes (NC or NCHW), including minibatch dimension.\n self.input_shape = [] # Short-hand for input_shapes[0].\n self.output_shape = [] # Short-hand for output_shapes[0].\n self.input_templates = [] # Input placeholders in the template graph.\n self.output_templates = [] # Output tensors in the template graph.\n self.input_names = [] # Name string for each input.\n self.output_names = [] # Name string for each output.\n self.vars = OrderedDict() # All variables (localname => var).\n self.trainables = OrderedDict() # Trainable variables (localname => var).\n self._build_func = None # User-supplied build function that constructs the network.\n self._build_func_name = None # Name of the build function.\n self._build_module_src = None # Full source code of the module containing the build function.\n self._run_cache = dict() # Cached graph data for Network.run().\n \n def _init_graph(self):\n \n # ------------- Collect inputs: the build func's parameters that have no default value -------------------------\n self.input_names = []\n for param in inspect.signature(self._build_func).parameters.values():\n if param.kind == param.POSITIONAL_OR_KEYWORD and param.default is param.empty:\n self.input_names.append(param.name)\n self.num_inputs = len(self.input_names) # input_names = required build-func parameters\n assert self.num_inputs >= 1\n \n # -------------- Choose name and scope: derive the network name and its top-level TF scope from it\n if self.name is None:\n self.name = self._build_func_name # default to the build func name, e.g. G_paper or D_paper\n self.scope = tf.get_default_graph().unique_name(self.name.replace('/', '_'), mark_as_used=False) # set this network's scope, e.g. 'G' or 'D'\n \n # Build template graph by calling the build func, i.e. G_paper / D_paper.\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE): # AUTO_REUSE: fetch variables if they exist, create them otherwise\n assert tf.get_variable_scope().name == self.scope # double-check the active variable scope matches\n with absolute_name_scope(self.scope): # ignore surrounding name_scope\n with tf.control_dependencies(None): # ignore surrounding control_dependencies\n self.input_templates = [tf.placeholder(tf.float32, name=name) for name in self.input_names] # one placeholder per required input\n out_expr = self._build_func(*self.input_templates, is_template_graph=True, **self.static_kwargs) # network output(s)\n #^^^^^^^ images_out for G; scores and labels for D\n # Collect outputs.\n assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)\n self.output_templates = [out_expr] if is_tf_expression(out_expr) else list(out_expr)\n 
self.output_names = [t.name.split('/')[-1].split(':')[0] for t in self.output_templates] # e.g. images_out, or scores_out and labels_out\n self.num_outputs = len(self.output_templates) # one output for G, two for D\n assert self.num_outputs >= 1\n \n # Populate remaining fields: input/output shapes are known once the template graph is built.\n self.input_shapes = [shape_to_list(t.shape) for t in self.input_templates]\n self.output_shapes = [shape_to_list(t.shape) for t in self.output_templates]\n self.input_shape = self.input_shapes[0]\n self.output_shape = self.output_shapes[0]\n self.vars = OrderedDict([(self.get_var_localname(var), var) for var in tf.global_variables(self.scope + '/')])\n # all trainable parameters\n self.trainables = OrderedDict([(self.get_var_localname(var), var) for var in tf.trainable_variables(self.scope + '/')])\n\n # Run initializers for all variables defined by this network.\n def reset_vars(self):\n run([var.initializer for var in self.vars.values()]) # initialize all of this network's variables\n\n # Run initializers for all trainable variables defined by this network.\n def reset_trainables(self):\n run([var.initializer for var in self.trainables.values()])\n\n # Get TensorFlow expression(s) for the output(s) of this network, given the inputs.\n def get_output_for(self, *in_expr, return_as_list=False, **dynamic_kwargs):\n\n assert len(in_expr) == self.num_inputs\n all_kwargs = dict(self.static_kwargs)\n all_kwargs.update(dynamic_kwargs)\n with tf.variable_scope(self.scope, reuse=True):\n assert tf.get_variable_scope().name == self.scope\n named_inputs = [tf.identity(expr, name=name) for expr, name in zip(in_expr, self.input_names)]\n out_expr = self._build_func(*named_inputs, **all_kwargs)\n assert is_tf_expression(out_expr) or isinstance(out_expr, tuple)\n if return_as_list:\n out_expr = [out_expr] if is_tf_expression(out_expr) else list(out_expr)\n return out_expr\n\n # Get the local name of a given variable, excluding any surrounding name scopes.\n def get_var_localname(self, var_or_globalname):\n assert is_tf_expression(var_or_globalname) or isinstance(var_or_globalname, str)\n globalname = var_or_globalname if isinstance(var_or_globalname, str) else var_or_globalname.name\n assert globalname.startswith(self.scope + '/')\n localname = globalname[len(self.scope) + 1:]\n localname = localname.split(':')[0]\n return localname\n\n # Find variable by local or global name.\n def find_var(self, var_or_localname):\n assert is_tf_expression(var_or_localname) or isinstance(var_or_localname, str)\n return self.vars[var_or_localname] if isinstance(var_or_localname, str) else var_or_localname\n\n # Get the value of a given variable as NumPy array.\n # Note: This method is very inefficient -- prefer to use tfutil.run(list_of_vars) whenever possible.\n def get_var(self, var_or_localname):\n return self.find_var(var_or_localname).eval()\n \n # Set the value of a given variable based on the given NumPy array.\n # Note: This method is very inefficient -- prefer to use tfutil.set_vars() whenever possible.\n def set_var(self, var_or_localname, new_value):\n return set_vars({self.find_var(var_or_localname): new_value})\n\n # Pickle export.\n def __getstate__(self):\n return {\n 'version': 2,\n 'name': self.name,\n 'static_kwargs': self.static_kwargs,\n 'build_module_src': self._build_module_src,\n 'build_func_name': self._build_func_name,\n 'variables': list(zip(self.vars.keys(), run(list(self.vars.values()))))}\n\n # Pickle import.\n def __setstate__(self, state):\n self._init_fields()\n\n # Execute custom import handlers.\n for handler in network_import_handlers:\n state = 
handler(state)\n\n # Set basic fields.\n assert state['version'] == 2\n self.name = state['name']\n self.static_kwargs = state['static_kwargs']\n self._build_module_src = state['build_module_src']\n self._build_func_name = state['build_func_name']\n \n # Parse imported module.\n module = imp.new_module('_tfutil_network_import_module_%d' % len(_network_import_modules))\n exec(self._build_module_src, module.__dict__)\n self._build_func = find_obj_in_module(module, self._build_func_name)\n _network_import_modules.append(module) # avoid gc\n \n # Init graph.\n self._init_graph()\n self.reset_vars()\n set_vars({self.find_var(name): value for name, value in state['variables']})\n\n # Create a clone of this network with its own copy of the variables.\n def clone(self, name=None):\n net = object.__new__(Network)\n net._init_fields()\n net.name = name if name is not None else self.name\n net.static_kwargs = dict(self.static_kwargs)\n net._build_module_src = self._build_module_src\n net._build_func_name = self._build_func_name\n net._build_func = self._build_func\n net._init_graph()\n net.copy_vars_from(self)\n return net\n\n # Copy the values of all variables from the given network.\n def copy_vars_from(self, src_net):\n assert isinstance(src_net, Network)\n name_to_value = run({name: src_net.find_var(name) for name in self.vars.keys()})\n set_vars({self.find_var(name): value for name, value in name_to_value.items()})\n\n # Copy the values of all trainable variables from the given network.\n def copy_trainables_from(self, src_net):\n assert isinstance(src_net, Network)\n name_to_value = run({name: src_net.find_var(name) for name in self.trainables.keys()})\n set_vars({self.find_var(name): value for name, value in name_to_value.items()})\n\n # Create new network with the given parameters, and copy all variables from this network.\n def convert(self, name=None, func=None, **static_kwargs):\n net = Network(name, func, **static_kwargs)\n net.copy_vars_from(self)\n return net\n\n # Construct a TensorFlow op that updates the variables of this network\n # to be slightly closer to those of the given network.\n def setup_as_moving_average_of(self, src_net, beta=0.99, beta_nontrainable=0.0):\n assert isinstance(src_net, Network)\n with absolute_name_scope(self.scope):\n with tf.name_scope('MovingAvg'):\n ops = []\n for name, var in self.vars.items():\n if name in src_net.vars:\n cur_beta = beta if name in self.trainables else beta_nontrainable\n new_value = lerp(src_net.vars[name], var, cur_beta)\n ops.append(var.assign(new_value))\n return tf.group(*ops)\n\n # Run this network for the given NumPy array(s), and return the output(s) as NumPy array(s).\n def run(self, *in_arrays,\n return_as_list = False, # True = return a list of NumPy arrays, False = return a single NumPy array, or a tuple if there are multiple outputs.\n print_progress = False, # Print progress to the console? 
Useful for very large input arrays.\n minibatch_size = None, # Maximum minibatch size to use, None = disable batching.\n num_gpus = 1, # Number of GPUs to use.\n out_mul = 1.0, # Multiplicative constant to apply to the output(s).\n out_add = 0.0, # Additive constant to apply to the output(s).\n out_shrink = 1, # Shrink the spatial dimensions of the output(s) by the given factor.\n out_dtype = None, # Convert the output to the specified data type.\n **dynamic_kwargs): # Additional keyword arguments to pass into the network construction function.\n\n assert len(in_arrays) == self.num_inputs\n num_items = in_arrays[0].shape[0]\n if minibatch_size is None:\n minibatch_size = num_items\n key = str([list(sorted(dynamic_kwargs.items())), num_gpus, out_mul, out_add, out_shrink, out_dtype])\n\n # Build graph.\n if key not in self._run_cache:\n with absolute_name_scope(self.scope + '/Run'), tf.control_dependencies(None):\n in_split = list(zip(*[tf.split(x, num_gpus) for x in self.input_templates]))\n out_split = []\n for gpu in range(num_gpus):\n with tf.device('/gpu:%d' % gpu):\n out_expr = self.get_output_for(*in_split[gpu], return_as_list=True, **dynamic_kwargs)\n if out_mul != 1.0:\n out_expr = [x * out_mul for x in out_expr]\n if out_add != 0.0:\n out_expr = [x + out_add for x in out_expr]\n if out_shrink > 1:\n ksize = [1, 1, out_shrink, out_shrink]\n out_expr = [tf.nn.avg_pool(x, ksize=ksize, strides=ksize, padding='VALID', data_format='NCHW') for x in out_expr]\n if out_dtype is not None:\n if tf.as_dtype(out_dtype).is_integer:\n out_expr = [tf.round(x) for x in out_expr]\n out_expr = [tf.saturate_cast(x, out_dtype) for x in out_expr]\n out_split.append(out_expr)\n self._run_cache[key] = [tf.concat(outputs, axis=0) for outputs in zip(*out_split)]\n\n # Run minibatches.\n out_expr = self._run_cache[key]\n out_arrays = [np.empty([num_items] + shape_to_list(expr.shape)[1:], expr.dtype.name) for expr in out_expr]\n for mb_begin in range(0, num_items, minibatch_size):\n if print_progress:\n print('\\r%d / %d' % (mb_begin, num_items), end='')\n mb_end = min(mb_begin + minibatch_size, num_items)\n mb_in = [src[mb_begin : mb_end] for src in in_arrays]\n mb_out = tf.get_default_session().run(out_expr, dict(zip(self.input_templates, mb_in)))\n for dst, src in zip(out_arrays, mb_out):\n dst[mb_begin : mb_end] = src\n\n # Done.\n if print_progress:\n print('\\r%d / %d' % (num_items, num_items))\n if not return_as_list:\n out_arrays = out_arrays[0] if len(out_arrays) == 1 else tuple(out_arrays)\n return out_arrays\n\n # Returns a list of (name, output_expr, trainable_vars) tuples corresponding to\n # individual layers of the network. 
Mainly intended to be used for reporting.\n def list_layers(self):\n patterns_to_ignore = ['/Setter', '/new_value', '/Shape', '/strided_slice', '/Cast', '/concat']\n all_ops = tf.get_default_graph().get_operations()\n all_ops = [op for op in all_ops if not any(p in op.name for p in patterns_to_ignore)]\n layers = []\n\n def recurse(scope, parent_ops, level):\n prefix = scope + '/'\n ops = [op for op in parent_ops if op.name == scope or op.name.startswith(prefix)]\n\n # Does not contain leaf nodes => expand immediate children.\n if level == 0 or all('/' in op.name[len(prefix):] for op in ops):\n visited = set()\n for op in ops:\n suffix = op.name[len(prefix):]\n if '/' in suffix:\n suffix = suffix[:suffix.index('/')]\n if suffix not in visited:\n recurse(prefix + suffix, ops, level + 1)\n visited.add(suffix)\n\n # Otherwise => interpret as a layer.\n else:\n layer_name = scope[len(self.scope)+1:]\n layer_output = ops[-1].outputs[0]\n layer_trainables = [op.outputs[0] for op in ops if op.type.startswith('Variable') and self.get_var_localname(op.name) in self.trainables]\n layers.append((layer_name, layer_output, layer_trainables))\n\n recurse(self.scope, all_ops, 0)\n return layers\n\n # Print a summary table of the network structure.\n def print_layers(self, title=None, hide_layers_with_no_params=False):\n if title is None: title = self.name\n print()\n print('%-28s%-12s%-24s%-24s' % (title, 'Params', 'OutputShape', 'WeightShape'))\n print('%-28s%-12s%-24s%-24s' % (('---',) * 4))\n\n total_params = 0\n for layer_name, layer_output, layer_trainables in self.list_layers():\n weights = [var for var in layer_trainables if var.name.endswith('/weight:0')]\n num_params = sum(np.prod(shape_to_list(var.shape)) for var in layer_trainables)\n total_params += num_params\n if hide_layers_with_no_params and num_params == 0:\n continue\n\n print('%-28s%-12s%-24s%-24s' % (\n layer_name,\n num_params if num_params else '-',\n layer_output.shape,\n weights[0].shape if len(weights) == 1 else '-'))\n\n print('%-28s%-12s%-24s%-24s' % (('---',) * 4))\n print('%-28s%-12s%-24s%-24s' % ('Total', total_params, '', ''))\n print()\n\n # Construct summary ops to include histograms of all trainable parameters in TensorBoard.\n def setup_weight_histograms(self, title=None):\n if title is None: title = self.name\n with tf.name_scope(None), tf.device(None), tf.control_dependencies(None):\n for localname, var in self.trainables.items():\n if '/' in localname:\n p = localname.split('/')\n name = title + '_' + p[-1] + '/' + '_'.join(p[:-1])\n else:\n name = title + '_toplevel/' + localname\n tf.summary.histogram(name, var)\n\n#----------------------------------------------------------------------------\n",
"# python3.7\n\"\"\"Contains the base class for generator.\"\"\"\n\nimport os\nimport sys\nimport logging\nimport numpy as np\n\nimport torch\n\nfrom Models import model_settings\n\n__all__ = ['BaseGenerator']\n\n\ndef get_temp_logger(logger_name='logger'):\n \"\"\"Gets a temporary logger.\n\n This logger will print all levels of messages onto the screen.\n\n Args:\n logger_name: Name of the logger.\n\n Returns:\n A `logging.Logger`.\n\n Raises:\n ValueError: If the input `logger_name` is empty.\n \"\"\"\n if not logger_name:\n raise ValueError(f'Input `logger_name` should not be empty!')\n\n logger = logging.getLogger(logger_name)\n if not logger.hasHandlers():\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"[%(asctime)s][%(levelname)s] %(message)s\")\n sh = logging.StreamHandler(stream=sys.stdout)\n sh.setLevel(logging.DEBUG)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n\n return logger\n\n\nclass BaseGenerator(object):\n \"\"\"Base class for generator used in GAN variants.\n\n NOTE: The model should be defined with pytorch, and only used for inference.\n \"\"\"\n\n def __init__(self, model_name, logger=None):\n \"\"\"Initializes with specific settings.\n\n The model should be registered in `model_settings.py` with proper settings\n first. Among them, some attributes are necessary, including:\n (1) gan_type: Type of the GAN model.\n (2) latent_space_dim: Dimension of the latent space. Should be a tuple.\n (3) resolution: Resolution of the synthesis.\n (4) min_val: Minimum value of the raw output. (default -1.0)\n (5) max_val: Maximum value of the raw output. (default 1.0)\n (6) channel_order: Channel order of the output image. (default: `RGB`)\n\n Args:\n model_name: Name with which the model is registered.\n logger: Logger for recording log messages. If set as `None`, a default\n logger, which prints messages from all levels to screen, will be\n created. 
(default: None)\n\n Raises:\n AttributeError: If some necessary attributes are missing.\n \"\"\"\n \"\"\"\n MODEL_POOL:\n 'tf_model_path': MODEL_DIR + './pggan_baby_ckp.pkl',\n 'model_path': MODEL_DIR + 'pggan_baby_pytorch.pth',\n 'gan_type': 'pggan',\n 'dataset_name': 'baby',\n 'latent_space_dim': 512,\n 'resolution': 128,\n 'min_val': -1.0,\n 'max_val': 1.0,\n 'output_channels': 3,\n 'channel_order': 'RGB',\n 'fused_scale': False,\n \"\"\"\n self.model_name = model_name\n for key, val in model_settings.MODEL_POOL[model_name].items():\n setattr(self, key, val)\n self.use_cuda = model_settings.USE_CUDA\n self.batch_size = model_settings.MAX_IMAGES_ON_DEVICE # batch size, e.g. 4\n self.logger = logger or get_temp_logger(model_name + '_generator')\n self.model = None\n self.run_device = 'cuda' if self.use_cuda else 'cpu'\n self.cpu_device = 'cpu'\n\n # Check necessary settings.\n self.check_attr('gan_type')\n self.check_attr('latent_space_dim')\n self.check_attr('resolution')\n self.min_val = getattr(self, 'min_val', -1.0)\n self.max_val = getattr(self, 'max_val', 1.0)\n self.output_channels = getattr(self, 'output_channels', 3)\n self.channel_order = getattr(self, 'channel_order', 'RGB').upper()\n assert self.channel_order in ['RGB', 'BGR']\n\n # Build model and load pre-trained weights.\n self.build() # mainly sets self.model; build() is implemented by the derived class\n if os.path.isfile(getattr(self, 'model_path', '')): # load PyTorch-format weights\n self.load()\n elif os.path.isfile(getattr(self, 'tf_model_path', '')): # load TF-format weights and convert them to PyTorch\n self.convert_tf_model()\n else:\n self.logger.warning(f'No pre-trained model will be loaded!')\n\n # Change to inference mode and GPU mode if needed.\n assert self.model\n self.model.eval().to(self.run_device) # switch to inference (eval) mode\n\n def check_attr(self, attr_name):\n \"\"\"Checks the existence of a particular attribute.\n\n Args:\n attr_name: Name of the attribute to check.\n\n Raises:\n AttributeError: If the target attribute is missing.\n \"\"\"\n if not hasattr(self, attr_name):\n raise AttributeError(\n f'`{attr_name}` is missing for model `{self.model_name}`!')\n\n def build(self):\n \"\"\"Builds the graph.\"\"\"\n raise NotImplementedError(f'Should be implemented in derived class!')\n\n def load(self):\n \"\"\"Loads pre-trained weights.\"\"\"\n raise NotImplementedError(f'Should be implemented in derived class!')\n\n def convert_tf_model(self, test_num=10):\n \"\"\"Converts model weights from the tensorflow version.\n\n Args:\n test_num: Number of images to generate for testing whether the conversion\n is done correctly. `0` means skipping the test. (default 10)\n \"\"\"\n raise NotImplementedError(f'Should be implemented in derived class!')\n\n def sample(self, num):\n \"\"\"Samples latent codes randomly.\n\n Args:\n num: Number of latent codes to sample. 
Should be positive.\n\n Returns:\n A `numpy.ndarray` as sampled latent codes.\n \"\"\"\n raise NotImplementedError(f'Should be implemented in derived class!')\n\n def preprocess(self, latent_codes):\n \"\"\"Preprocesses the input latent code if needed.\n\n Args:\n latent_codes: The input latent codes for preprocessing.\n\n Returns:\n The preprocessed latent codes which can be used as final input for the\n generator.\n \"\"\"\n raise NotImplementedError(f'Should be implemented in derived class!')\n\n def easy_sample(self, num):\n \"\"\"Wraps functions `sample()` and `preprocess()` together.\"\"\"\n return self.preprocess(self.sample(num))\n\n def synthesize(self, latent_codes):\n \"\"\"Synthesizes images with given latent codes.\n\n NOTE: The latent codes should have already been preprocessed.\n\n Args:\n latent_codes: Input latent codes for image synthesis.\n\n Returns:\n A dictionary whose values are raw outputs from the generator.\n \"\"\"\n raise NotImplementedError(f'Should be implemented in derived class!')\n\n def get_value(self, tensor):\n \"\"\"Gets value of a `torch.Tensor`.\n\n Args:\n tensor: The input tensor to get value from.\n\n Returns:\n A `numpy.ndarray`.\n\n Raises:\n ValueError: If the tensor is with neither `torch.Tensor` type nor\n `numpy.ndarray` type.\n \"\"\"\n if isinstance(tensor, np.ndarray):\n return tensor\n if isinstance(tensor, torch.Tensor):\n return tensor.to(self.cpu_device).detach().numpy()\n raise ValueError(f'Unsupported input type `{type(tensor)}`!')\n\n def postprocess(self, images):\n \"\"\"Postprocesses the output images if needed.\n\n This function assumes the input numpy array is with shape [batch_size,\n channel, height, width]. Here, `channel = 3` for color image and\n `channel = 1` for grayscale image. The return images are with shape\n [batch_size, height, width, channel]. NOTE: The channel order of output\n image will always be `RGB`.\n\n Args:\n images: The raw output from the generator.\n\n Returns:\n The postprocessed images with dtype `numpy.uint8` with range [0, 255].\n\n Raises:\n ValueError: If the input `images` are not with type `numpy.ndarray` or not\n with shape [batch_size, channel, height, width].\n \"\"\"\n if not isinstance(images, np.ndarray):\n raise ValueError(f'Images should be with type `numpy.ndarray`!')\n\n images_shape = images.shape\n if len(images_shape) != 4 or images_shape[1] not in [1, 3]:\n raise ValueError(f'Input should be with shape [batch_size, channel, '\n f'height, width], where channel equals to 1 or 3. '\n f'But {images_shape} is received!')\n images = (images - self.min_val) * 255 / (self.max_val - self.min_val)\n images = np.clip(images + 0.5, 0, 255).astype(np.uint8)\n images = images.transpose(0, 2, 3, 1)\n if self.channel_order == 'BGR':\n images = images[:, :, :, ::-1]\n\n return images\n\n def easy_synthesize(self, latent_codes, **kwargs):\n \"\"\"Wraps functions `synthesize()` and `postprocess()` together.\"\"\"\n outputs = self.synthesize(latent_codes, **kwargs)\n if 'image' in outputs:\n outputs['image'] = self.postprocess(outputs['image'])\n\n return outputs\n\n def get_batch_inputs(self, latent_codes):\n \"\"\"Gets batch inputs from a collection of latent codes.\n\n This function will yield at most `self.batch_size` latent_codes at a time.\n\n Args:\n latent_codes: The input latent codes for generation. First dimension\n should be the total number.\n \"\"\"\n total_num = latent_codes.shape[0]\n for i in range(0, total_num, self.batch_size):\n yield latent_codes[i:i + self.batch_size]\n",
"''' Baisc packages\n'''\nimport os\nimport glob\nimport tqdm\nimport copy\nimport random\nimport importlib\nimport numpy as np\n\n''' Configuration packages\n'''\nimport yaml\nimport argparse\nfrom easydict import EasyDict as edict\n\n''' PyTorch packages\n'''\nimport torch\nimport torch.nn as nn\nimport torch.autograd as autograd\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\nfrom torch.nn.functional import cosine_similarity\n\nfrom torch.utils.data import DataLoader\nfrom utils.utils_check import check_var\nfrom utils.utils_loss import VGGLoss\n\n\n\ndef train(args, logger=None, vis_logger=None):\n logger.info('=> Import libs')\n Datalib = importlib.import_module('.{}'.format(args.DATASET_CONFIG.dataset.name), package=args.DATASET_CONFIG.package)\n Model = importlib.import_module('.{}'.format(args.MODEL.name[0]), package=args.MODEL.package)\n Model_PGGAN = importlib.import_module('.{}'.format(args.MODEL.name[1]), package=args.MODEL.package)\n\n\n\n logger.info('=> Set dataloader')\n trainset = Datalib.inverse(args.DATASET_CONFIG)\n train_loader = DataLoader(trainset, batch_size=args.TRAIN.batch_size, num_workers=args.TRAIN.num_workers, drop_last=True, pin_memory=True, shuffle=True)\n\n\n logger.info('=> Make models and load checkpoint')\n encoder_net = Model.Vgg16(pre_train=args.MODEL.vgg.pre_train,\n requires_grad=args.MODEL.vgg.requires_grad,\n vae_encoder=args.MODEL.vgg.use_vae,\n global_pooling=args.MODEL.vgg.global_pooling,\n if_downsample=args.MODEL.vgg.if_downsample)\n G_net = Model_PGGAN.PGGANGenerator(model_name=args.MODEL.pggan.model_name, logger=logger)\n D_net = Model.Discriminator(size=args.MODEL.D.imgsize,\n input_channel=args.MODEL.D.input_channel,\n ndf=args.MODEL.D.ndf,\n channel_multiplier=args.MODEL.D.channel_multiplier,\n use_sigmoid=args.MODEL.D.use_sigmoid,\n use_sn=args.MODEL.D.use_sn)\n if args.USE_GPU:\n logger.info('===> Use {} GPUs'.format(args.NUM_GPU))\n assert torch.cuda.is_available()\n device = torch.device('cuda')\n GPU_list = [i for i in range(args.NUM_GPU)]\n encoder_net = nn.DataParallel(encoder_net, device_ids=GPU_list)\n D_net = nn.DataParallel(D_net, device_ids=GPU_list)\n encoder_net.to(device)\n D_net.to(device)\n else:\n ValueError('Unsupported mode!')\n if args.MODEL.checkpoint.ckp_flag:\n logger.info('===> Load ckp for <vgg_encoder> from {}'.format(args.MODEL.checkpoint.ckp_path))\n statedict = torch.load(args.MODEL.checkpoint.ckp_path)\n encoder_net.module.load_state_dict(statedict)\n \n\n logger.info('=> Set loss')\n MSE_loss = nn.MSELoss()\n # MSE_loss = nn.L1Loss()\n D_loss = nn.BCELoss()\n VGG_loss = VGGLoss(weight_vgg=1.)\n if args.USE_GPU:\n MSE_loss.to(device)\n VGG_loss.to(device)\n D_loss.to(device)\n \n \n logger.info('=> Set optimizer')\n optimizer_encoder = torch.optim.Adam(encoder_net.module.parameters(), \\\n lr=args.OPTIMIZER.lr_encoder, \\\n betas=(args.OPTIMIZER.Adam.beta1, args.OPTIMIZER.Adam.beta2), \\\n weight_decay=1e-4)\n optimizer_D = torch.optim.Adam(D_net.module.parameters(), \\\n lr=args.OPTIMIZER.lr_D, \\\n betas=(args.OPTIMIZER.Adam.beta1, args.OPTIMIZER.Adam.beta2), \\\n weight_decay=1e-4)\n\n \"\"\" Intrisic function\"\"\"\n def cor_square_error(x, y, eps=1e-12):\n return (1.0 - cosine_similarity(x, y, eps=eps)).mean()\n def save_checkpoint(net, name, dir_path, epoch, bs, gap=1):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n if epoch % gap == 0:\n path = os.path.join(dir_path, '{}_Batchsize_{}_Epoch_{}.pth'.format(name, bs, epoch))\n torch.save(net.module.state_dict(), 
path)\n logger.info('===> Save checkpoint at {}'.format(path))\n\n logger.info('=> Begin to train the model')\n encoder_net.train()\n D_net.train()\n Tensor = torch.cuda.FloatTensor if args.USE_GPU else torch.FloatTensor\n \n for epoch in range(0, args.TRAIN.epochs):\n bar = tqdm.tqdm(train_loader)\n dis_loss_list = []\n gan_loss_list = []\n latent_loss_list = []\n en_mse_loss_list = []\n en_vgg_loss_list = []\n encoder_loss_list = []\n ####################################################\n # gt_img\n # |\n # inverse_net\n # |\n # lat_code + label + uniform\n # |\n # pggan\n # |\n # recon_img\n # |\n # inverse_net\n # |\n # rec_code\n ####################################################\n for idx, (input_img, input_label) in enumerate(bar):\n input_img = input_img.cuda()\n input_label = input_label.cuda()\n uniform_code = torch.Tensor(input_img.shape[0], args.TRAIN.uniform_dim).uniform_().type_as(input_img)\n uniform_code = uniform_code.cuda()\n\n latent_code4, skip = encoder_net(input_img) # feed the GT image and get its inverted latent code\n latent_code = torch.cat((latent_code4, input_label, uniform_code), dim=1)\n recon_img = G_net.synthesize(latent_code) # feed the predicted code through PGGAN; recon_img should match the GT as closely as possible\n\n #Update D_net\n optimizer_D.zero_grad()\n pred_real = D_net(input_img)\n pred_fake = D_net(recon_img.detach())\n dis_loss = 0\n\n for idx_real, idx_fake in zip(pred_real, pred_fake):\n global_real_label = Tensor(idx_real.size()).fill_(1.).cuda()\n global_fake_label = Tensor(idx_fake.size()).fill_(0.).cuda()\n dis_loss += (D_loss(idx_real, global_real_label) + D_loss(idx_fake, global_fake_label))\n dis_loss.backward()\n optimizer_D.step()\n dis_loss_list.append(dis_loss.item())\n \n #Update encoder_net\n optimizer_encoder.zero_grad()\n pred_fake = D_net(recon_img)\n recon_latent_code, _ = encoder_net(recon_img)\n \n gan_loss = 0 #gan loss\n for idx_fake in pred_fake:\n global_real_label = Tensor(idx_fake.size()).fill_(1.).cuda()\n gan_loss += D_loss(idx_fake, global_real_label)\n latent_loss = 1. * cor_square_error(recon_latent_code, latent_code4) +\\\n 1. 
* MSE_loss(recon_latent_code, latent_code4)\n mse_loss = MSE_loss(recon_img, input_img)\n vgg_loss = VGG_loss(recon_img, input_img)\n encoder_loss = (args.TRAIN.weight_mse * mse_loss +\\\n args.TRAIN.weight_vgg * vgg_loss +\\\n args.TRAIN.weight_latent * latent_loss +\\\n args.TRAIN.weight_gan * gan_loss).mean()\n encoder_loss.backward()\n optimizer_encoder.step()\n gan_loss_list.append(gan_loss.item())\n latent_loss_list.append(latent_loss.item())\n en_mse_loss_list.append(mse_loss.item())\n en_vgg_loss_list.append(vgg_loss.item())\n encoder_loss_list.append(encoder_loss.item())\n \n\n if idx % args.TRAIN.save_iter == 0:\n #dump images to tensorboard\n vis_logger.visual_image(\n {'input_image': input_img,\n 'recon_image': recon_img},\n (idx+1), normalize=True\n )\n\n vis_logger.visual_text(\n {'D_net': np.mean(dis_loss_list)},\n epoch,\n main_tag='D_loss'\n )\n vis_logger.visual_text(\n {'EncoderNet': np.mean(encoder_loss_list),\n 'gan_loss': np.mean(gan_loss_list),\n 'latent_loss': np.mean(latent_loss_list),\n 'recon_loss': np.mean(en_mse_loss_list),\n 'perceptual_loss': np.mean(en_vgg_loss_list)},\n epoch,\n main_tag='Encoder_loss'\n )\n logger.info('[Epoch: {0}/{1}] | [D_net: {2:.5f}] [EncoderNet: {3:.5f}] [gan_loss: {4:.5f}] [latent_loss: {5:.5f}] [recon_loss: {6:.5f}] [perceptual_loss: {7:.5f}]'.format(\n epoch,\n args.TRAIN.epochs,\n np.mean(dis_loss_list),\n np.mean(encoder_loss_list),\n np.mean(gan_loss_list),\n np.mean(latent_loss_list),\n np.mean(en_mse_loss_list),\n np.mean(en_vgg_loss_list)\n ))\n\n save_checkpoint(encoder_net, 'EncoderNet', args.MODEL.checkpoint.save_path[0], epoch, args.TRAIN.batch_size, gap=2)\n save_checkpoint(D_net, 'DNet', args.MODEL.checkpoint.save_path[1], epoch, args.TRAIN.batch_size, gap=2)\n\n \n"
] |
[
[
"torch.cat",
"torch.randn",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.LeakyReLU"
],
[
"tensorflow.convert_to_tensor",
"tensorflow.device",
"tensorflow.concat",
"tensorflow.control_dependencies",
"tensorflow.is_finite",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.global_variables",
"tensorflow.is_variable_initialized",
"tensorflow.as_dtype",
"tensorflow.where",
"tensorflow.get_default_graph",
"tensorflow.group",
"tensorflow.add_n",
"tensorflow.summary.scalar",
"numpy.random.randint",
"tensorflow.assign_add",
"tensorflow.ConfigProto",
"tensorflow.name_scope",
"tensorflow.Session",
"numpy.float32",
"tensorflow.trainable_variables",
"numpy.log",
"tensorflow.shape",
"tensorflow.identity",
"tensorflow.placeholder",
"tensorflow.zeros_like",
"tensorflow.summary.merge_all",
"tensorflow.nn.avg_pool",
"tensorflow.split",
"tensorflow.round",
"tensorflow.summary.histogram",
"tensorflow.get_default_session",
"tensorflow.clip_by_value",
"tensorflow.contrib.nccl.all_sum",
"tensorflow.assign_sub",
"tensorflow.reshape",
"tensorflow.assign",
"tensorflow.log",
"numpy.prod",
"tensorflow.saturate_cast",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
],
[
"numpy.clip"
],
[
"torch.Tensor",
"torch.load",
"torch.cat",
"torch.utils.data.DataLoader",
"torch.nn.BCELoss",
"numpy.mean",
"torch.nn.functional.cosine_similarity",
"torch.cuda.is_available",
"torch.device",
"torch.nn.DataParallel",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
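The record above batches latent codes with a generator method before feeding them to the GAN. Below is a minimal stand-alone sketch of that batched-iteration pattern; the batch size of 4 and the random codes are illustrative assumptions, not values from the record.

import numpy as np

def get_batch_inputs(latent_codes, batch_size=4):
    # Yield at most `batch_size` latent codes at a time.
    total_num = latent_codes.shape[0]
    for i in range(0, total_num, batch_size):
        yield latent_codes[i:i + batch_size]

codes = np.random.randn(10, 512)  # 10 codes in a 512-dim latent space
for batch in get_batch_inputs(codes):
    print(batch.shape)  # (4, 512), (4, 512), (2, 512)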
jiricejchan/AnonymniAnalytici
|
[
"e4e96f943d0b2232d9099c6e7bb690a3d25ea422",
"e4e96f943d0b2232d9099c6e7bb690a3d25ea422"
] |
[
"2018_02_15_cryptocurrencies_trading/algorithms/shared/shannon's_demon-1500960177478.py",
"2018_02_15_cryptocurrencies_trading/algorithms/shared/buy_and_hodl_to_csv-1501032722566.py"
] |
[
"from catalyst.api import order_target_percent, record, symbol, set_benchmark\r\n\r\ndef initialize(context):\r\n context.ASSET_NAME = 'USDT_REP'\r\n context.asset = symbol(context.ASSET_NAME)\r\n set_benchmark(context.asset)\r\n context.is_first_time = True\r\n \r\n # For all trading pairs in the poloniex bundle, the default denomination\r\n # currently supported by Catalyst is 1/1000th of a full coin. Use this\r\n # constant to scale the price of up to that of a full coin if desired.\r\n context.TICK_SIZE = 1.0\r\n\r\n\r\ndef handle_data(context, data):\r\n \r\n # Define base price and make initial trades to achieve target investment\r\n # ratio of 0.5\r\n\r\n if context.is_first_time:\r\n order_target_percent(\r\n context.asset,\r\n 0.5,\r\n )\r\n context.base_price = data[context.asset].price\r\n context.first_price = data[context.asset].price\r\n context.is_first_time = False\r\n \r\n # Retrieve current asset price from pricing data\r\n price = data[context.asset].price\r\n REP_cumulative_return = (price/context.first_price-1)*100\r\n Portfolio_cumulative_return = (context.portfolio.portfolio_value/\r\n context.portfolio.starting_cash-1)*100\r\n \r\n # Trading logic: rebalance to a 0.5 investment ratio every time the price\r\n # of the asset doubles or decreases to half the initial price\r\n if price > context.base_price*2:\r\n order_target_percent(\r\n context.asset,\r\n 0.5,\r\n )\r\n context.base_price = data[context.asset].price\r\n elif price < context.base_price/2:\r\n order_target_percent(\r\n context.asset,\r\n 0.5,\r\n )\r\n context.base_price = data[context.asset].price\r\n \r\n price = data[context.asset].price\r\n # Save values for later inspection\r\n record(price=price,\r\n base_price=context.base_price,\r\n cash=context.portfolio.cash,\r\n leverage=context.account.leverage,\r\n Portfolio_cumulative_return=Portfolio_cumulative_return,\r\n REP_cumulative_return=REP_cumulative_return,\r\n )\r\n\r\n\r\ndef analyze(context=None, results=None):\r\n import matplotlib.pyplot as plt\r\n import pandas as pd\r\n import sys\r\n import os\r\n from os.path import basename\r\n \r\n # Plot the portfolio and asset data.\r\n ax1 = plt.subplot(221)\r\n results[[\r\n 'Portfolio_cumulative_return',\r\n 'REP_cumulative_return',\r\n ]].plot(ax=ax1)\r\n ax1.set_ylabel('Percent Return (%)')\r\n\r\n ax2 = plt.subplot(222, sharex=ax1)\r\n ax2.set_ylabel('{asset} (USD)'.format(asset=context.ASSET_NAME))\r\n (context.TICK_SIZE * results[[\r\n 'price',\r\n 'base_price',\r\n ]]).plot(ax=ax2)\r\n\r\n trans = results.ix[[t != [] for t in results.transactions]]\r\n buys = trans.ix[\r\n [t[0]['amount'] > 0 for t in trans.transactions]\r\n ]\r\n sells = trans.ix[\r\n [t[0]['amount'] < 0 for t in trans.transactions]\r\n ]\r\n\r\n ax2.plot(\r\n buys.index,\r\n context.TICK_SIZE * results.price[buys.index],\r\n '^',\r\n markersize=10,\r\n color='g',\r\n )\r\n ax2.plot(\r\n sells.index,\r\n context.TICK_SIZE * results.price[sells.index],\r\n 'v',\r\n markersize=10,\r\n color='r',\r\n )\r\n\r\n\r\n ax3 = plt.subplot(223, sharex=ax1)\r\n results[['leverage']].plot(ax=ax3)\r\n ax3.set_ylabel('Leverage ')\r\n\r\n ax4 = plt.subplot(224, sharex=ax1)\r\n results[['cash']].plot(ax=ax4)\r\n ax4.set_ylabel('Cash (USD)')\r\n\r\n plt.legend(loc=3)\r\n\r\n # Show the plot.\r\n plt.gcf().set_size_inches(16, 8)\r\n plt.show()\r\n \r\n # Save results in CSV file\r\n filename = os.path.splitext(basename(sys.argv[3]))[0]\r\n results.to_csv(filename + '.csv')",
"#!/usr/bin/env python\n#\n# Copyright 2017 Enigma MPC, Inc.\n# Copyright 2015 Quantopian, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# import pandas as pd\n# perf = pd.read_pickle('bah.pickle') # read in perf DataFrame\n# perf.head()\n\nfrom catalyst.finance.slippage import VolumeShareSlippage\n\nfrom catalyst.api import (\n order_target_value,\n symbol,\n record,\n cancel_order,\n get_open_orders,\n)\n\ndef initialize(context):\n context.ASSET_NAME = 'USDT_BTC'\n context.TARGET_HODL_RATIO = 0.8\n context.RESERVE_RATIO = 1.0 - context.TARGET_HODL_RATIO\n\n # For all trading pairs in the poloniex bundle, the default denomination\n # currently supported by Catalyst is 1/1000th of a full coin. Use this\n # constant to scale the price of up to that of a full coin if desired.\n context.TICK_SIZE = 1000.0\n\n context.is_buying = True\n context.asset = symbol(context.ASSET_NAME)\n\n context.i = 0\n\ndef handle_data(context, data):\n context.i += 1\n\n starting_cash = context.portfolio.starting_cash\n target_hodl_value = context.TARGET_HODL_RATIO * starting_cash\n reserve_value = context.RESERVE_RATIO * starting_cash\n\n # Cancel any outstanding orders\n orders = get_open_orders(context.asset) or []\n for order in orders:\n cancel_order(order)\n \n # Stop buying after passing the reserve threshold\n cash = context.portfolio.cash\n if cash <= reserve_value:\n context.is_buying = False\n\n # Retrieve current asset price from pricing data\n price = data[context.asset].price\n\n # Check if still buying and could (approximately) afford another purchase\n if context.is_buying and cash > price:\n # Place order to make position in asset equal to target_hodl_value\n order_target_value(\n context.asset,\n target_hodl_value,\n limit_price=price*1.1,\n stop_price=price*0.9,\n )\n\n record(\n price=price,\n volume=data[context.asset].volume,\n cash=cash,\n starting_cash=context.portfolio.starting_cash,\n leverage=context.account.leverage,\n )\n\ndef analyze(context=None, results=None):\n import matplotlib.pyplot as plt\n\n # Plot the portfolio and asset data.\n ax1 = plt.subplot(611)\n results[['portfolio_value']].plot(ax=ax1)\n ax1.set_ylabel('Portfolio Value (USD)')\n\n ax2 = plt.subplot(612, sharex=ax1)\n ax2.set_ylabel('{asset} (USD)'.format(asset=context.ASSET_NAME))\n (context.TICK_SIZE * results[['price']]).plot(ax=ax2)\n\n trans = results.ix[[t != [] for t in results.transactions]]\n buys = trans.ix[\n [t[0]['amount'] > 0 for t in trans.transactions]\n ]\n ax2.plot(\n buys.index,\n context.TICK_SIZE * results.price[buys.index],\n '^',\n markersize=10,\n color='g',\n )\n\n ax3 = plt.subplot(613, sharex=ax1)\n results[['leverage', 'alpha', 'beta']].plot(ax=ax3)\n ax3.set_ylabel('Leverage ')\n\n ax4 = plt.subplot(614, sharex=ax1)\n results[['starting_cash', 'cash']].plot(ax=ax4)\n ax4.set_ylabel('Cash (USD)')\n\n results[[\n 'treasury',\n 'algorithm',\n 'benchmark',\n ]] = results[[\n 'treasury_period_return',\n 'algorithm_period_return',\n 'benchmark_period_return',\n ]]\n\n ax5 = 
plt.subplot(615, sharex=ax1)\n    results[[\n        'treasury',\n        'algorithm',\n        'benchmark',\n    ]].plot(ax=ax5)\n    ax5.set_ylabel('Percent Change')\n\n    ax6 = plt.subplot(616, sharex=ax1)\n    results[['volume']].plot(ax=ax6)\n    ax6.set_ylabel('Volume (mCoins/5min)')\n\n    plt.legend(loc=3)\n\n    # Show the plot.\n    plt.gcf().set_size_inches(18, 8)\n    plt.show()\n\n    # Save results in CSV file\n    import sys\n    import os\n    from os.path import basename\n    filename = os.path.splitext(basename(sys.argv[3]))[0]\n    results.to_csv(filename + '.csv')"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gcf"
],
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.gcf"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
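The first algorithm above implements a "Shannon's demon" style rule: rebalance back to a 0.5 investment ratio whenever the price doubles or halves relative to the last base price. A minimal sketch of just that trigger logic, free of the Catalyst API, with made-up prices:

def should_rebalance(price, base_price):
    # Rebalance when price exceeds 2x or falls below 0.5x the base price.
    return price > base_price * 2 or price < base_price / 2

base_price = 100.0
for price in [120.0, 210.0, 190.0, 45.0]:
    if should_rebalance(price, base_price):
        print(f'rebalance to 0.5 ratio at price {price}')
        base_price = price  # reset the reference point after rebalancing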
telemetry2u/public
|
[
"cda938d5806c034e5cb3277a6c98fb1acbc0f528"
] |
[
"Python/Telemetry2U.py"
] |
[
"#!/usr/bin/env python\n# Simple Python script demonstrating use of the Telemetry2U APi\n# to retrieve data from LoRaWAN nodes.\n\n# MIT License\n# Copyright (c) 2021 Telemetry2U Pty Lrd\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom http.client import HTTPSConnection\nimport json\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom datetime import datetime, timedelta\n\ndef do_request(request):\n connection = HTTPSConnection(\"telemetry2u.com\")\n # Your API key and authorization string may be generated under the Account / Api Keys section of your Telemetry2U account.\n # The following authorization details are for the demo account and may be used for experimentation.\n authorization = \"Basic ZGVtb0BleGFtcGxlLm9yZzpQOXg2ZGgrSXpZYVV1NS9mUHpjL1JZZkh3VzFuL0gyNStsMVNlYi9TY3oxUQ==\"\n headers = { \"Authorization\" : authorization}\n connection.request(\"GET\", request, headers=headers)\n response = connection.getresponse()\n data = json.loads(response.read())\n return pd.json_normalize(data)\n\ndef main():\n # Retrieve and print list of node ids / descriptions\n nodes = do_request(\"/api/nodes\")\n print(nodes[['nodeId', 'description']])\n\n # Find nodeId for \"LHT65 Fridge/Freezer Demo\"\n nodeId = nodes.query(\"description=='LHT65 Fridge/Freezer Demo'\")[\"nodeId\"].values[0]\n\n # Call api/data endpoint to retrieve data for node for past week.\n startDate = (datetime.now() - timedelta(days=7)).strftime('%Y-%m-%d')\n endDate = \"9999-12-31\"; # Use large end date to retrieve most recent data\n data = do_request(f\"/api/data/{nodeId}/{startDate}/{endDate}\")\n\n data['Int. Temperature'].plot()\n data['Int. Humidity'].plot()\n plt.show()\n \nif __name__ == '__main__':\n main()\n"
] |
[
[
"pandas.json_normalize",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0"
],
"scipy": [],
"tensorflow": []
}
] |
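The do_request() helper above flattens the JSON response with pandas.json_normalize, the call the version list for this row hinges on. A minimal sketch of that step on inline data, so no live API call is needed; the payload values are made up:

import pandas as pd

payload = [
    {"nodeId": 1, "description": "LHT65 Fridge/Freezer Demo"},
    {"nodeId": 2, "description": "Second demo node"},
]
# json_normalize turns a list of dicts into a flat DataFrame
nodes = pd.json_normalize(payload)
print(nodes[["nodeId", "description"]])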
lyyu0413/InstaF_Python
|
[
"949f5bd1c6d4d534409e36c977fd3b12b555f3d3"
] |
[
"InstaF_Python/RGB_manipulation.py"
] |
[
"# Copyright 2019 Betty Zhou\n\nimport numpy as np\nimport skimage.io\nimport pytest\n\ndef RGB_manipulation(input_path, output_path, R = 2, G = 2, B = 2):\n '''\n Manipulates the RGB intensity of an image\n\n Inputs\n ------\n input_path: string, path for an image file in .png format\n output_path: string, path for the output image in .png format\n R: int, the weight to adjust intensity for red channel, all with default 2\n G: int, the weight to adjust intensity for green channel, all with default 2\n B: int, the weight to adjust intensity for blue channel, all with default 2\n\n Returns\n -------\n .png format image at the output path\n '''\n\n # Read in .png as np.array and exception handling\n try:\n img = skimage.io.imread(input_path)[:,:,:3]\n except AttributeError:\n print(\"Please provide the input path as a string\")\n raise\n except OSError:\n print(\"The input is not a .png file\")\n raise\n except FileNotFoundError:\n print(\"The input path does not exist\")\n raise\n except Exception as error:\n paste(\"Error: \", error)\n raise\n\n # construct filter based on user input of RGB weights\n filter = np.array([[[R, G, B]]])\n\n # Adjust RGB intensity with filter\n output = img * filter\n\n # Adjust RGB intenity above 255 to 255 and ensure output is uint8 type\n output[output > 255] = 255\n output_img = output.astype(np.uint8)\n\n # output RGB manipulated img at output input_path and exception handling\n try:\n skimage.io.imsave(output_path, output_img)\n except AttributeError:\n print(\"Please provide the output path as a string\")\n raise\n except Exception as error:\n paste(\"Error: \", error)\n raise\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
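The heart of RGB_manipulation() above is per-channel scaling followed by clipping and a uint8 cast. A minimal sketch of that arithmetic on a synthetic 2x2 image, with no file I/O; the pixel values and weights are illustrative:

import numpy as np

img = np.full((2, 2, 3), 100, dtype=np.uint8)  # tiny stand-in image
weights = np.array([[[2, 2, 2]]])              # R, G, B multipliers
# Scale each channel, cap at 255, and restore the uint8 image dtype
out = np.clip(img * weights, 0, 255).astype(np.uint8)
print(out[0, 0])  # [200 200 200]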
shafferm/kb_DRAM
|
[
"f414190b56e12aef939e405413b930b96ccb781a"
] |
[
"lib/kb_DRAM/kb_DRAMImpl.py"
] |
[
"# -*- coding: utf-8 -*-\n#BEGIN_HEADER\nimport logging\nimport os\nimport pandas as pd\nimport yaml\nimport warnings\n\nfrom mag_annotator import __version__ as dram_version\nfrom mag_annotator.database_processing import import_config, set_database_paths, print_database_locations\nfrom mag_annotator.annotate_bins import annotate_bins, annotate_called_genes\nfrom mag_annotator.summarize_genomes import summarize_genomes\nfrom mag_annotator.annotate_vgfs import annotate_vgfs, remove_bad_chars\nfrom mag_annotator.summarize_vgfs import summarize_vgfs\nfrom mag_annotator.utils import remove_suffix\n\nfrom installed_clients.WorkspaceClient import Workspace as workspaceService\nfrom installed_clients.AssemblyUtilClient import AssemblyUtil\nfrom installed_clients.GenomeFileUtilClient import GenomeFileUtil\nfrom installed_clients.annotation_ontology_apiServiceClient import annotation_ontology_api\nfrom installed_clients.KBaseDataObjectToFileUtilsClient import KBaseDataObjectToFileUtils\nfrom installed_clients.DataFileUtilClient import DataFileUtil\n\nfrom .utils.dram_util import get_annotation_files, get_distill_files, generate_genomes, add_ontology_terms,\\\n get_viral_distill_files\nfrom .utils.kbase_util import generate_product_report\n\nTHREADS = 30\n\n# TODO: Fix no pfam annotations bug\n#END_HEADER\n\n\nclass kb_DRAM:\n '''\n Module Name:\n kb_DRAM\n\n Module Description:\n A KBase module: kb_DRAM\n '''\n\n ######## WARNING FOR GEVENT USERS ####### noqa\n # Since asynchronous IO can lead to methods - even the same method -\n # interrupting each other, you must be *very* careful when using global\n # state. A method could easily clobber the state set by another while\n # the latter method is running.\n ######################################### noqa\n VERSION = \"0.0.2\"\n GIT_URL = \"https://github.com/shafferm/kb_DRAM.git\"\n GIT_COMMIT_HASH = \"6c91eb1cdbd74eec6efd105477c89b76f34cabd9\"\n\n #BEGIN_CLASS_HEADER\n #END_CLASS_HEADER\n\n # config contains contents of config file in a hash or None if it couldn't\n # be found\n def __init__(self, config):\n #BEGIN_CONSTRUCTOR\n self.callback_url = os.environ['SDK_CALLBACK_URL']\n self.workspaceURL = config['workspace-url']\n self.shared_folder = config['scratch']\n logging.basicConfig(format='%(created)s %(levelname)s: %(message)s',\n level=logging.INFO)\n #END_CONSTRUCTOR\n pass\n\n\n def run_kb_dram_annotate(self, ctx, params):\n \"\"\"\n This example function accepts any number of parameters and returns results in a KBaseReport\n :param params: instance of mapping from String to unspecified object\n :returns: instance of type \"ReportResults\" -> structure: parameter\n \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN run_kb_dram_annotate\n # validate inputs\n if not isinstance(params['assembly_input_ref'], str) or not len(params['assembly_input_ref']):\n raise ValueError('Pass in a valid assembly reference string')\n if not isinstance(params['desc'], str) or not len(params['desc']):\n raise ValueError('Pass in a valid genomeSet description')\n if not isinstance(params['output_name'], str) or not len(params['output_name']):\n raise ValueError('Pass in a valid genomeSet output name')\n if not isinstance(params['min_contig_size'], int) or (params['min_contig_size'] < 0):\n raise ValueError('Min contig size must be a non-negative integer')\n\n # setup params\n with open(\"/kb/module/kbase.yml\", 'r') as stream:\n data_loaded = yaml.load(stream)\n version = 
str(data_loaded['module-version'])\n\n is_metagenome = params['is_metagenome']\n min_contig_size = params['min_contig_size']\n trans_table = str(params['trans_table'])\n bitscore = params['bitscore']\n rbh_bitscore = params['rbh_bitscore']\n output_dir = os.path.join(self.shared_folder, 'DRAM_annos')\n output_objects = []\n\n # create Util objects\n wsClient = workspaceService(self.workspaceURL, token=ctx['token'])\n assembly_util = AssemblyUtil(self.callback_url)\n genome_util = GenomeFileUtil(self.callback_url)\n\n # set DRAM database locations\n print('DRAM version: %s' % dram_version)\n import_config('/data/DRAM_databases/CONFIG')\n # This is a hack to get around a bug in my database setup\n set_database_paths(description_db_loc='/data/DRAM_databases/description_db.sqlite')\n print_database_locations()\n\n # get files\n assemblies = assembly_util.get_fastas({'ref_lst': [params['assembly_input_ref']]})\n # would paths ever have more than one thing?\n fasta_locs = [assembly_data['paths'][0] for assembly_ref, assembly_data in assemblies.items()]\n # get assembly refs from dram assigned genome names\n assembly_ref_dict = {os.path.splitext(os.path.basename(remove_suffix(assembly_data['paths'][0], '.gz')))[0]:\n assembly_ref for assembly_ref, assembly_data in assemblies.items()}\n\n # annotate and distill with DRAM\n annotate_bins(fasta_locs, output_dir, min_contig_size, trans_table=trans_table, bit_score_threshold=bitscore,\n rbh_bit_score_threshold=rbh_bitscore, low_mem_mode=True, rename_bins=False, keep_tmp_dir=False,\n threads=THREADS, verbose=False)\n output_files = get_annotation_files(output_dir)\n distill_output_dir = os.path.join(output_dir, 'distilled')\n summarize_genomes(output_files['annotations']['path'], output_files['trnas']['path'],\n output_files['rrnas']['path'], output_dir=distill_output_dir, groupby_column='fasta')\n output_files = get_distill_files(distill_output_dir, output_files)\n\n if is_metagenome:\n pass # TODO: make annotated metagenome object\n else:\n # generate genome files\n annotations = pd.read_csv(output_files['annotations']['path'], sep='\\t', index_col=0)\n genome_objects = generate_genomes(annotations, output_files['genes_fna']['path'],\n output_files['genes_faa']['path'], assembly_ref_dict, assemblies,\n params[\"workspace_name\"], ctx.provenance())\n\n genome_ref_dict = dict()\n genome_set_elements = dict()\n for genome_object in genome_objects:\n info = genome_util.save_one_genome(genome_object)[\"info\"]\n genome_ref = '%s/%s/%s' % (info[6], info[0], info[4])\n genome_set_elements[genome_object[\"name\"]] = {'ref': genome_ref}\n output_objects.append({\"ref\": genome_ref,\n \"description\": 'Annotated Genome'})\n genome_ref_dict[genome_object[\"name\"]] = genome_ref\n\n # add ontology terms\n anno_api = annotation_ontology_api(service_ver=\"beta\")\n ontology_events = add_ontology_terms(annotations, params['desc'], version, params['workspace_name'],\n self.workspaceURL, genome_ref_dict)\n [anno_api.add_annotation_ontology_events(i) for i in ontology_events]\n\n # make genome set\n # TODO: only make genome set if there is more than one genome\n if 'provenance' in ctx:\n provenance = ctx['provenance']\n else:\n provenance = [{}]\n # add additional info to provenance here, in this case the input data object reference\n provenance[0]['input_ws_objects'] = list(genome_ref_dict.values())\n provenance[0]['service'] = 'kb_SetUtilities'\n provenance[0]['method'] = 'KButil_Batch_Create_GenomeSet'\n output_genomeSet_obj = {'description': params['desc'],\n 
'elements': genome_set_elements}\n output_genomeSet_name = params['output_name']\n new_obj_info = wsClient.save_objects({'workspace': params['workspace_name'],\n 'objects': [{'type': 'KBaseSearch.GenomeSet',\n 'data': output_genomeSet_obj,\n 'name': output_genomeSet_name,\n 'meta': {},\n 'provenance': provenance\n }]\n })[0]\n genome_set_ref = '%s/%s/%s' % (new_obj_info[6], new_obj_info[0], new_obj_info[4])\n output_objects.append({\"ref\": genome_set_ref,\n \"description\": params['desc']})\n\n # generate report\n product_html_loc = os.path.join(distill_output_dir, 'product.html')\n report = generate_product_report(self.callback_url, params['workspace_name'], output_dir, product_html_loc,\n output_files, output_objects)\n output = {\n 'report_name': report['name'],\n 'report_ref': report['ref'],\n }\n #END run_kb_dram_annotate\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method run_kb_dram_annotate return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def run_kb_dram_annotate_genome(self, ctx, params):\n \"\"\"\n :param params: instance of mapping from String to unspecified object\n :returns: instance of type \"ReportResults\" -> structure: parameter\n \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN run_kb_dram_annotate_genome\n # validate inputs\n if not isinstance(params['genome_input_ref'], str) or not len(params['genome_input_ref']):\n raise ValueError('Pass in a valid genome reference string')\n\n # setup\n with open(\"/kb/module/kbase.yml\", 'r') as stream:\n data_loaded = yaml.load(stream)\n version = str(data_loaded['module-version'])\n genome_input_ref = params['genome_input_ref']\n bitscore = params['bitscore']\n rbh_bitscore = params['rbh_bitscore']\n\n # create Util objects\n wsClient = workspaceService(self.workspaceURL, token=ctx['token'])\n object_to_file_utils = KBaseDataObjectToFileUtils(self.callback_url, token=ctx['token'])\n\n # set DRAM database locations\n print('DRAM version: %s' % dram_version)\n import_config('/data/DRAM_databases/CONFIG')\n # This is a hack to get around a bug in my database setup\n set_database_paths(description_db_loc='/data/DRAM_databases/description_db.sqlite')\n print_database_locations()\n\n # get genomes\n genome_dir = os.path.join(self.shared_folder, 'genomes')\n os.mkdir(genome_dir)\n genome_info = wsClient.get_object_info_new({'objects': [{'ref': genome_input_ref}]})[0]\n genome_input_type = genome_info[2]\n faa_locs = list()\n genome_ref_dict = {}\n if 'GenomeSet' in genome_input_type:\n genomeSet_object = wsClient.get_objects2({'objects': [{'ref': genome_input_ref}]})['data'][0]['data']\n for ref_dict in genomeSet_object['elements'].values():\n genome_ref = ref_dict['ref']\n name = wsClient.get_object_info_new({'objects': [{'ref': genome_ref}]})[0][1]\n genome_ref_dict[name] = genome_ref\n else:\n genome_ref_dict[genome_info[1]] = genome_input_ref\n for genome_name, genome_ref in genome_ref_dict.items():\n # this makes the names match if you are doing a genome or genomeSet\n faa_file = '%s.faa' % genome_name\n faa_object = object_to_file_utils.GenomeToFASTA({\"genome_ref\": genome_ref,\n \"file\": faa_file,\n \"dir\": genome_dir,\n \"console\": [],\n \"invalid_msgs\": [],\n \"residue_type\": 'P',\n \"feature_type\": None,\n \"record_id_pattern\": None,\n \"record_desc_pattern\": None,\n \"case\": None,\n \"linewrap\": None})\n 
faa_locs.append(faa_object['fasta_file_path'])\n\n # annotate and distill with DRAM\n output_dir = os.path.join(self.shared_folder, 'DRAM_annos')\n annotate_called_genes(faa_locs, output_dir, bit_score_threshold=bitscore, rbh_bit_score_threshold=rbh_bitscore,\n low_mem_mode=True, rename_genes=False, keep_tmp_dir=False, threads=THREADS, verbose=False)\n output_files = get_annotation_files(output_dir)\n distill_output_dir = os.path.join(output_dir, 'distilled')\n summarize_genomes(output_files['annotations']['path'], output_files['trnas']['path'],\n output_files['rrnas']['path'], output_dir=distill_output_dir, groupby_column='fasta')\n output_files = get_distill_files(distill_output_dir, output_files)\n\n # add ontology terms\n annotations = pd.read_csv(output_files['annotations']['path'], sep='\\t', index_col=0, dtype={'fasta': str})\n anno_api = annotation_ontology_api(service_ver=\"beta\")\n ontology_events = add_ontology_terms(annotations, \"DRAM genome annotated\", version, params['workspace_name'],\n self.workspaceURL, genome_ref_dict)\n annotation_events = [anno_api.add_annotation_ontology_events(i) for i in ontology_events]\n\n # generate report\n product_html_loc = os.path.join(distill_output_dir, 'product.html')\n report = generate_product_report(self.callback_url, params['workspace_name'], output_dir, product_html_loc,\n output_files)\n output = {\n 'report_name': report['name'],\n 'report_ref': report['ref'],\n }\n #END run_kb_dram_annotate_genome\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method run_kb_dram_annotate_genome return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n\n def run_kb_dramv_annotate(self, ctx, params):\n \"\"\"\n :param params: instance of mapping from String to unspecified object\n :returns: instance of type \"ReportResults\" -> structure: parameter\n \"report_name\" of String, parameter \"report_ref\" of String\n \"\"\"\n # ctx is the context object\n # return variables are: output\n #BEGIN run_kb_dramv_annotate\n warnings.filterwarnings(\"ignore\")\n\n # setup\n affi_contigs_shock_ids = params['affi_contigs_shock_id']\n min_contig_size = params['min_contig_size']\n trans_table = str(params['trans_table'])\n bitscore = params['bitscore']\n rbh_bitscore = params['rbh_bitscore']\n\n assembly_util = AssemblyUtil(self.callback_url)\n datafile_util = DataFileUtil(self.callback_url)\n\n # get contigs and merge\n assemblies = assembly_util.get_fastas({'ref_lst': [params['assembly_input_ref']]})\n fasta = os.path.join(self.shared_folder, 'merged_contigs.fasta')\n with open(fasta, 'w') as f:\n for assembly_ref, assembly_data in assemblies.items():\n fasta_path = assembly_data['paths'][0]\n for line in open(fasta_path):\n f.write(line)\n\n # get affi contigs, read all and merge\n affi_contigs_path = os.path.join(self.shared_folder, 'VIRSorter_affi-contigs.tab')\n with open(affi_contigs_path, 'w') as f:\n for affi_contigs_shock_id in affi_contigs_shock_ids:\n temp_affi_contigs_path = os.path.join(self.shared_folder, 'temp_VIRSorter_affi-contigs.tab')\n temp_affi_contigs = datafile_util.shock_to_file({\n 'shock_id': affi_contigs_shock_id,\n 'file_path': temp_affi_contigs_path,\n 'unpack': 'unpack'\n })['file_path']\n for line in open(temp_affi_contigs):\n f.write(line)\n os.remove(temp_affi_contigs)\n\n # set DRAM database locations\n print('DRAM version: %s' % dram_version)\n import_config('/data/DRAM_databases/CONFIG')\n # This is a hack to get around a 
bug in my database setup\n set_database_paths(description_db_loc='/data/DRAM_databases/description_db.sqlite')\n print_database_locations()\n\n # clean affi contigs file\n cleaned_fasta = os.path.join(self.shared_folder, '%s.cleaned.fasta' % os.path.basename(fasta))\n remove_bad_chars(input_fasta=fasta, output=cleaned_fasta)\n cleaned_affi_contigs = os.path.join(self.shared_folder, 'VIRSorter_affi-contigs.cleaned.tab')\n remove_bad_chars(input_virsorter_affi_contigs=affi_contigs_path, output=cleaned_affi_contigs)\n\n # annotate and distill\n output_dir = os.path.join(self.shared_folder, 'DRAM_annos')\n annotate_vgfs(cleaned_fasta, cleaned_affi_contigs, output_dir, min_contig_size, trans_table=trans_table,\n bit_score_threshold=bitscore, rbh_bit_score_threshold=rbh_bitscore, low_mem_mode=True,\n keep_tmp_dir=False, threads=THREADS, verbose=False)\n output_files = get_annotation_files(output_dir)\n distill_output_dir = os.path.join(output_dir, 'distilled')\n summarize_vgfs(output_files['annotations']['path'], distill_output_dir, groupby_column='scaffold')\n output_files = get_viral_distill_files(distill_output_dir, output_files)\n\n # generate report\n product_html_loc = os.path.join(distill_output_dir, 'product.html')\n report = generate_product_report(self.callback_url, params['workspace_name'], output_dir,\n product_html_loc, output_files)\n output = {\n 'report_name': report['name'],\n 'report_ref': report['ref'],\n }\n #END run_kb_dramv_annotate\n\n # At some point might do deeper type checking...\n if not isinstance(output, dict):\n raise ValueError('Method run_kb_dramv_annotate return value ' +\n 'output is not type dict as required.')\n # return the results\n return [output]\n def status(self, ctx):\n #BEGIN_STATUS\n returnVal = {'state': \"OK\",\n 'message': \"\",\n 'version': self.VERSION,\n 'git_url': self.GIT_URL,\n 'git_commit_hash': self.GIT_COMMIT_HASH}\n #END_STATUS\n return [returnVal]\n"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
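The kb_DRAM module above reads its version from kbase.yml with a bare yaml.load(stream), which recent PyYAML releases no longer accept without an explicit Loader. A minimal sketch of the same read using safe_load, assuming a kbase.yml file with a module-version key exists in the working directory:

import yaml

# safe_load parses plain YAML without constructing arbitrary Python objects
with open('kbase.yml', 'r') as stream:
    data_loaded = yaml.safe_load(stream)
version = str(data_loaded['module-version'])
print(version)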
Niky1/size-image-annotator
|
[
"fce57e97ac61601aa8c67cf69438f6dc6dbac946"
] |
[
"plot_results.py"
] |
[
"import json\nimport os.path\nimport sys\nfrom os import path\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom imageio import imread\nfrom matplotlib import gridspec\n\nwith open(sys.argv[1]) as f:\n data = json.load(f)\npicture_indices = [i for datapoint in data for i in (\n datapoint[\"left\"], datapoint[\"right\"])]\npicture_paths = [\n f\"pics/ILSVRC2012_test_{i:08d}.zoom00.JPEG\" for i in picture_indices]\npics = [imread(path) for path in picture_paths]\n\nlabels = []\nfor datapoint in data:\n cur_labels = [1, 0] if datapoint[\"label\"] == 0 else [0, 1]\n labels.extend(cur_labels)\n\nncols = 10\nnrows = 10\ngs = gridspec.GridSpec(nrows, ncols, hspace=.25)\n\nfig = plt.figure(figsize=(16, 16))\nfor i in range(ncols):\n for j in range(nrows):\n cur_index = i * ncols + j\n ax = fig.add_subplot(gs[i, j])\n ax.set_title(\"label: \" + str(labels[cur_index]), fontdict={\"fontsize\": 8}, pad=4)\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n plt.imshow(pics[cur_index])\nplt.savefig(\"fig.png\", bbox_inches=\"tight\")\n"
] |
[
[
"matplotlib.pyplot.imshow",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
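plot_results.py above lays out labeled thumbnails with matplotlib's GridSpec. A minimal sketch of that grid pattern using random arrays in place of the JPEG files; the 2x2 size and figure dimensions are illustrative:

import matplotlib.pyplot as plt
import numpy as np
from matplotlib import gridspec

pics = [np.random.rand(8, 8) for _ in range(4)]  # stand-ins for loaded images
gs = gridspec.GridSpec(2, 2, hspace=.25)
fig = plt.figure(figsize=(4, 4))
for i in range(2):
    for j in range(2):
        ax = fig.add_subplot(gs[i, j])
        ax.set_xticklabels([])  # hide tick labels, as in the record
        ax.set_yticklabels([])
        ax.imshow(pics[i * 2 + j])
plt.savefig("fig.png", bbox_inches="tight")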
minister19/Python_snippets
|
[
"69accc4278443271aefc7e354161eac7df2fa283"
] |
[
"pandas/rolling_mean.py"
] |
[
"import matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom numpy import NaN\n\nis_ipython = 'inline' in matplotlib.get_backend()\nif is_ipython:\n from IPython import display\n\n\ndef plot_single_with_mean(config):\n '''\n config: {\n 'id': unique identifier,\n 'title': '',\n 'xlabel': '',\n 'ylabel': '',\n 'x_data': [],\n 'y_data': [],\n 'm': int\n }\n '''\n fig = plt.figure(config['id'])\n axes = fig.get_axes()\n _data = config['y_data']\n m = config['m']\n if m > 0 and len(_data) > m:\n means = pd.Series(_data).rolling(m).mean()\n print(len(_data), len(means))\n else:\n means = [NaN] * len(_data)\n if len(axes) == 0:\n plt.title(config['title'])\n plt.xlabel(config['xlabel'])\n plt.plot(config['x_data'], config['y_data'], label=config['ylabel'])\n plt.plot(config['x_data'], means, label=config['ylabel'] + '_mean')\n else:\n ax = axes[0]\n line, meanline = ax.get_lines()\n line.set_xdata(config['x_data'])\n line.set_ydata(config['y_data'])\n meanline.set_xdata(config['x_data'])\n meanline.set_ydata(means)\n ax.relim()\n ax.autoscale_view(True, True, True)\n if is_ipython:\n display.clear_output(wait=True)\n display.display(fig)\n else:\n plt.pause(0.2) # pause a bit so that plots are updated\n return axes\n\n\nconfig = {\n 'id': 2,\n 'title': 'single_with_mean',\n 'xlabel': 't',\n 'ylabel': 'l1',\n 'x_data': range(5),\n 'y_data': [1, 3, 6, 7, 9],\n \"m\": 3\n}\nplot_single_with_mean(config)\nplt.show(block=True)\n"
] |
[
[
"pandas.Series",
"matplotlib.pyplot.title",
"matplotlib.get_backend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
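plot_single_with_mean() above overlays a rolling mean on the raw series. The core computation, isolated, with the same window logic (NaN until the window fills); the series values are made up:

import pandas as pd

y = [1, 3, 6, 7, 9]
# A window of 3 yields NaN for the first two points, then trailing averages
means = pd.Series(y).rolling(3).mean()
print(means.tolist())  # [nan, nan, 3.33..., 5.33..., 7.33...]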
norbertosanchezdichi/TIL
|
[
"2e9719ddd288022f53b094a42679e849bdbcc625"
] |
[
"MachineLearning/Classification/KernelSupportVectorMachine(SVM)Classification/kernel_support_vector_machine_(svm)_classification.py"
] |
[
"# Import libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Import dataset\ndataset = pd.read_csv('Social_Network_Ads.csv')\nX = dataset.iloc[:, :-1].values\nY = dataset.iloc[:, -1].values\n\nprint(f\"X = {X}\")\nprint(f\"Y = {Y}\")\nprint()\n\n# Split Dataset: Training Set and Test Set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.25, random_state = 0)\n\nprint(f\"X_train = {X_train}\")\nprint(f\"X_test = {X_test}\")\nprint(f\"Y_train = {Y_train}\")\nprint(f\"Y_test = {Y_test}\")\nprint()\n\n# Feature Scaling (done after splitting to avoid information leakage.)\nfrom sklearn.preprocessing import StandardScaler\nstandardScaler = StandardScaler()\nX_train_scaled = standardScaler.fit_transform(X_train)\nX_test_scaled = standardScaler.transform(X_test)\n\nprint(f\"X_train_scaled = {X_train_scaled}\")\nprint(f\"X_test_scaled = {X_test_scaled}\")\nprint()\n\n# Kernel Support Vector Machine (SVM) Classifier\n## Effective for data sets that are non-linearly separable by mapping to a higher dimension.\n## The data set becomes separable by using a line, a hyperplane, or other structure with a dimension less than the mapped higher dimension.\n## Mapping to a higher dimensional space can become computationally expensive.\n## The Kernel Trick using the Gaussian Radial-Basis Function (RBF)\n### Its a function of a vector and a landmark, which is the center of the peak of the function.\n#### Using Euler's number, the function is three-dimensional and uses σ to adjust the radius of the base of the peak.\n### It is used to produce a decision boundary for a non-linearly separable dataset.\n### By choosing the optimal place for the landmark in the non-linear dataset and by tuning σ, the dataset is easily separated into two categories.\n### Multiple kernel functions can be used by adding them up such that multiple landmarks with a specific base radius are found to linearly separate the dataset in 3-D. This allows to create a more complex decision boundary.\n## Types of Kernel Functions\n### Gaussian Radial-Basis Function (RBF) Kernel\n### Sigmoid Kernel\n### Polynomial Kernel\n### mlkernels.readthedocs.io\n### When evaluation which kernel to use, evaluate on new observations (K-Fold Cross Validation) and use different metrics (Accuracy, F1 Score, etc.)\n## Non-Linear Support Vector Regression (SVR)\n### Results in a non-linear separation between the two categories.\n### For example, the intersection of three hyperplanes and the Gaussian RBF function is done in such a way that a non-linear solution projected to the 2-D space results in an accurate separation between the two categories.\n\n# Create and train Kernel Support Vector Machine (SVM) model\n## Use Gaussian Radial-Basis Function (RBF) kernel\nfrom sklearn.svm import SVC\nclassifier = SVC(kernel = 'rbf', random_state = 0)\nclassifier.fit(X_train_scaled, Y_train)\n\n# Predict if-purchase for 30 year old customer earning $87,000\nY_predict = classifier.predict(standardScaler.transform([[30, 87000]]))\n\n# Output prediction salary for a position 6\nprint(f\"Purchase possible from 30 year old earning $87,000? 
= {Y_predict}.\")\nprint()\n\n# Predict using Kernel Support Vector Machine (SVM) model\nY_predict = classifier.predict(X_test_scaled)\nprint(f\"[Y_predict Y_test] = {np.concatenate((Y_predict.reshape(len(Y_predict), 1), Y_test.reshape(len(Y_test), 1)), axis = 1)}\")\nprint()\n\n# Create Confusion Matrix\n## Not the optimal method to evaluate the performance of the model - K-Fold Cross Validation is preferred and it involves using validation tests.\nfrom sklearn.metrics import confusion_matrix\nprint(f\"Confusion Matrix = {confusion_matrix(Y_test, Y_predict)}\")\nprint()\n\n# Generate Accuracy Score\nfrom sklearn.metrics import accuracy_score\nprint(f\"Accuracy Score = {accuracy_score(Y_test, Y_predict)}\")\n\n# Output Training Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, Y_set = standardScaler.inverse_transform(X_train_scaled), Y_train\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 1),\n np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 1))\nplt.contourf(X1, X2, classifier.predict(standardScaler.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(Y_train)):\n plt.scatter(X_set[Y_train == j, 0], X_set[Y_train == j, 1], c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('Kernel Support Vector Machine (SVM) (Training Set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.savefig('Kernel_Support_Vector_Machine_Training_Set_Results.png')\nplt.clf()\n\n# Output Test Set Results\nfrom matplotlib.colors import ListedColormap\nX_set, Y_set = standardScaler.inverse_transform(X_test_scaled), Y_test\nX1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 10, stop = X_set[:, 0].max() + 10, step = 1),\n np.arange(start = X_set[:, 1].min() - 1000, stop = X_set[:, 1].max() + 1000, step = 1))\nplt.contourf(X1, X2, classifier.predict(standardScaler.transform(np.array([X1.ravel(), X2.ravel()]).T)).reshape(X1.shape),\n alpha = 0.75, cmap = ListedColormap(('red', 'green')))\nplt.xlim(X1.min(), X1.max())\nplt.ylim(X2.min(), X2.max())\nfor i, j in enumerate(np.unique(Y_test)):\n plt.scatter(X_set[Y_test == j, 0], X_set[Y_test == j, 1], c = ListedColormap(('red', 'green'))(i), label = j)\nplt.title('Kernel Support Vector Machine (SVM) (Test Set)')\nplt.xlabel('Age')\nplt.ylabel('Estimated Salary')\nplt.legend()\nplt.savefig('Kernel_Support_Vector_Machine_Test_Set_Results.png')\nplt.clf()"
] |
[
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"numpy.unique",
"sklearn.metrics.accuracy_score",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.savefig",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.clf",
"matplotlib.colors.ListedColormap",
"sklearn.svm.SVC",
"matplotlib.pyplot.xlabel",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.ylabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
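The record above fits an RBF-kernel SVC after standard scaling. A compact sketch of the same pipeline on a synthetic non-linearly separable dataset (make_moons stands in for Social_Network_Ads.csv, which is not reproduced here):

from sklearn.datasets import make_moons
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

X, y = make_moons(n_samples=200, noise=0.2, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)
scaler = StandardScaler()
clf = SVC(kernel='rbf', random_state=0)  # RBF kernel handles the curved boundary
clf.fit(scaler.fit_transform(X_train), y_train)
print(accuracy_score(y_test, clf.predict(scaler.transform(X_test))))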
sulaymandesai/pyUSID
|
[
"fa4d152856e4717c92b1fbe34222eb2e1c042707"
] |
[
"examples/beginner/plot_hdf_utils_read.py"
] |
[
"\"\"\"\n================================================================================\n04. Utilities for reading h5USID files\n================================================================================\n\n**Suhas Somnath**\n\n4/18/2018\n\n**This document illustrates the many handy functions in pyUSID.hdf_utils that significantly simplify reading data\nand metadata in Universal Spectroscopy and Imaging Data (USID) HDF5 files (h5USID files)**\n\"\"\"\n########################################################################################################################\n# Introduction\n# -------------\n# The USID model uses a data-centric approach to data analysis and processing meaning that results from all data analysis\n# and processing are written to the same h5 file that contains the recorded measurements. **Hierarchical Data Format\n# (HDF5)** files allow data, whether it is raw measured data or results of analysis, to be stored in multiple datasets within\n# the same file in a tree-like manner. Certain rules and considerations have been made in pyUSID to ensure\n# consistent and easy access to any data.\n#\n# The h5py python package provides great functions to create, read, and manage data in HDF5 files. In\n# ``pyUSID.hdf_utils``, we have added functions that facilitate scientifically relevant, or USID specific\n# functionality such as checking if a dataset is a Main dataset, reshaping to / from the original N dimensional form of\n# the data, etc. Due to the wide breadth of the functions in ``hdf_utils``, the guide for hdf_utils will be split in two\n# parts - one that focuses on functions that facilitate reading and one that facilitate writing of data. The following\n# guide provides examples of how, and more importantly when, to use functions in ``pyUSID.hdf_utils`` for various\n# scenarios.\n#\n# Recommended pre-requisite reading\n# ---------------------------------\n# * `Universal Spectroscopic and Imaging Data (USID) model </../../../USID/usid_model.html>`_\n# * `Crash course on HDF5 and h5py <./plot_h5py.html>`_\n#\n# .. tip::\n# You can download and run this document as a Jupyter notebook using the link at the bottom of this page.\n#\n# Import all necessary packages\n# -------------------------------\n#\n# Before we begin demonstrating the numerous functions in ``pyUSID.hdf_utils``, we need to import the necessary\n# packages. Here are a list of packages besides pyUSID that will be used in this example:\n#\n# * ``h5py`` - to open and close the file\n# * ``wget`` - to download the example data file\n# * ``numpy`` - for numerical operations on arrays in memory\n# * ``matplotlib`` - basic visualization of data\n\nfrom __future__ import print_function, division, unicode_literals\nimport os\n# Warning package in case something goes wrong\nfrom warnings import warn\nimport subprocess\nimport sys\n\n\ndef install(package):\n subprocess.call([sys.executable, \"-m\", \"pip\", \"install\", package])\n# Package for downloading online files:\n\ntry:\n # This package is not part of anaconda and may need to be installed.\n import wget\nexcept ImportError:\n warn('wget not found. Will install with pip.')\n import pip\n install(wget)\n import wget\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# import sidpy - supporting package for pyUSID:\ntry:\n import sidpy\nexcept ImportError:\n warn('sidpy not found. 
Will install with pip.')\n import pip\n install('sidpy')\n import sidpy\n\n# Finally import pyUSID.\ntry:\n import pyUSID as usid\nexcept ImportError:\n warn('pyUSID not found. Will install with pip.')\n import pip\n install('pyUSID')\n import pyUSID as usid\n\n########################################################################################################################\n# In order to demonstrate the many functions in hdf_utils, we will be using a h5USID file containing real\n# experimental data along with results from analyses on the measurement data\n#\n# This scientific dataset\n# -----------------------\n#\n# For this example, we will be working with a **Band Excitation Polarization Switching (BEPS)** dataset acquired from\n# advanced atomic force microscopes. In the much simpler **Band Excitation (BE)** imaging datasets, a single spectrum is\n# acquired at each location in a two dimensional grid of spatial locations. Thus, BE imaging datasets have two\n# position dimensions (``X``, ``Y``) and one spectroscopic dimension (``Frequency`` - against which the spectrum is recorded).\n# The BEPS dataset used in this example has a spectrum for **each combination of** three other parameters (``DC offset``,\n# ``Field``, and ``Cycle``). Thus, this dataset has three new spectral dimensions in addition to ``Frequency``. Hence,\n# this dataset becomes a 2+4 = **6 dimensional dataset**\n#\n# Load the dataset\n# ------------------\n# First, let us download this file from the pyUSID Github project:\n\nurl = 'https://raw.githubusercontent.com/pycroscopy/pyUSID/master/data/BEPS_small.h5'\nh5_path = 'temp.h5'\n_ = wget.download(url, h5_path, bar=None)\n\nprint('Working on:\\n' + h5_path)\n\n########################################################################################################################\n# Next, lets open this HDF5 file in read-only mode. Note that opening the file does not cause the contents to be\n# automatically loaded to memory. Instead, we are presented with objects that refer to specific HDF5 datasets,\n# attributes or groups in the file\n\nh5_path = 'temp.h5'\nh5_f = h5py.File(h5_path, mode='r')\n\n########################################################################################################################\n# Here, ``h5_f`` is an active handle to the open file\n#\n# Inspect HDF5 contents\n# ======================\n#\n# The file contents are stored in a tree structure, just like files on a contemporary computer. The file contains\n# groups (similar to file folders) and datasets (similar to spreadsheets).\n# There are several datasets in the file and these store:\n#\n# * The actual measurement collected from the experiment\n# * Spatial location on the sample where each measurement was collected\n# * Information to support and explain the spectral data collected at each location\n# * Since the USID model stores results from processing and analyses performed on the data in the same h5USID file,\n# these datasets and groups are present as well\n# * Any other relevant ancillary information\n#\n# print_tree()\n# ------------\n# Soon after opening any file, it is often of interest to list the contents of the file. 
While one can use the open\n# source software HDFViewer developed by the HDF organization, ``pyUSID.hdf_utils`` also has a very handy function -\n# ``print_tree()`` to quickly visualize all the datasets and groups within the file from within python.\n\nprint('Contents of the H5 file:')\nsidpy.hdf_utils.print_tree(h5_f)\n\n########################################################################################################################\n# By default, ``print_tree()`` presents a clean tree view of the contents of the group. In this mode, only the group names\n# are underlined. Alternatively, it can print the full paths of each dataset and group, with respect to the group / file\n# of interest, by setting the ``rel_paths``\n# keyword argument. ``print_tree()`` could also be used to display the contents of an HDF5 group instead of a complete HDF5\n# file as we have done above. Lets configure it to print the relative paths of all objects within the ``Channel_000``\n# group:\n\nsidpy.hdf_utils.print_tree(h5_f['/Measurement_000/Channel_000/'], rel_paths=True)\n\n########################################################################################################################\n# Finally, ``print_tree()`` can also be configured to only print USID Main datasets besides Group objects using the\n# ``main_dsets_only`` option\n\nsidpy.hdf_utils.print_tree(h5_f, main_dsets_only=True)\n\n########################################################################################################################\n# Accessing Attributes\n# ==================================\n#\n# HDF5 datasets and groups can also store metadata such as experimental parameters. These metadata can be text,\n# numbers, small lists of numbers or text etc. These metadata can be very important for understanding the datasets\n# and guide the analysis routines.\n#\n# While one could use the basic ``h5py`` functionality to access attributes, one would encounter a lot of problems when\n# attempting to decode attributes whose values were strings or lists of strings due to some issues in ``h5py``. This problem\n# has been demonstrated in our `primer to HDF5 and h5py <./plot_h5py.html>`_. Instead of using the basic functionality of ``h5py``, we recommend always\n# using the functions in pyUSID that reliably and consistently work for any kind of attribute for any version of\n# python:\n#\n# get_attributes()\n# ----------------\n#\n# ``get_attributes()`` is a very handy function that returns all or a specified set of attributes in an HDF5 object. If no\n# attributes are explicitly requested, all attributes in the object are returned:\n\nfor key, val in sidpy.hdf_utils.get_attributes(h5_f).items():\n    print('{} : {}'.format(key, val))\n\n########################################################################################################################\n# ``get_attributes()`` is also great for only getting selected attributes. 
For example, if we only cared about the user\n# and project related attributes, we could manually request any that we wanted:\n\nproj_attrs = sidpy.hdf_utils.get_attributes(h5_f, ['project_name', 'project_id', 'user_name'])\nfor key, val in proj_attrs.items():\n print('{} : {}'.format(key, val))\n\n########################################################################################################################\n# get_attr()\n# ----------\n#\n# If we are sure that we only want a specific attribute, we could instead use ``get_attr()`` as:\n\n\nprint(sidpy.hdf_utils.get_attr(h5_f, 'user_name'))\n\n########################################################################################################################\n# check_for_matching_attrs()\n# --------------------------\n# Consider the scenario where we have several HDF5 files or Groups or datasets and we want to check each one to\n# see if they have certain metadata / attributes. ``check_for_matching_attrs()`` is one very handy function that\n# simplifies the comparison operation.\n#\n# For example, let us check if this file was authored by ``John Doe``:\n\nprint(usid.hdf_utils.check_for_matching_attrs(h5_f, new_parms={'user_name': 'John Doe'}))\n\n########################################################################################################################\n# Finding datasets and groups\n# ============================\n#\n# There are numerous ways to search for and access datasets and groups in H5 files using the basic functionalities\n# of h5py. pyUSID.hdf_utils contains several functions that simplify common searching / lookup operations as part of\n# scientific workflows.\n#\n# find_dataset()\n# ----------------\n#\n# The ``find_dataset()`` function will return all datasets whose names contain the provided string. In this case, we\n# are looking for any datasets containing the string ``UDVS`` in their names. If you look above, there are two datasets\n# (UDVS and UDVS_Indices) that match this condition:\n\nudvs_dsets_2 = usid.hdf_utils.find_dataset(h5_f, 'UDVS')\nfor item in udvs_dsets_2:\n print(item)\n\n########################################################################################################################\n# As you might know by now, h5USID files contain three kinds of datasets:\n#\n# * ``Main`` datasets that contain data recorded / computed at multiple spatial locations.\n# * ``Ancillary`` datasets that support a main dataset\n# * Other datasets\n#\n# For more information, please refer to the documentation on the USID model.\n#\n# check_if_main()\n# ---------------\n# ``check_if_main()`` is a very handy function that helps distinguish between ``Main`` datasets and other objects\n# (``Ancillary`` datasets, other datasets, Groups etc.). 
Lets apply this function to see which of the objects within the\n# ``Channel_000`` Group are ``Main`` datasets:\n\nh5_chan_group = h5_f['Measurement_000/Channel_000']\n\n# We will prepare two lists - one of objects that are ``main`` and one of objects that are not\n\nnon_main_objs = []\nmain_objs = []\nfor key, val in h5_chan_group.items():\n if usid.hdf_utils.check_if_main(val):\n main_objs.append(key)\n else:\n non_main_objs.append(key)\n\n# Now we simply print the names of the items in each list\n\nprint('Main Datasets:')\nprint('----------------')\nfor item in main_objs:\n print(item)\nprint('\\nObjects that were not Main datasets:')\nprint('--------------------------------------')\nfor item in non_main_objs:\n print(item)\n\n########################################################################################################################\n# The above script allowed us to distinguish Main datasets from all other objects only within the Group named\n# ``Channel_000``.\n#\n# get_all_main()\n# --------------\n# What if we want to quickly find all ``Main`` datasets even within the sub-Groups of ``Channel_000``? To do this, we have a\n# very handy function called ``get_all_main()``:\n\nmain_dsets = usid.hdf_utils.get_all_main(h5_chan_group)\nfor dset in main_dsets:\n print(dset)\n print('--------------------------------------------------------------------')\n\n########################################################################################################################\n# The datasets above show that the file contains three main datasets. Two of these datasets are contained in an HDF5\n# Group called ``Raw_Data-SHO_Fit_000``, meaning that they are results of an operation called ``SHO_Fit`` performed on the\n# ``Main`` dataset - ``Raw_Data``. The first of the three main datasets is indeed the ``Raw_Data`` dataset from which the\n# latter two datasets (``Fit`` and ``Guess``) were derived.\n#\n# The USID model allows the same operation, such as ``SHO_Fit``, to be performed on the same dataset (``Raw_Data``),\n# multiple times. Each time the operation is performed, a new HDF5 Group is created to hold the new results. Often, we may\n# want to perform a few operations such as:\n#\n# * Find the (source / main) dataset from which certain results were derived\n# * Check if a particular operation was performed on a main dataset\n# * Find all groups corresponding to a particular operation (e.g. 
- ``SHO_Fit``) being applied to a Main dataset\n#\n# ``hdf_utils`` has a few handy functions for many of these use cases.\n#\n# find_results_groups()\n# ----------------------\n# First, lets show that ``find_results_groups()`` finds all Groups containing the results of a ``SHO_Fit`` operation applied\n# to ``Raw_Data``:\n\n# First get the dataset corresponding to Raw_Data\nh5_raw = h5_chan_group['Raw_Data']\n\noperation = 'SHO_Fit'\nprint('Instances of operation \"{}\" applied to dataset named \"{}\":'.format(operation, h5_raw.name))\nh5_sho_group_list = usid.hdf_utils.find_results_groups(h5_raw, operation)\nprint(h5_sho_group_list)\n\n########################################################################################################################\n# As expected, the ``SHO_Fit`` operation was performed on the ``Raw_Data`` dataset only once, which is why\n# ``find_results_groups()`` returned only one HDF5 Group - ``SHO_Fit_000``.\n#\n# check_for_old()\n# -----------------\n#\n# Often one may want to check if a certain operation was performed on a dataset with the very same parameters to\n# avoid recomputing the results. ``hdf_utils.check_for_old()`` is a very handy function that compares parameters (a\n# dictionary) for a new / potential operation against the metadata (attributes) stored in each existing results group\n# (HDF5 groups whose name starts with ``Raw_Data-SHO_Fit`` in this case). Before we demonstrate ``check_for_old()``, lets\n# take a look at the attributes stored in the existing results groups:\n\nprint('Parameters already used for computing SHO_Fit on Raw_Data in the file:')\nfor key, val in sidpy.hdf_utils.get_attributes(h5_chan_group['Raw_Data-SHO_Fit_000']).items():\n print('{} : {}'.format(key, val))\n\n########################################################################################################################\n# Now, let us check for existing results where the ``SHO_fit_method`` attribute matches an existing value and a new value:\n\nprint('Checking to see if SHO Fits have been computed on the raw dataset:')\nprint('\\nUsing \"pycroscopy BESHO\":')\nprint(usid.hdf_utils.check_for_old(h5_raw, 'SHO_Fit',\n new_parms={'SHO_fit_method': 'pycroscopy BESHO'}))\nprint('\\nUsing \"alternate technique\"')\nprint(usid.hdf_utils.check_for_old(h5_raw, 'SHO_Fit',\n new_parms={'SHO_fit_method': 'alternate technique'}))\n\n########################################################################################################################\n# Clearly, while ``find_results_groups()`` returned any and all groups corresponding to ``SHO_Fit`` being applied to\n# ``Raw_Data``, ``check_for_old()`` only returned the group(s) where the operation was performed using the same specified\n# parameters (``SHO_fit_method`` in this case).\n#\n# Note that ``check_for_old()`` performs two operations - searching for all groups with the matching nomenclature and then\n# comparing the attributes. 
``check_for_matching_attrs()`` is the handy function that enables the latter operation of\n# comparing a given dictionary of parameters against attributes in a given object.\n#\n# get_source_dataset()\n# ---------------------\n# ``hdf_utils.get_source_dataset()`` is a very handy function for the inverse scenario where we are interested in finding\n# the source dataset from which the known result was derived:\n\nh5_sho_group = h5_sho_group_list[0]\nprint('Datagroup containing the SHO fits:')\nprint(h5_sho_group)\nprint('\\nDataset on which the SHO Fit was computed:')\nh5_source_dset = usid.hdf_utils.get_source_dataset(h5_sho_group)\nprint(h5_source_dset)\n\n########################################################################################################################\n# Since the source dataset is always a ``Main`` dataset, ``get_source_dataset()`` returns a ``USIDataset`` object instead of\n# a regular ``HDF5 Dataset`` object.\n#\n# Note that ``hdf_utils.get_source_dataset()`` and ``find_results_groups()`` rely on the USID rule that results of an\n# operation be stored in a Group named ``Source_Dataset_Name-Operation_Name_00x``.\n\n# get_auxiliary_datasets()\n# -------------------------\n# The association of datasets and groups with one another provides a powerful mechanism for conveying (richer)\n# information. One way to associate objects with each other is to store the reference of an object as an attribute of\n# another. This is precisely the capability that is leveraged to turn certain datasets into USID Main Datasets or\n# ``USIDatasets``. USIDatasets need to have four attributes that are references to the ``Position`` and ``Spectroscopic``\n# ``ancillary`` datasets. Note that USID does not restrict or preclude the storage of other relevant datasets as\n# attributes of another dataset.\n#\n# For example, the ``Raw_Data`` dataset appears to contain several attributes whose keys / names match the names of\n# datasets we see above and whose values all appear to be HDF5 object references:\n\nfor key, val in sidpy.hdf_utils.get_attributes(h5_raw).items():\n print('{} : {}'.format(key, val))\n\n########################################################################################################################\n# As the name suggests, these HDF5 object references are references or addresses to datasets located elsewhere in the\n# file. Conventionally, one would need to apply this reference to the file handle to get the actual HDF5 Dataset / Group\n# object.\n#\n# ``get_auxiliary_datasets()`` simplifies this process by directly retrieving the actual Dataset / Group associated with\n# the attribute. Thus, we would be able to get a reference to the ``Bin_Frequencies`` Dataset via:\n\nh5_obj = sidpy.hdf_utils.get_auxiliary_datasets(h5_raw, 'Bin_Frequencies')[0]\nprint(h5_obj)\n# Lets prove that this object is the same as the 'Bin_Frequencies' object that can be directly addressed:\nprint(h5_obj == h5_f['/Measurement_000/Channel_000/Bin_Frequencies'])\n\n########################################################################################################################\n# Accessing Ancillary Datasets\n# =============================\n# One of the major benefits of h5USID is its ability to handle large multidimensional datasets with ease. ``Ancillary``\n# datasets serve as the keys or legends for explaining the dimensionality, reshape-ability, etc. of a dataset. 
There are\n# several functions in hdf_utils that simplify many common operations on ancillary datasets.\n#\n# Before we demonstrate several useful functions in hdf_utils, lets access the position and spectroscopic ancillary\n# datasets using the ``get_auxiliary_datasets()`` function we used above:\n\ndset_list = sidpy.hdf_utils.get_auxiliary_datasets(h5_raw, ['Position_Indices', 'Position_Values',\n 'Spectroscopic_Indices', 'Spectroscopic_Values'])\nh5_pos_inds, h5_pos_vals, h5_spec_inds, h5_spec_vals = dset_list\n\n########################################################################################################################\n# As mentioned above, this is indeed a six dimensional dataset with two position dimensions and four spectroscopic\n# dimensions. The ``Field`` and ``Cycle`` dimensions do not have any units since they are dimensionless, unlike the other\n# dimensions.\n#\n# get_dimensionality()\n# ---------------------\n# Now lets find out the number of steps in each of those dimensions using another handy function called\n# ``get_dimensionality()``:\n\npos_dim_sizes = usid.hdf_utils.get_dimensionality(h5_pos_inds)\nspec_dim_sizes = usid.hdf_utils.get_dimensionality(h5_spec_inds)\npos_dim_names = sidpy.hdf_utils.get_attr(h5_pos_inds, 'labels')\nspec_dim_names = sidpy.hdf_utils.get_attr(h5_spec_inds, 'labels')\n\nprint('Size of each Position dimension:')\nfor name, length in zip(pos_dim_names, pos_dim_sizes):\n print('{} : {}'.format(name, length))\nprint('\\nSize of each Spectroscopic dimension:')\nfor name, length in zip(spec_dim_names, spec_dim_sizes):\n print('{} : {}'.format(name, length))\n\n########################################################################################################################\n# get_sort_order()\n# ----------------\n#\n# In a few (rare) cases, the spectroscopic / position dimensions are not arranged in descending order of rate of change.\n# In other words, the dimensions in these ancillary matrices are not arranged from fastest-varying to slowest.\n# To account for such discrepancies, ``hdf_utils`` has a very handy function that goes through each of the columns or\n# rows in the ancillary indices matrices and finds the order in which these dimensions vary.\n#\n# Below we illustrate an example of sorting the names of the spectroscopic dimensions from fastest to slowest in\n# the BEPS data file:\n\nspec_sort_order = usid.hdf_utils.get_sort_order(h5_spec_inds)\nprint('Rate of change of spectroscopic dimensions: {}'.format(spec_sort_order))\nprint('\\nSpectroscopic dimensions arranged as is:')\nprint(spec_dim_names)\nsorted_spec_labels = np.array(spec_dim_names)[np.array(spec_sort_order)]\nprint('\\nSpectroscopic dimensions arranged from fastest to slowest')\nprint(sorted_spec_labels)\n\n########################################################################################################################\n# get_unit_values()\n# -----------------\n#\n# When visualizing the data it is essential to plot the data against appropriate values on the X, Y, or Z axes.\n# Recall that, by definition, the values over which each dimension is varied are repeated and tiled over the entire\n# position or spectroscopic dimension of the dataset. Thus, if we had just the bias waveform repeated over two cycles,\n# spectroscopic values would contain the bias waveform tiled twice and the cycle numbers repeated as many times as the\n# number of points in the bias waveform. 
Therefore, extracting the bias waveform or the cycle numbers from the ancillary\n# datasets is not trivial. This problem is especially challenging for multidimensional datasets such as the one under\n# consideration. Fortunately, ``hdf_utils`` has a very handy function for this as well:\n\npos_unit_values = usid.hdf_utils.get_unit_values(h5_pos_inds, h5_pos_vals)\nprint('Position unit values:')\nfor key, val in pos_unit_values.items():\n print('{} : {}'.format(key, val))\nspec_unit_values = usid.hdf_utils.get_unit_values(h5_spec_inds, h5_spec_vals)\n\n########################################################################################################################\n# Since the spectroscopic dimensions are quite complicated, lets visualize the results from ``get_unit_values()``:\n\nfig, axes = plt.subplots(ncols=2, nrows=2, figsize=(6.5, 6))\nfor axis, name in zip(axes.flat, spec_dim_names):\n axis.set_title(name)\n axis.plot(spec_unit_values[name], 'o-')\n\nfig.suptitle('Spectroscopic Dimensions', fontsize=16, y=1.05)\nfig.tight_layout()\n\n########################################################################################################################\n# Reshaping Data\n# ==============\n#\n# reshape_to_n_dims()\n# -------------------\n#\n# The USID model stores N dimensional datasets in a flattened 2D form of position x spectral values. It can become\n# challenging to retrieve the data in its original N-dimensional form, especially for multidimensional datasets such as\n# the one we are working on. Fortunately, all the information regarding the dimensionality of the dataset is contained\n# in the spectral and position ancillary datasets. ``reshape_to_n_dims()`` is a very useful function that can help\n# retrieve the N-dimensional form of the data using a simple function call:\n\nndim_form, success, labels = usid.hdf_utils.reshape_to_n_dims(h5_raw, get_labels=True)\nif success:\n print('Succeeded in reshaping flattened 2D dataset to N dimensions')\n print('Shape of the data in its original 2D form')\n print(h5_raw.shape)\n print('Shape of the N dimensional form of the dataset:')\n print(ndim_form.shape)\n print('And these are the dimensions')\n print(labels)\nelse:\n print('Failed in reshaping the dataset')\n\n########################################################################################################################\n# reshape_from_n_dims()\n# -----------------------\n# The inverse problem of reshaping an N dimensional dataset back to a 2D dataset (let's say for the purposes of\n# multivariate analysis or storing into h5USID files) is also easily solved using another handy\n# function - ``reshape_from_n_dims()``:\n\ntwo_dim_form, success = usid.hdf_utils.reshape_from_n_dims(ndim_form, h5_pos=h5_pos_inds, h5_spec=h5_spec_inds)\nif success:\n print('Shape of flattened two dimensional form')\n print(two_dim_form.shape)\nelse:\n print('Failed in flattening the N dimensional dataset')\n\n########################################################################################################################\n# Close and delete the h5_file\nh5_f.close()\nos.remove(h5_path)\n"
] |
[
[
"numpy.array",
"matplotlib.pyplot.subplots"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
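The pyUSID record above walks through the ``hdf_utils`` helpers one at a time; as a condensed reference, here is a minimal sketch of the core inspection calls it demonstrates. This is a sketch under assumptions: sidpy and h5py are installed, and 'example.h5' is a hypothetical placeholder path rather than a file shipped with this dataset.

import h5py
import sidpy

with h5py.File('example.h5', mode='r') as h5_file:
    # Render the group / dataset hierarchy as a tree, as print_tree() does above
    sidpy.hdf_utils.print_tree(h5_file)
    # Pull every attribute (metadata entry) attached to the file's root
    for key, val in sidpy.hdf_utils.get_attributes(h5_file).items():
        print('{} : {}'.format(key, val))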
t-b/pynwb
|
[
"b58e7b003247485120380360bb112bc6b22c7e60"
] |
[
"docs/gallery/general/linking_data.py"
] |
[
"'''\nModular Data Storage using External Files\n===========================================\n\nPyNWB supports linking between files using external links.\n'''\n\n####################\n# Example Use Case: Integrating data from multiple files\n# ---------------------------------------------------------\n#\n# NBWContainer classes (e.g., :py:meth:`~pynwb.base.TimeSeries`) support the integration of data stored in external\n# HDF5 files with NWB data files via external links. To make things more concrete, let's look at the following use\n# case. We want to simultaneously record multiple data steams during data acquisition. Using the concept of external\n# links allows us to save each data stream to an external HDF5 files during data acquisition and to\n# afterwards link the data into a single NWB:N file. In this case, each recording becomes represented by a\n# separate file-system object that can be set as read-only once the experiment is done. In the following\n# we are using :py:meth:`~pynwb.base.TimeSeries` as an example, but the same approach works for other\n# NWBContainers as well.\n#\n#\n\n####################\n# .. tip::\n#\n# The same strategies we use here for creating External Links also apply to Soft Links.\n# The main difference between soft and external links is that soft links point to other\n# objects within the same file while external links point to objects in external files.\n#\n\n####################\n# .. tip::\n#\n# In the case of :py:meth:`~pynwb.base.TimeSeries`, the uncorrected time stamps generated by the acquisition\n# system can be stored (or linked) in the *sync* group. In the NWB:N format, hardware-recorded time data\n# must then be corrected to a common time base (e.g., timestamps from all hardware sources aligned) before\n# it can be included in the *timestamps* of the *TimeSeries* This means, in the case\n# of :py:meth:`~pynwb.base.TimeSeries` we need to be careful that we are not including data with incompatible\n# timestamps in the same file when using external links.\n#\n\n####################\n# .. warning::\n#\n# External links can become stale/break. Since external links are pointing to data in other files\n# external links may become invalid any time files are modified on the file system, e.g., renamed,\n# moved or access permissions are changed.\n#\n\n####################\n# Creating test data\n# ---------------------------\n#\n# In the following we are creating 2 TimeSeries each written to a separate file. 
In the following we\n# then show how we can integrate these files into a single NWBFile.\n\n\nfrom datetime import datetime\nfrom dateutil.tz import tzlocal\nfrom pynwb import NWBFile\nfrom pynwb import TimeSeries\nfrom pynwb import NWBHDF5IO\nimport numpy as np\n\n# Create the base data\nstart_time = datetime(2017, 4, 3, 11, tzinfo=tzlocal())\ncreate_date = datetime(2017, 4, 15, 12, tzinfo=tzlocal())\ndata = np.arange(1000).reshape((100, 10))\ntimestamps = np.arange(100)\nfilename1 = 'external1_example.nwb'\nfilename2 = 'external2_example.nwb'\nfilename3 = 'external_linkcontainer_example.nwb'\nfilename4 = 'external_linkdataset_example.nwb'\n\n# Create the first file\nnwbfile1 = NWBFile(session_description='demonstrate external files',\n identifier='NWBE1',\n session_start_time=start_time,\n file_create_date=create_date)\n# Create the first timeseries\ntest_ts1 = TimeSeries(name='test_timeseries1',\n data=data,\n unit='SIunit',\n timestamps=timestamps)\nnwbfile1.add_acquisition(test_ts1)\n# Write the first file\nio = NWBHDF5IO(filename1, 'w')\nio.write(nwbfile1)\nio.close()\n\n# Create the second file\nnwbfile2 = NWBFile(session_description='demonstrate external files',\n identifier='NWBE2',\n session_start_time=start_time,\n file_create_date=create_date)\n# Create the second timeseries\ntest_ts2 = TimeSeries(name='test_timeseries2',\n data=data,\n unit='SIunit',\n timestamps=timestamps)\nnwbfile2.add_acquisition(test_ts2)\n# Write the second file\nio = NWBHDF5IO(filename2, 'w')\nio.write(nwbfile2)\nio.close()\n\n\n#####################\n# Linking to select datasets\n# --------------------------\n#\n\n####################\n# Step 1: Create the new NWBFile\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n# Create a new NWBFile\nnwbfile4 = NWBFile(session_description='demonstrate external files',\n identifier='NWBE4',\n session_start_time=start_time,\n file_create_date=create_date)\n\n\n####################\n# Step 2: Get the dataset you want to link to\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n# Now let's open our test files and retrieve our timeseries.\n#\n\n# Get the first timeseries\nio1 = NWBHDF5IO(filename1)\nnwbfile1 = io1.read()\ntimeseries_1 = nwbfile1.get_acquisition('test_timeseries1')\ntimeseries_1_data = timeseries_1.data\n\n####################\n# Step 3: Create the object you want to link to the data\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#\n# To link to the dataset we can simply assign the data object (here ``timeseries_1.data``) to a new ``TimeSeries``\n\n# Create a new timeseries that links to our data\ntest_ts4 = TimeSeries(name='test_timeseries4',\n data=timeseries_1_data, # <-------\n unit='SIunit',\n timestamps=timestamps)\nnwbfile4.add_acquisition(test_ts4)\n\n####################\n# In the above case we did not make it explicit how we want to handle the data from\n# our TimeSeries; this means that :py:class:`~pynwb.NWBHDF5IO` will need to\n# determine on write how to treat the dataset. 
We can make this explicit and customize this\n# behavior on a per-dataset basis by wrapping our dataset using\n# :py:meth:`~pynwb.form.backends.hdf5.h5_utils.H5DataIO`\n\nfrom pynwb.form.backends.hdf5.h5_utils import H5DataIO\n\n# Create another timeseries that links to the same data\ntest_ts5 = TimeSeries(name='test_timeseries5',\n data=H5DataIO(data=timeseries_1_data, # <-------\n link_data=True), # <-------\n unit='SIunit',\n timestamps=timestamps)\nnwbfile4.add_acquisition(test_ts5)\n\n####################\n# Step 4: Write the data\n# ^^^^^^^^^^^^^^^^^^^^^^^\n#\nfrom pynwb import NWBHDF5IO\n\nio4 = NWBHDF5IO(filename4, 'w')\nio4.write(nwbfile4,\n link_data=True) # <-------- Specify default behavior to link rather than copy data\nio4.close()\n\n#####################\n# .. note::\n#\n# In the case of TimeSeries one advantage of linking to just the main dataset is that we can now\n# use our own timestamps in case the timestamps in the original file are not aligned with the\n# clock of the NWBFile we are creating. In this way we can use the linking to \"re-align\" different\n# TimeSeries without having to copy the main data.\n\n\n####################\n# Linking to whole Containers\n# ---------------------------\n#\n# Appending to files and linking is made possible by passing around the same\n# :py:class:`~pynwb.form.build.map.BuildManager`. You can get a manager to pass around\n# using the :py:meth:`~pynwb.get_manager` function.\n#\n\nfrom pynwb import get_manager\n\nmanager = get_manager()\n\n####################\n# .. tip::\n#\n# You can pass in extensions to :py:meth:`~pynwb.get_manager` using the *extensions* argument.\n\n####################\n# Step 1: Get the container object you want to link to\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n# Now let's open our test files and retrieve our timeseries.\n#\n\n# Get the first timeseries\nio1 = NWBHDF5IO(filename1, manager=manager)\nnwbfile1 = io1.read()\ntimeseries_1 = nwbfile1.get_acquisition('test_timeseries1')\n\n# Get the second timeseries\nio2 = NWBHDF5IO(filename2, manager=manager)\nnwbfile2 = io2.read()\ntimeseries_2 = nwbfile2.get_acquisition('test_timeseries2')\n\n####################\n# Step 2: Add the container to another NWBFile\n# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n# To integrate both :py:meth:`~pynwb.base.TimeSeries` into a single file we simply create a new\n# :py:meth:`~pynwb.file.NWBFile` and add our existing :py:meth:`~pynwb.base.TimeSeries` to it. PyNWB's\n# :py:meth:`~pynwb.NWBHDF5IO` backend then automatically detects that the TimeSeries have already\n# been written to another file and will create external links for us.\n#\n\n# Create a new NWBFile that links to the external timeseries\nnwbfile3 = NWBFile(session_description='demonstrate external files',\n identifier='NWBE3',\n session_start_time=start_time,\n file_create_date=create_date)\nnwbfile3.add_acquisition(timeseries_1) # <--------\nnwbfile3.add_acquisition(timeseries_2) # <--------\n\n# Write our third file that includes our two timeseries as external links\nio3 = NWBHDF5IO(filename3, 'w', manager=manager)\nio3.write(nwbfile3)\nio3.close()\n\n####################\n# Creating a single file for sharing\n# -----------------------------------\n#\n# External links are convenient but to share data we may want to hand a single file with all the\n# data to our collaborator rather than having to collect all relevant files. 
To do this,\n# :py:class:`~pynwb.form.backends.hdf5.h5tools.HDF5IO` (and in turn :py:class:`~pynwb.NWBHDF5IO`)\n# provide the convenience function :py:func:`~pynwb.form.backends.hdf5.h5tools.HDF5IO.copy_file`\n"
] |
[
[
"numpy.arange"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
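The linking_data.py record above centers on one idiom: wrapping an HDF5-backed dataset in ``H5DataIO`` with ``link_data=True`` so the backend writes an external link instead of copying the array. A minimal sketch of that idiom follows; the import path matches the pinned pynwb version in the record (newer releases expose H5DataIO via hdmf), and make_linked_timeseries is a hypothetical helper, not part of pynwb.

from pynwb import TimeSeries
from pynwb.form.backends.hdf5.h5_utils import H5DataIO

def make_linked_timeseries(name, source_ts, timestamps):
    # Reuse the on-disk dataset of source_ts via an external link instead of a copy
    return TimeSeries(name=name,
                      data=H5DataIO(data=source_ts.data, link_data=True),
                      unit=source_ts.unit,
                      timestamps=timestamps)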
YeongHyeon/CVAE
|
[
"5db95ea6a1a01475cd0356e31bf593f09b5479c2"
] |
[
"source/datamanager.py"
] |
[
"import numpy as np\nimport tensorflow as tf\n\nfrom sklearn.utils import shuffle\n\nclass Dataset(object):\n\n def __init__(self, normalize=True):\n\n print(\"\\nInitializing Dataset...\")\n\n self.normalize = normalize\n\n (x_tr, y_tr), (x_te, y_te) = tf.keras.datasets.mnist.load_data()\n self.x_tr, self.y_tr = x_tr, y_tr\n self.x_te, self.y_te = x_te, y_te\n\n # Type casting from uint8 to float32\n self.x_tr = np.ndarray.astype(self.x_tr, np.float32)\n self.x_te = np.ndarray.astype(self.x_te, np.float32)\n\n self.num_tr, self.num_te = self.x_tr.shape[0], self.x_te.shape[0]\n self.idx_tr, self.idx_te = 0, 0\n\n print(\"Number of data\\nTraining: %d, Test: %d\\n\" %(self.num_tr, self.num_te))\n\n x_sample, y_sample = self.x_te[0], self.y_te[0]\n self.height = x_sample.shape[0]\n self.width = x_sample.shape[1]\n try: self.channel = x_sample.shape[2]\n except: self.channel = 1\n\n self.min_val, self.max_val = x_sample.min(), x_sample.max()\n self.num_class = (y_te.max()+1)\n\n print(\"Information of data\")\n print(\"Shape Height: %d, Width: %d, Channel: %d\" %(self.height, self.width, self.channel))\n print(\"Value Min: %.3f, Max: %.3f\" %(self.min_val, self.max_val))\n print(\"Class %d\" %(self.num_class))\n print(\"Normalization: %r\" %(self.normalize))\n if(self.normalize): print(\"(from %.3f-%.3f to %.3f-%.3f)\" %(self.min_val, self.max_val, 0, 1))\n\n def reset_idx(self): self.idx_tr, self.idx_te = 0, 0\n\n def next_train(self, batch_size=1, fix=False):\n\n start, end = self.idx_tr, self.idx_tr+batch_size\n x_tr, y_tr = self.x_tr[start:end], self.y_tr[start:end]\n x_tr = np.expand_dims(x_tr, axis=3)\n\n terminator = False\n if(end >= self.num_tr):\n terminator = True\n self.idx_tr = 0\n self.x_tr, self.y_tr = shuffle(self.x_tr, self.y_tr)\n else: self.idx_tr = end\n\n if(fix): self.idx_tr = start\n\n if(x_tr.shape[0] != batch_size):\n x_tr, y_tr = self.x_tr[-1-batch_size:-1], self.y_tr[-1-batch_size:-1]\n x_tr = np.expand_dims(x_tr, axis=3)\n\n if(self.normalize):\n min_x, max_x = x_tr.min(), x_tr.max()\n x_tr = (x_tr - min_x) / (max_x - min_x)\n\n return x_tr, y_tr, terminator\n\n def next_test(self, batch_size=1):\n\n start, end = self.idx_te, self.idx_te+batch_size\n x_te, y_te = self.x_te[start:end], self.y_te[start:end]\n x_te = np.expand_dims(x_te, axis=3)\n\n terminator = False\n if(end >= self.num_te):\n terminator = True\n self.idx_te = 0\n else: self.idx_te = end\n\n if(self.normalize):\n min_x, max_x = x_te.min(), x_te.max()\n x_te = (x_te - min_x) / (max_x - min_x)\n\n return x_te, y_te, terminator\n"
] |
[
[
"sklearn.utils.shuffle",
"numpy.expand_dims",
"tensorflow.keras.datasets.mnist.load_data",
"numpy.ndarray.astype"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
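The Dataset class in the record above exposes an epoch-style iterator through next_train() / next_test(), each returning a (batch, labels, terminator) triple. A hedged usage sketch, assuming the module is importable as source.datamanager per the file_path in the record:

from source.datamanager import Dataset

dataset = Dataset(normalize=True)
num_batches = 0
while True:
    # x: (batch, 28, 28, 1) float32 scaled to [0, 1]; y: integer class labels
    x, y, terminator = dataset.next_train(batch_size=32)
    num_batches += 1
    if terminator:  # the training set was exhausted and reshuffled
        break
print('batches per epoch:', num_batches)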
bdpedigo/molesq
|
[
"297c08dc0a41390dda5e8e5fc1bda612d7c417c0"
] |
[
"tests/test_utils.py"
] |
[
"import numpy as np\n\nfrom molesq.utils import grid_field\n\n\ndef test_grid_field():\n test, shape = grid_field([0, 0], [2, 2])\n ref = np.array(\n [\n [0, 0],\n [0, 1],\n [1, 0],\n [1, 1],\n ]\n ).astype(test.dtype)\n assert np.allclose(test, ref)\n assert shape == (2, 2)\n"
] |
[
[
"numpy.array",
"numpy.allclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
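The test in the record above pins down grid_field's contract: given minimum and maximum corners, it returns integer grid coordinates of shape (N, ndim) together with the grid shape. A plausible reimplementation consistent with that test, not necessarily molesq's actual code:

import numpy as np

def grid_field(mins, maxs):
    # One integer range per axis, spanning [min, max) as in the test case
    axes = [np.arange(lo, hi) for lo, hi in zip(mins, maxs)]
    # 'ij' indexing makes the first axis vary slowest, matching the expected row order
    mesh = np.meshgrid(*axes, indexing='ij')
    coords = np.stack([m.ravel() for m in mesh], axis=1)
    return coords, tuple(len(a) for a in axes)

# grid_field([0, 0], [2, 2]) -> ([[0, 0], [0, 1], [1, 0], [1, 1]], (2, 2))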
Saduf2019/tensorflow-1
|
[
"f210b2b2f8489ffe97edac886238242288950439",
"f210b2b2f8489ffe97edac886238242288950439"
] |
[
"tensorflow/python/ops/ragged/ragged_tensor.py",
"tensorflow/python/ops/ragged/ragged_tensor_supported_values_test.py"
] |
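The ragged_tensor.py record that follows documents several interchangeable row-partitioning schemes at length. A small runnable sketch distilled from its doctest examples (requires TensorFlow 2.x):

import tensorflow as tf

values = [3, 1, 4, 1, 5, 9, 2, 6]
# Three partitioning schemes that describe the same ragged rows
rt_a = tf.RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])
rt_b = tf.RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])
rt_c = tf.RaggedTensor.from_value_rowids(
    values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
# Each prints as <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
print(rt_a)
print(rt_b)
print(rt_c)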
[
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Classes for storing ragged tensors and their values.\"\"\"\n\nimport functools\nimport operator\n\nimport typing\nimport numpy as np\n\nfrom tensorflow.python import tf2\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import gen_ragged_conversion_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops.ragged import ragged_config\nfrom tensorflow.python.ops.ragged import ragged_tensor_value\nfrom tensorflow.python.ops.ragged import ragged_util\nfrom tensorflow.python.ops.ragged.row_partition import RowPartition\nfrom tensorflow.python.types import core as core_types\nfrom tensorflow.python.types import internal as internal_types\nfrom tensorflow.python.util import dispatch\nfrom tensorflow.python.util.tf_export import tf_export\nfrom tensorflow.tools.docs import doc_controls\n\n# pylint: disable=protected-access\n_convert_row_partition = RowPartition._convert_row_partition\n# pylint: enable=protected-access\n\n#===============================================================================\n# RaggedTensor\n#===============================================================================\n\n\n@tf_export(\"RaggedTensor\")\nclass RaggedTensor(composite_tensor.CompositeTensor,\n internal_types.NativeObject):\n \"\"\"Represents a ragged tensor.\n\n A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are\n dimensions whose slices may have different lengths. For example, the inner\n (column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged,\n since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths.\n Dimensions whose slices all have the same length are called *uniform\n dimensions*. The outermost dimension of a `RaggedTensor` is always uniform,\n since it consists of a single slice (and so there is no possibility for\n differing slice lengths).\n\n The total number of dimensions in a `RaggedTensor` is called its *rank*,\n and the number of ragged dimensions in a `RaggedTensor` is called its\n *ragged-rank*. 
A `RaggedTensor`'s ragged-rank is fixed at graph creation\n time: it can't depend on the runtime values of `Tensor`s, and can't vary\n dynamically for different session runs.\n\n Note that the `__init__` constructor is private. Please use one of the\n following methods to construct a `RaggedTensor`:\n\n * `tf.RaggedTensor.from_row_lengths`\n * `tf.RaggedTensor.from_value_rowids`\n * `tf.RaggedTensor.from_row_splits`\n * `tf.RaggedTensor.from_row_starts`\n * `tf.RaggedTensor.from_row_limits`\n * `tf.RaggedTensor.from_nested_row_splits`\n * `tf.RaggedTensor.from_nested_row_lengths`\n * `tf.RaggedTensor.from_nested_value_rowids`\n\n ### Potentially Ragged Tensors\n\n Many ops support both `Tensor`s and `RaggedTensor`s\n (see [tf.ragged](https://www.tensorflow.org/api_docs/python/tf/ragged) for a\n full listing). The term \"potentially ragged tensor\" may be used to refer to a\n tensor that might be either a `Tensor` or a `RaggedTensor`. The ragged-rank\n of a `Tensor` is zero.\n\n ### Documenting RaggedTensor Shapes\n\n When documenting the shape of a RaggedTensor, ragged dimensions can be\n indicated by enclosing them in parentheses. For example, the shape of\n a 3-D `RaggedTensor` that stores the fixed-size word embedding for each\n word in a sentence, for each sentence in a batch, could be written as\n `[num_sentences, (num_words), embedding_size]`. The parentheses around\n `(num_words)` indicate that dimension is ragged, and that the length\n of each element list in that dimension may vary for each item.\n\n ### Component Tensors\n\n Internally, a `RaggedTensor` consists of a concatenated list of values that\n are partitioned into variable-length rows. In particular, each `RaggedTensor`\n consists of:\n\n * A `values` tensor, which concatenates the variable-length rows into a\n flattened list. For example, the `values` tensor for\n `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`.\n\n * A `row_splits` vector, which indicates how those flattened values are\n divided into rows. In particular, the values for row `rt[i]` are stored\n in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.\n\n Example:\n\n >>> print(tf.RaggedTensor.from_row_splits(\n ... values=[3, 1, 4, 1, 5, 9, 2, 6],\n ... row_splits=[0, 4, 4, 7, 8, 8]))\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n\n ### Alternative Row-Partitioning Schemes\n\n In addition to `row_splits`, ragged tensors provide support for five other\n row-partitioning schemes:\n\n * `row_lengths`: a vector with shape `[nrows]`, which specifies the length\n of each row.\n\n * `value_rowids` and `nrows`: `value_rowids` is a vector with shape\n `[nvals]`, corresponding one-to-one with `values`, which specifies\n each value's row index. In particular, the row `rt[row]` consists of the\n values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an\n integer scalar that specifies the number of rows in the\n `RaggedTensor`. (`nrows` is used to indicate trailing empty rows.)\n\n * `row_starts`: a vector with shape `[nrows]`, which specifies the start\n offset of each row. Equivalent to `row_splits[:-1]`.\n\n * `row_limits`: a vector with shape `[nrows]`, which specifies the stop\n offset of each row. Equivalent to `row_splits[1:]`.\n\n * `uniform_row_length`: A scalar tensor, specifying the length of every\n row. 
This row-partitioning scheme may only be used if all rows have\n the same length.\n\n Example: The following ragged tensors are equivalent, and all represent the\n nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`.\n\n >>> values = [3, 1, 4, 1, 5, 9, 2, 6]\n >>> RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n >>> RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n >>> RaggedTensor.from_value_rowids(\n ... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n >>> RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n >>> RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n >>> RaggedTensor.from_uniform_row_length(values, uniform_row_length=2)\n <tf.RaggedTensor [[3, 1], [4, 1], [5, 9], [2, 6]]>\n\n ### Multiple Ragged Dimensions\n\n `RaggedTensor`s with multiple ragged dimensions can be defined by using\n a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor`\n adds a single ragged dimension.\n\n >>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above\n ... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])\n >>> outer_rt = RaggedTensor.from_row_splits(\n ... values=inner_rt, row_splits=[0, 3, 3, 5])\n >>> print(outer_rt.to_list())\n [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]\n >>> print(outer_rt.ragged_rank)\n 2\n\n The factory function `RaggedTensor.from_nested_row_splits` may be used to\n construct a `RaggedTensor` with multiple ragged dimensions directly, by\n providing a list of `row_splits` tensors:\n\n >>> RaggedTensor.from_nested_row_splits(\n ... flat_values=[3, 1, 4, 1, 5, 9, 2, 6],\n ... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()\n [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]\n\n ### Uniform Inner Dimensions\n\n `RaggedTensor`s with uniform inner dimensions can be defined\n by using a multidimensional `Tensor` for `values`.\n\n >>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3], tf.int32),\n ... row_splits=[0, 2, 5])\n >>> print(rt.to_list())\n [[[1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]\n >>> print(rt.shape)\n (2, None, 3)\n\n ### Uniform Outer Dimensions\n\n `RaggedTensor`s with uniform outer dimensions can be defined by using\n one or more `RaggedTensor` with a `uniform_row_length` row-partitioning\n tensor. For example, a `RaggedTensor` with shape `[2, 2, None]` can be\n constructed with this method from a `RaggedTensor` values with shape\n `[4, None]`:\n\n >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])\n >>> print(values.shape)\n (4, None)\n >>> rt6 = tf.RaggedTensor.from_uniform_row_length(values, 2)\n >>> print(rt6)\n <tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>\n >>> print(rt6.shape)\n (2, 2, None)\n\n Note that `rt6` only contains one ragged dimension (the innermost\n dimension). 
In contrast, if `from_row_splits` is used to construct a similar\n `RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:\n\n >>> rt7 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])\n >>> print(rt7.shape)\n (2, None, None)\n\n Uniform and ragged outer dimensions may be interleaved, meaning that a\n tensor with any combination of ragged and uniform dimensions may be created.\n For example, a RaggedTensor `t4` with shape `[3, None, 4, 8, None, 2]` could\n be constructed as follows:\n\n ```python\n t0 = tf.zeros([1000, 2]) # Shape: [1000, 2]\n t1 = RaggedTensor.from_row_lengths(t0, [...]) # [160, None, 2]\n t2 = RaggedTensor.from_uniform_row_length(t1, 8) # [20, 8, None, 2]\n t3 = RaggedTensor.from_uniform_row_length(t2, 4) # [5, 4, 8, None, 2]\n t4 = RaggedTensor.from_row_lengths(t3, [...]) # [3, None, 4, 8, None, 2]\n ```\n\n \"\"\"\n\n #=============================================================================\n # Constructor (private)\n #=============================================================================\n @doc_controls.do_not_generate_docs\n def __init__(self, values, row_partition, internal=False):\n \"\"\"Creates a `RaggedTensor` with a specified partitioning for `values`.\n\n This constructor is private -- please use one of the following ops to\n build `RaggedTensor`s:\n\n * `tf.RaggedTensor.from_row_lengths`\n * `tf.RaggedTensor.from_value_rowids`\n * `tf.RaggedTensor.from_row_splits`\n * `tf.RaggedTensor.from_row_starts`\n * `tf.RaggedTensor.from_row_limits`\n * `tf.RaggedTensor.from_nested_row_splits`\n * `tf.RaggedTensor.from_nested_row_lengths`\n * `tf.RaggedTensor.from_nested_value_rowids`\n\n Args:\n values: A potentially ragged tensor of any dtype and shape `[nvals, ...]`.\n row_partition: A `RowPartition` object, representing the arrangement of\n the lists at the top level.\n internal: True if the constructor is being called by one of the factory\n methods. If false, an exception will be raised.\n\n Raises:\n ValueError: If internal = False. Note that this method is intended only\n for internal use.\n TypeError: If values is not a `RaggedTensor` or `Tensor`, or\n row_partition is not a `RowPartition`.\n \"\"\"\n\n if not internal:\n raise ValueError(\"RaggedTensor constructor is private; please use one \"\n \"of the factory methods instead (e.g., \"\n \"RaggedTensor.from_row_lengths())\")\n _assert_is_supported_ragged_values_type(values)\n if not isinstance(row_partition, RowPartition):\n raise TypeError(f\"Argument `row_partition` must be a RowPartition. 
\"\n f\"Received {row_partition}.\")\n\n # Validate shapes.\n values.shape.with_rank_at_least(1)\n if isinstance(values, RaggedTensor):\n # pylint: disable=protected-access\n assert row_partition.dtype == values._row_partition.dtype\n\n self._values = values\n self._row_partition = row_partition\n\n #=============================================================================\n # Factory Methods\n #=============================================================================\n\n @classmethod\n def _from_row_partition(cls, values, row_partition, validate=True):\n \"\"\"Creates a `RaggedTensor` with a row partition.\n\n This is used as a way for RaggedTensors to share row partitions.\n\n The outer dimension of values must be equal to `partition.nvals()`.\n\n Args:\n values: A potentially ragged tensor.\n row_partition: a `RowPartition`: can be shared between tensors.\n validate: If true, then use assertions to check that the arguments form a\n valid `RaggedTensor`.\n\n Returns:\n A `RaggedTensor`. `result.rank = values.rank + 1`.\n `result.ragged_rank = values.ragged_rank + 1`.\n\n Raises:\n ValueError: If partition.nvals() != _nrows(values)\n \"\"\"\n if not isinstance(row_partition, RowPartition):\n raise TypeError(f\"Argument `row_partition` must be a RowPartition. \"\n f\"Received {row_partition}.\")\n if not isinstance(validate, bool):\n raise TypeError(f\"Argument `validate` must have type bool. \"\n f\"Received {validate}.\")\n values, row_partition = cls._convert_values_and_partition(\n values, row_partition, \"partition\")\n if row_partition.has_precomputed_value_rowids():\n value_rowids_shape = row_partition.value_rowids().shape\n values.shape[:1].assert_is_compatible_with(value_rowids_shape)\n if validate:\n msg = \"Arguments to _from_row_partition do not form a valid RaggedTensor\"\n nvals = _nrows(values, row_partition.dtype)\n checks = [\n check_ops.assert_equal(\n math_ops.cast(row_partition.nvals(), row_partition.dtype),\n nvals,\n message=msg),\n ]\n if not isinstance(values, RaggedTensor):\n checks.append(check_ops.assert_rank_at_least(values, 1))\n row_partition = row_partition.with_dependencies(checks)\n return cls(values=values, internal=True, row_partition=row_partition)\n\n @classmethod\n @dispatch.add_dispatch_support\n def from_value_rowids(cls,\n values,\n value_rowids,\n nrows=None,\n name=None,\n validate=True):\n \"\"\"Creates a `RaggedTensor` with rows partitioned by `value_rowids`.\n\n The returned `RaggedTensor` corresponds with the python list defined by:\n\n ```python\n result = [[values[i] for i in range(len(values)) if value_rowids[i] == row]\n for row in range(nrows)]\n ```\n\n Args:\n values: A potentially ragged tensor with shape `[nvals, ...]`.\n value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds\n one-to-one with `values`, and specifies each value's row index. Must be\n nonnegative, and must be sorted in ascending order.\n nrows: An integer scalar specifying the number of rows. This should be\n specified if the `RaggedTensor` may containing empty training rows. Must\n be greater than `value_rowids[-1]` (or zero if `value_rowids` is empty).\n Defaults to `value_rowids[-1] + 1` (or zero if `value_rowids` is empty).\n name: A name prefix for the RaggedTensor (optional).\n validate: If true, then use assertions to check that the arguments form\n a valid `RaggedTensor`. Note: these assertions incur a runtime cost,\n since they must be checked for each tensor value.\n\n Returns:\n A `RaggedTensor`. 
`result.rank = values.rank + 1`.\n `result.ragged_rank = values.ragged_rank + 1`.\n\n Raises:\n ValueError: If `nrows` is incompatible with `value_rowids`.\n\n #### Example:\n\n >>> print(tf.RaggedTensor.from_value_rowids(\n ... values=[3, 1, 4, 1, 5, 9, 2, 6],\n ... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],\n ... nrows=5))\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n\n \"\"\"\n if not isinstance(validate, bool):\n raise TypeError(f\"Argument `validate` must have type bool. \"\n f\"Received {validate}.\")\n\n with ops.name_scope(name, \"RaggedFromValueRowIds\",\n [values, value_rowids, nrows]):\n row_partition = RowPartition.from_value_rowids(\n value_rowids=value_rowids,\n nrows=nrows,\n validate=validate,\n dtype_hint=_get_optional_partition_dtype(values))\n return cls._from_row_partition(values, row_partition, validate=validate)\n\n @classmethod\n @dispatch.add_dispatch_support\n def from_row_splits(cls, values, row_splits, name=None, validate=True):\n \"\"\"Creates a `RaggedTensor` with rows partitioned by `row_splits`.\n\n The returned `RaggedTensor` corresponds with the python list defined by:\n\n ```python\n result = [values[row_splits[i]:row_splits[i + 1]]\n for i in range(len(row_splits) - 1)]\n ```\n\n Args:\n values: A potentially ragged tensor with shape `[nvals, ...]`.\n row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be\n empty, and must be sorted in ascending order. `row_splits[0]` must be\n zero and `row_splits[-1]` must be `nvals`.\n name: A name prefix for the RaggedTensor (optional).\n validate: If true, then use assertions to check that the arguments form\n a valid `RaggedTensor`. Note: these assertions incur a runtime cost,\n since they must be checked for each tensor value.\n\n Returns:\n A `RaggedTensor`. `result.rank = values.rank + 1`.\n `result.ragged_rank = values.ragged_rank + 1`.\n\n Raises:\n ValueError: If `row_splits` is an empty list.\n\n #### Example:\n\n >>> print(tf.RaggedTensor.from_row_splits(\n ... values=[3, 1, 4, 1, 5, 9, 2, 6],\n ... row_splits=[0, 4, 4, 7, 8, 8]))\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n\n \"\"\"\n if not isinstance(validate, bool):\n raise TypeError(f\"Argument `validate` must have type bool. \"\n f\"Received {validate}.\")\n\n with ops.name_scope(name, \"RaggedFromRowSplits\", [values, row_splits]):\n row_partition = RowPartition.from_row_splits(\n row_splits=row_splits,\n validate=validate,\n dtype_hint=_get_optional_partition_dtype(values))\n return cls._from_row_partition(values, row_partition, validate=validate)\n\n @classmethod\n @dispatch.add_dispatch_support\n def from_row_lengths(cls, values, row_lengths, name=None, validate=True):\n \"\"\"Creates a `RaggedTensor` with rows partitioned by `row_lengths`.\n\n The returned `RaggedTensor` corresponds with the python list defined by:\n\n ```python\n result = [[values.pop(0) for i in range(length)]\n for length in row_lengths]\n ```\n\n Args:\n values: A potentially ragged tensor with shape `[nvals, ...]`.\n row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be\n nonnegative. `sum(row_lengths)` must be `nvals`.\n name: A name prefix for the RaggedTensor (optional).\n validate: If true, then use assertions to check that the arguments form\n a valid `RaggedTensor`. Note: these assertions incur a runtime cost,\n since they must be checked for each tensor value.\n\n Returns:\n A `RaggedTensor`. 
`result.rank = values.rank + 1`.\n `result.ragged_rank = values.ragged_rank + 1`.\n\n #### Example:\n\n >>> print(tf.RaggedTensor.from_row_lengths(\n ... values=[3, 1, 4, 1, 5, 9, 2, 6],\n ... row_lengths=[4, 0, 3, 1, 0]))\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n\n \"\"\"\n if not isinstance(validate, bool):\n raise TypeError(f\"Argument `validate` must have type bool. \"\n f\"Received {validate}.\")\n\n with ops.name_scope(name, \"RaggedFromRowLengths\", [values, row_lengths]):\n row_partition = RowPartition.from_row_lengths(\n row_lengths=row_lengths,\n validate=validate,\n dtype_hint=_get_optional_partition_dtype(values))\n return cls._from_row_partition(values, row_partition, validate=validate)\n\n @classmethod\n @dispatch.add_dispatch_support\n def from_row_starts(cls, values, row_starts, name=None, validate=True):\n \"\"\"Creates a `RaggedTensor` with rows partitioned by `row_starts`.\n\n Equivalent to: `from_row_splits(values, concat([row_starts, nvals]))`.\n\n Args:\n values: A potentially ragged tensor with shape `[nvals, ...]`.\n row_starts: A 1-D integer tensor with shape `[nrows]`. Must be\n nonnegative and sorted in ascending order. If `nrows>0`, then\n `row_starts[0]` must be zero.\n name: A name prefix for the RaggedTensor (optional).\n validate: If true, then use assertions to check that the arguments form\n a valid `RaggedTensor`. Note: these assertions incur a runtime cost,\n since they must be checked for each tensor value.\n\n Returns:\n A `RaggedTensor`. `result.rank = values.rank + 1`.\n `result.ragged_rank = values.ragged_rank + 1`.\n\n #### Example:\n\n >>> print(tf.RaggedTensor.from_row_starts(\n ... values=[3, 1, 4, 1, 5, 9, 2, 6],\n ... row_starts=[0, 4, 4, 7, 8]))\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n\n \"\"\"\n if not isinstance(validate, bool):\n raise TypeError(f\"Argument `validate` must have type bool. \"\n f\"Received {validate}.\")\n with ops.name_scope(name, \"RaggedFromRowStarts\", [values, row_starts]):\n values = _convert_to_ragged_tensor_values(values)\n row_partition = RowPartition.from_row_starts(\n row_starts=row_starts,\n nvals=_nrows(values),\n validate=validate,\n dtype_hint=_get_optional_partition_dtype(values))\n return cls._from_row_partition(values, row_partition, validate=validate)\n\n @classmethod\n @dispatch.add_dispatch_support\n def from_row_limits(cls, values, row_limits, name=None, validate=True):\n \"\"\"Creates a `RaggedTensor` with rows partitioned by `row_limits`.\n\n Equivalent to: `from_row_splits(values, concat([0, row_limits]))`.\n\n Args:\n values: A potentially ragged tensor with shape `[nvals, ...]`.\n row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in\n ascending order. If `nrows>0`, then `row_limits[-1]` must be `nvals`.\n name: A name prefix for the RaggedTensor (optional).\n validate: If true, then use assertions to check that the arguments form\n a valid `RaggedTensor`. Note: these assertions incur a runtime cost,\n since they must be checked for each tensor value.\n\n Returns:\n A `RaggedTensor`. `result.rank = values.rank + 1`.\n `result.ragged_rank = values.ragged_rank + 1`.\n\n #### Example:\n\n >>> print(tf.RaggedTensor.from_row_limits(\n ... values=[3, 1, 4, 1, 5, 9, 2, 6],\n ... row_limits=[4, 4, 7, 8, 8]))\n <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>\n\n \"\"\"\n if not isinstance(validate, bool):\n raise TypeError(f\"Argument `validate` must have type bool. 
\"\n f\"Received {validate}.\")\n with ops.name_scope(name, \"RaggedFromRowLimits\", [values, row_limits]):\n values = _convert_to_ragged_tensor_values(values)\n row_partition = RowPartition.from_row_limits(\n row_limits=row_limits,\n validate=validate,\n dtype_hint=_get_optional_partition_dtype(values))\n return cls._from_row_partition(values, row_partition, validate=validate)\n\n @classmethod\n @dispatch.add_dispatch_support\n def from_uniform_row_length(cls,\n values,\n uniform_row_length,\n nrows=None,\n validate=True,\n name=None):\n \"\"\"Creates a `RaggedTensor` with rows partitioned by `uniform_row_length`.\n\n This method can be used to create `RaggedTensor`s with multiple uniform\n outer dimensions. For example, a `RaggedTensor` with shape `[2, 2, None]`\n can be constructed with this method from a `RaggedTensor` values with shape\n `[4, None]`:\n\n >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])\n >>> print(values.shape)\n (4, None)\n >>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)\n >>> print(rt1)\n <tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>\n >>> print(rt1.shape)\n (2, 2, None)\n\n Note that `rt1` only contains one ragged dimension (the innermost\n dimension). In contrast, if `from_row_splits` is used to construct a similar\n `RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:\n\n >>> rt2 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])\n >>> print(rt2.shape)\n (2, None, None)\n\n Args:\n values: A potentially ragged tensor with shape `[nvals, ...]`.\n uniform_row_length: A scalar integer tensor. Must be nonnegative. The\n size of the outer axis of `values` must be evenly divisible by\n `uniform_row_length`.\n nrows: The number of rows in the constructed RaggedTensor. If not\n specified, then it defaults to `nvals/uniform_row_length` (or `0` if\n `uniform_row_length==0`). `nrows` only needs to be specified if\n `uniform_row_length` might be zero. `uniform_row_length*nrows` must be\n `nvals`.\n validate: If true, then use assertions to check that the arguments form\n a valid `RaggedTensor`. Note: these assertions incur a runtime cost,\n since they must be checked for each tensor value.\n name: A name prefix for the RaggedTensor (optional).\n\n Returns:\n A `RaggedTensor` that corresponds with the python list defined by:\n\n ```python\n result = [[values.pop(0) for i in range(uniform_row_length)]\n for _ in range(nrows)]\n ```\n\n `result.rank = values.rank + 1`.\n `result.ragged_rank = values.ragged_rank + 1`.\n \"\"\"\n if not isinstance(validate, bool):\n raise TypeError(f\"Argument `validate` must have type bool. 
\"\n f\"Received {validate}.\")\n with ops.name_scope(name, \"RaggedFromUniformRowLength\",\n [values, uniform_row_length, nrows]):\n values = _convert_to_ragged_tensor_values(values)\n uniform_row_length = _convert_row_partition(\n uniform_row_length, \"UniformRowLength\",\n _get_optional_partition_dtype(values))\n nvals = _nvals_uniform_row_length(values, uniform_row_length)\n row_partition = RowPartition.from_uniform_row_length(\n uniform_row_length=uniform_row_length,\n nvals=nvals,\n nrows=nrows,\n validate=validate,\n dtype_hint=_get_optional_partition_dtype(values))\n return cls._from_row_partition(values, row_partition, validate=validate)\n\n @classmethod\n @dispatch.add_dispatch_support\n def from_nested_value_rowids(cls,\n flat_values,\n nested_value_rowids,\n nested_nrows=None,\n name=None,\n validate=True):\n \"\"\"Creates a `RaggedTensor` from a nested list of `value_rowids` tensors.\n\n Equivalent to:\n\n ```python\n result = flat_values\n for (rowids, nrows) in reversed(zip(nested_value_rowids, nested_nrows)):\n result = from_value_rowids(result, rowids, nrows)\n ```\n\n Args:\n flat_values: A potentially ragged tensor.\n nested_value_rowids: A list of 1-D integer tensors. The `i`th tensor is\n used as the `value_rowids` for the `i`th ragged dimension.\n nested_nrows: A list of integer scalars. The `i`th scalar is used as the\n `nrows` for the `i`th ragged dimension.\n name: A name prefix for the RaggedTensor (optional).\n validate: If true, then use assertions to check that the arguments form\n a valid `RaggedTensor`. Note: these assertions incur a runtime cost,\n since they must be checked for each tensor value.\n\n Returns:\n A `RaggedTensor` (or `flat_values` if `nested_value_rowids` is empty).\n\n Raises:\n ValueError: If `len(nested_values_rowids) != len(nested_nrows)`.\n \"\"\"\n if not isinstance(validate, bool):\n raise TypeError(f\"Argument `validate` must have type bool. \"\n f\"Received {validate}.\")\n if isinstance(nested_value_rowids, ops.Tensor):\n raise TypeError(f\"Argument `nested_value_rowids` must be a list of \"\n f\"Tensors. Received {nested_value_rowids}.\")\n if nested_nrows is None:\n nested_nrows = [None] * len(nested_value_rowids)\n else:\n if isinstance(nested_nrows, ops.Tensor):\n raise TypeError(f\"Argument `nested_nrows` must be a list of \"\n f\"Tensors. Received {nested_nrows}.\")\n if len(nested_nrows) != len(nested_value_rowids):\n raise ValueError(\n f\"Argument `nested_nrows` must have the same length as \"\n f\"argument `nested_value_rowids`. len(nested_nrows) = \"\n f\"{len(nested_nrows)} vs. len(nested_values_rowids) = \"\n f\"{len(nested_value_rowids)}.\")\n\n with ops.name_scope(name, \"RaggedFromNestedValueRowIds\", [flat_values] +\n list(nested_value_rowids) + list(nested_nrows)):\n result = flat_values\n for value_rowids, nrows in reversed(\n list(zip(nested_value_rowids, nested_nrows))):\n result = cls.from_value_rowids(\n result, value_rowids, nrows, validate=validate)\n return result\n\n @classmethod\n @dispatch.add_dispatch_support\n def from_nested_row_splits(cls,\n flat_values,\n nested_row_splits,\n name=None,\n validate=True):\n \"\"\"Creates a `RaggedTensor` from a nested list of `row_splits` tensors.\n\n Equivalent to:\n\n ```python\n result = flat_values\n for row_splits in reversed(nested_row_splits):\n result = from_row_splits(result, row_splits)\n ```\n\n Args:\n flat_values: A potentially ragged tensor.\n nested_row_splits: A list of 1-D integer tensors. 
The `i`th tensor is\n used as the `row_splits` for the `i`th ragged dimension.\n name: A name prefix for the RaggedTensor (optional).\n validate: If true, then use assertions to check that the arguments form\n a valid `RaggedTensor`. Note: these assertions incur a runtime cost,\n since they must be checked for each tensor value.\n\n Returns:\n A `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty).\n \"\"\"\n if not isinstance(validate, bool):\n raise TypeError(f\"Argument `validate` must have type bool. \"\n f\"Received {validate}.\")\n if isinstance(nested_row_splits, ops.Tensor):\n raise TypeError(f\"Argument `nested_row_splits` must be a list of \"\n f\"Tensors. Received {nested_row_splits}.\")\n with ops.name_scope(name, \"RaggedFromNestedRowSplits\",\n [flat_values] + list(nested_row_splits)):\n result = flat_values\n for splits in reversed(nested_row_splits):\n result = cls.from_row_splits(result, splits, validate=validate)\n return result\n\n @classmethod\n @dispatch.add_dispatch_support\n def from_nested_row_lengths(cls,\n flat_values,\n nested_row_lengths,\n name=None,\n validate=True):\n \"\"\"Creates a `RaggedTensor` from a nested list of `row_lengths` tensors.\n\n Equivalent to:\n\n ```python\n result = flat_values\n for row_lengths in reversed(nested_row_lengths):\n result = from_row_lengths(result, row_lengths)\n ```\n\n Args:\n flat_values: A potentially ragged tensor.\n nested_row_lengths: A list of 1-D integer tensors. The `i`th tensor is\n used as the `row_lengths` for the `i`th ragged dimension.\n name: A name prefix for the RaggedTensor (optional).\n validate: If true, then use assertions to check that the arguments form\n a valid `RaggedTensor`. Note: these assertions incur a runtime cost,\n since they must be checked for each tensor value.\n\n Returns:\n A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).\n \"\"\"\n if not isinstance(validate, bool):\n raise TypeError(f\"Argument `validate` must have type bool. \"\n f\"Received {validate}.\")\n if isinstance(nested_row_lengths, ops.Tensor):\n raise TypeError(f\"Argument `nested_row_lengths` must be a list of \"\n f\"Tensors. Received {nested_row_lengths}.\")\n with ops.name_scope(name, \"RaggedFromNestedRowlengths\",\n [flat_values] + list(nested_row_lengths)):\n result = flat_values\n for lengths in reversed(nested_row_lengths):\n result = cls.from_row_lengths(result, lengths, validate=validate)\n return result\n\n @classmethod\n def _from_nested_row_partitions(cls,\n flat_values,\n nested_row_partitions,\n name=None,\n validate=True):\n \"\"\"Creates a `RaggedTensor` from a nested list of row partitions.\n\n Equivalent to:\n\n ```python\n result = flat_values\n for row_partition in reversed(nested_row_partitions):\n result = _from_row_partition(result, row_partition)\n ```\n\n Args:\n flat_values: A potentially ragged tensor.\n nested_row_partitions: A list of row partitions. The `i`th element is\n used as the row partition for the `i`th ragged dimension.\n name: A name prefix for the RaggedTensor (optional).\n validate: If true, then use assertions to check that the arguments form\n a valid `RaggedTensor`. Note: these assertions incur a runtime cost,\n since they must be checked for each tensor value.\n\n Returns:\n A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).\n \"\"\"\n if not isinstance(validate, bool):\n raise TypeError(f\"Argument `validate` must have type bool. 
\"\n f\"Received {validate}.\")\n if isinstance(nested_row_partitions, RowPartition):\n raise TypeError(f\"Argument `nested_row_partitions` must be a list of \"\n f\"RowPartitions. Received {nested_row_partitions}.\")\n if isinstance(nested_row_partitions, ops.Tensor):\n raise TypeError(f\"Argument `nested_row_partitions` must be a list of \"\n f\"RowPartitions. Received {nested_row_partitions}.\")\n with ops.name_scope(name, \"RaggedFromNestedRowPartitions\",\n [flat_values] + list(nested_row_partitions)):\n result = flat_values\n for partition in reversed(nested_row_partitions):\n result = cls._from_row_partition(result, partition, validate=validate)\n return result\n\n @classmethod\n def _convert_values_and_partition(cls, values, row_partition, name):\n \"\"\"Converts `values` and `partition` to Tensors.\n\n If `values` is a `RaggedTensor`, then converts `values` and `partition`\n to have compatible row-partitioning dtypes. In particular, if any of the\n row partitioning tensors are `int64`, then all of the other row\n partitioning tensors wil be cast to `int64` (if auto_cast_partition_dtype()\n is true) or an error will be raised (if auto_cast_partition_dtype() is\n false).\n\n Args:\n values: The `values` for the `RaggedTensor` being constructed.\n row_partition: A RowPartition object for the `RaggedTensor` being\n constructed.\n name: The name of the RowPartition object.\n\n Returns:\n A tuple (values, partition).\n \"\"\"\n if not isinstance(row_partition, RowPartition):\n raise TypeError(f\"Argument `row_partition` must be a RowPartition. \"\n f\"Received {row_partition}.\")\n if isinstance(values, RaggedTensor):\n # pylint: disable=protected-access\n if values._row_partition.dtype != row_partition.dtype:\n if not ragged_config.auto_cast_partition_dtype():\n # pylint: disable=protected-access\n # TODO(edloper): get rid of the `name` parameter.\n raise ValueError(\n f\"Argument `row_partition` of RaggedTensor with name: {name} \"\n f\"must have same dtype as Argument `values`. \"\n f\"({row_partition.dtype} vs. {values._row_partition.dtype}).\")\n values = values.with_row_splits_dtype(row_partition.dtype)\n else:\n values = _convert_to_ragged_tensor_values(values)\n\n return (values, row_partition)\n\n #=============================================================================\n # Accessors\n #=============================================================================\n\n @property\n def dtype(self):\n \"\"\"The `DType` of values in this tensor.\"\"\"\n return self._values.dtype\n\n @property\n def shape(self):\n \"\"\"The statically known shape of this ragged tensor.\n\n Returns:\n A `TensorShape` containing the statically known shape of this ragged\n tensor. Ragged dimensions have a size of `None`.\n\n Examples:\n\n >>> tf.ragged.constant([[0], [1, 2]]).shape\n TensorShape([2, None])\n\n >>> tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape\n TensorShape([2, None, 2])\n\n \"\"\"\n nrows = self._row_partition.static_nrows\n ncols = self._row_partition.static_uniform_row_length\n value_shape = self._values.shape[1:]\n return tensor_shape.TensorShape([nrows, ncols]).concatenate(value_shape)\n\n def get_shape(self):\n \"\"\"The statically known shape of this ragged tensor.\n\n Returns:\n A `TensorShape` containing the statically known shape of this ragged\n tensor. 
Ragged dimensions have a size of `None`.\n\n Alias for `shape` property.\n\n Examples:\n\n >>> tf.ragged.constant([[0], [1, 2]]).get_shape()\n TensorShape([2, None])\n\n >>> tf.ragged.constant(\n ... [[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).get_shape()\n TensorShape([2, None, 2])\n\n \"\"\"\n return self.shape\n\n @property\n def ragged_rank(self):\n \"\"\"The number of times the RaggedTensor's flat_values is partitioned.\n\n Examples:\n\n >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])\n >>> values.ragged_rank\n 1\n\n >>> rt = tf.RaggedTensor.from_uniform_row_length(values, 2)\n >>> rt.ragged_rank\n 2\n\n Returns:\n A Python `int` indicating the number of times the underlying `flat_values`\n Tensor has been partitioned to add a new dimension.\n I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.\n \"\"\"\n values_is_ragged = isinstance(self._values, RaggedTensor)\n return self._values.ragged_rank + 1 if values_is_ragged else 1\n\n @property\n def values(self):\n \"\"\"The concatenated rows for this ragged tensor.\n\n `rt.values` is a potentially ragged tensor formed by flattening the two\n outermost dimensions of `rt` into a single dimension.\n\n `rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the\n number of items in the outer two dimensions of `rt`).\n\n `rt.ragged_rank = self.ragged_rank - 1`\n\n Returns:\n A potentially ragged tensor.\n\n #### Example:\n\n >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])\n >>> print(rt.values)\n tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)\n\n \"\"\"\n return self._values\n\n @property\n def _nested_row_partitions(self):\n \"\"\"Returns the row partitions for this `RaggedTensor`.\"\"\"\n partitions = [self._row_partition]\n rt_values = self.values\n while isinstance(rt_values, RaggedTensor):\n # pylint: disable=protected-access\n partitions.append(rt_values._row_partition)\n rt_values = rt_values.values\n return tuple(partitions)\n\n @property\n def row_splits(self):\n \"\"\"The row-split indices for this ragged tensor's `values`.\n\n `rt.row_splits` specifies where the values for each row begin and end in\n `rt.values`. In particular, the values for row `rt[i]` are stored in\n the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.\n\n Returns:\n A 1-D integer `Tensor` with shape `[self.nrows+1]`.\n The returned tensor is non-empty, and is sorted in ascending order.\n `self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to\n `self.values.shape[0]`.\n\n #### Example:\n\n >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])\n >>> print(rt.row_splits) # indices of row splits in rt.values\n tf.Tensor([0 4 4 7 8 8], shape=(6,), dtype=int64)\n\n \"\"\"\n return self._row_partition.row_splits()\n\n @property\n def uniform_row_length(self):\n \"\"\"The length of each row in this ragged tensor, or None if rows are ragged.\n\n >>> rt1 = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])\n >>> print(rt1.uniform_row_length) # rows are ragged.\n None\n\n >>> rt2 = tf.RaggedTensor.from_uniform_row_length(\n ... values=rt1, uniform_row_length=2)\n >>> print(rt2)\n <tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>\n >>> print(rt2.uniform_row_length) # rows are not ragged (all have size 2).\n tf.Tensor(2, shape=(), dtype=int64)\n\n A RaggedTensor's rows are only considered to be uniform (i.e. 
non-ragged)\n if it can be determined statically (at graph construction time) that the\n rows all have the same length.\n\n Returns:\n A scalar integer `Tensor`, specifying the length of every row in this\n ragged tensor (for ragged tensors whose rows are uniform); or `None`\n (for ragged tensors whose rows are ragged).\n \"\"\"\n return self._row_partition.uniform_row_length()\n\n @property\n def flat_values(self):\n \"\"\"The innermost `values` tensor for this ragged tensor.\n\n Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is\n `rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`.\n\n Conceptually, `flat_values` is the tensor formed by flattening the\n outermost dimension and all of the ragged dimensions into a single\n dimension.\n\n `rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]`\n (where `nvals` is the number of items in the flattened dimensions).\n\n Returns:\n A `Tensor`.\n\n #### Example:\n\n >>> rt = tf.ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])\n >>> print(rt.flat_values)\n tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)\n\n \"\"\"\n rt_values = self.values\n while isinstance(rt_values, RaggedTensor):\n rt_values = rt_values.values\n return rt_values\n\n @property\n def nested_row_splits(self):\n \"\"\"A tuple containing the row_splits for all ragged dimensions.\n\n `rt.nested_row_splits` is a tuple containing the `row_splits` tensors for\n all ragged dimensions in `rt`, ordered from outermost to innermost. In\n particular, `rt.nested_row_splits = (rt.row_splits,) + value_splits` where:\n\n * `value_splits = ()` if `rt.values` is a `Tensor`.\n * `value_splits = rt.values.nested_row_splits` otherwise.\n\n Returns:\n A `tuple` of 1-D integer `Tensor`s.\n\n #### Example:\n\n >>> rt = tf.ragged.constant(\n ... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])\n >>> for i, splits in enumerate(rt.nested_row_splits):\n ... print('Splits for dimension %d: %s' % (i+1, splits.numpy()))\n Splits for dimension 1: [0 3]\n Splits for dimension 2: [0 3 3 5]\n Splits for dimension 3: [0 4 4 7 8 8]\n\n \"\"\"\n rt_nested_splits = [self.row_splits]\n rt_values = self.values\n while isinstance(rt_values, RaggedTensor):\n rt_nested_splits.append(rt_values.row_splits)\n rt_values = rt_values.values\n return tuple(rt_nested_splits)\n\n def value_rowids(self, name=None):\n \"\"\"Returns the row indices for the `values` in this ragged tensor.\n\n `rt.value_rowids()` corresponds one-to-one with the outermost dimension of\n `rt.values`, and specifies the row containing each value. 
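    For instance, the rows can be rebuilt from `values` and `value_rowids()`
    alone; a minimal sketch using `tf.ragged.stack_dynamic_partitions`, which
    groups values by partition index:

    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> tf.ragged.stack_dynamic_partitions(
    ...     rt.values, rt.value_rowids(), rt.nrows())
    <tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>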
In particular,\n the row `rt[row]` consists of the values `rt.values[j]` where\n `rt.value_rowids()[j] == row`.\n\n Args:\n name: A name prefix for the returned tensor (optional).\n\n Returns:\n A 1-D integer `Tensor` with shape `self.values.shape[:1]`.\n The returned tensor is nonnegative, and is sorted in ascending order.\n\n #### Example:\n\n >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])\n >>> print(rt.values)\n tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)\n >>> print(rt.value_rowids()) # corresponds 1:1 with rt.values\n tf.Tensor([0 0 0 0 2 2 2 3], shape=(8,), dtype=int64)\n\n \"\"\"\n with ops.name_scope(name, \"RaggedValueRowIds\", [self]):\n return self._row_partition.value_rowids()\n\n def nested_value_rowids(self, name=None):\n \"\"\"Returns a tuple containing the value_rowids for all ragged dimensions.\n\n `rt.nested_value_rowids` is a tuple containing the `value_rowids` tensors\n for\n all ragged dimensions in `rt`, ordered from outermost to innermost. In\n particular, `rt.nested_value_rowids = (rt.value_rowids(),) + value_ids`\n where:\n\n * `value_ids = ()` if `rt.values` is a `Tensor`.\n * `value_ids = rt.values.nested_value_rowids` otherwise.\n\n Args:\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A `tuple` of 1-D integer `Tensor`s.\n\n #### Example:\n\n >>> rt = tf.ragged.constant(\n ... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])\n >>> for i, ids in enumerate(rt.nested_value_rowids()):\n ... print('row ids for dimension %d: %s' % (i+1, ids.numpy()))\n row ids for dimension 1: [0 0 0]\n row ids for dimension 2: [0 0 0 2 2]\n row ids for dimension 3: [0 0 0 0 2 2 2 3]\n\n \"\"\"\n with ops.name_scope(name, \"RaggedNestedValueRowIds\", [self]):\n rt_nested_ids = [self.value_rowids()]\n rt_values = self.values\n while isinstance(rt_values, RaggedTensor):\n rt_nested_ids.append(rt_values.value_rowids())\n rt_values = rt_values.values\n return tuple(rt_nested_ids)\n\n def nrows(self, out_type=None, name=None):\n \"\"\"Returns the number of rows in this ragged tensor.\n\n I.e., the size of the outermost dimension of the tensor.\n\n Args:\n out_type: `dtype` for the returned tensor. Defaults to\n `self.row_splits.dtype`.\n name: A name prefix for the returned tensor (optional).\n\n Returns:\n A scalar `Tensor` with dtype `out_type`.\n\n #### Example:\n\n >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])\n >>> print(rt.nrows()) # rt has 5 rows.\n tf.Tensor(5, shape=(), dtype=int64)\n\n \"\"\"\n with ops.name_scope(name, \"RaggedNRows\", [self]):\n if out_type is None:\n return self._row_partition.nrows()\n else:\n return math_ops.cast(self._row_partition.nrows(), dtype=out_type)\n\n def row_starts(self, name=None):\n \"\"\"Returns the start indices for rows in this ragged tensor.\n\n These indices specify where the values for each row begin in\n `self.values`. 
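    For example, row `i` can be recovered by slicing `self.values` between its
    start and limit indices; a small sketch using only methods on this class:

    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.values[rt.row_starts()[0]:rt.row_limits()[0]])  # row 0
    tf.Tensor([3 1 4 1], shape=(4,), dtype=int32)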
`rt.row_starts()` is equal to `rt.row_splits[:-1]`.

    Args:
      name: A name prefix for the returned tensor (optional).

    Returns:
      A 1-D integer Tensor with shape `[nrows]`.
      The returned tensor is nonnegative, and is sorted in ascending order.

    #### Example:

    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.values)
    tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
    >>> print(rt.row_starts())  # indices of row starts in rt.values
    tf.Tensor([0 4 4 7 8], shape=(5,), dtype=int64)

    """
    with ops.name_scope(name, "RaggedRowStarts", [self]):
      return self._row_partition.row_starts()

  def row_limits(self, name=None):
    """Returns the limit indices for rows in this ragged tensor.

    These indices specify where the values for each row end in
    `self.values`. `rt.row_limits(self)` is equal to `rt.row_splits[1:]`.

    Args:
      name: A name prefix for the returned tensor (optional).

    Returns:
      A 1-D integer Tensor with shape `[nrows]`.
      The returned tensor is nonnegative, and is sorted in ascending order.

    #### Example:

    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.values)
    tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
    >>> print(rt.row_limits())  # indices of row limits in rt.values
    tf.Tensor([4 4 7 8 8], shape=(5,), dtype=int64)

    """
    with ops.name_scope(name, "RaggedRowLimits", [self]):
      return self._row_partition.row_limits()

  def row_lengths(self, axis=1, name=None):
    """Returns the lengths of the rows in this ragged tensor.

    `rt.row_lengths()[i]` indicates the number of values in the
    `i`th row of `rt`.

    Args:
      axis: An integer constant indicating the axis whose row lengths should be
        returned.
      name: A name prefix for the returned tensor (optional).

    Returns:
      A potentially ragged integer Tensor with shape `self.shape[:axis]`.

    Raises:
      ValueError: If `axis` is out of bounds.

    #### Example:

    >>> rt = tf.ragged.constant(
    ...     [[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
    >>> print(rt.row_lengths())  # lengths of rows in rt
    tf.Tensor([2 0 2 1 0], shape=(5,), dtype=int64)
    >>> print(rt.row_lengths(axis=2))  # lengths of axis=2 rows.
    <tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]>

    """
    if axis == 0:
      return self._row_partition.nrows()

    if axis == 1:
      return self._row_partition.row_lengths()

    with ops.name_scope(name, "RaggedRowLengths", [self]):
      axis = array_ops.get_positive_axis(
          axis, self.shape.rank, ndims_name="rank(self)")
      if axis == 0:
        return self.nrows()
      elif axis == 1:
        splits = self.row_splits
        return splits[1:] - splits[:-1]
      elif isinstance(self.values, RaggedTensor):
        return self.with_values(self.values.row_lengths(axis - 1))
      else:
        shape = array_ops.shape(self.values, out_type=self._row_partition.dtype)
        return self.with_values(
            array_ops.ones(shape[:axis - 1], self._row_partition.dtype) *
            shape[axis - 1])

  def nested_row_lengths(self, name=None):
    """Returns a tuple containing the row_lengths for all ragged dimensions.

    `rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors
    for all ragged dimensions in `rt`, ordered from outermost to innermost.

    Args:
      name: A name prefix for the returned tensors (optional).

    Returns:
      A `tuple` of 1-D integer `Tensors`.
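      For example (a small illustration; dtypes follow `row_splits.dtype`):

      >>> rt = tf.ragged.constant(
      ...     [[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
      >>> for i, lengths in enumerate(rt.nested_row_lengths()):
      ...   print('Lengths for dimension %d: %s' % (i + 1, lengths.numpy()))
      Lengths for dimension 1: [3 0 2]
      Lengths for dimension 2: [4 0 3 1 0]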
The length of the tuple is equal to\n `self.ragged_rank`.\n \"\"\"\n with ops.name_scope(name, \"RaggedNestedRowLengths\", [self]):\n rt_nested_row_lengths = []\n rt = self\n while isinstance(rt, RaggedTensor):\n rt_nested_row_lengths.append(rt.row_lengths())\n rt = rt.values\n return tuple(rt_nested_row_lengths)\n\n def bounding_shape(self, axis=None, name=None, out_type=None):\n \"\"\"Returns the tight bounding box shape for this `RaggedTensor`.\n\n Args:\n axis: An integer scalar or vector indicating which axes to return the\n bounding box for. If not specified, then the full bounding box is\n returned.\n name: A name prefix for the returned tensor (optional).\n out_type: `dtype` for the returned tensor. Defaults to\n `self.row_splits.dtype`.\n\n Returns:\n An integer `Tensor` (`dtype=self.row_splits.dtype`). If `axis` is not\n specified, then `output` is a vector with\n `output.shape=[self.shape.ndims]`. If `axis` is a scalar, then the\n `output` is a scalar. If `axis` is a vector, then `output` is a vector,\n where `output[i]` is the bounding size for dimension `axis[i]`.\n\n #### Example:\n\n >>> rt = tf.ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])\n >>> rt.bounding_shape().numpy()\n array([5, 4])\n\n \"\"\"\n if out_type is None:\n out_type = self._row_partition.dtype\n else:\n out_type = dtypes.as_dtype(out_type)\n with ops.name_scope(name, \"RaggedBoundingBox\", [self, axis]):\n nested_splits = self.nested_row_splits\n rt_flat_values = self.flat_values\n\n # Optimized special cases for when axis=0 or axis=1:\n if isinstance(axis, int):\n if axis == 0:\n return array_ops.shape(nested_splits[0], out_type=out_type)[0] - 1\n elif axis == 1:\n result = math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0)\n if out_type != self._row_partition.dtype:\n result = math_ops.cast(result, out_type)\n return result\n\n splits_shape = array_ops.shape(self.row_splits, out_type=out_type)\n flat_values_shape = array_ops.shape(rt_flat_values, out_type=out_type)\n\n ragged_dimensions = [splits_shape[0] - 1] + [\n math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0)\n for splits in nested_splits\n ]\n inner_dimensions = flat_values_shape[1:]\n\n if out_type != self._row_partition.dtype:\n ragged_dimensions = [\n math_ops.cast(d, out_type) for d in ragged_dimensions\n ]\n bbox = array_ops.concat(\n [array_ops.stack(ragged_dimensions), inner_dimensions], axis=0)\n return bbox if axis is None else array_ops.gather(bbox, axis)\n\n #=============================================================================\n # Transformation\n #=============================================================================\n\n def with_values(self, new_values):\n \"\"\"Returns a copy of `self` with `values` replaced by `new_value`.\n\n Preserves cached row-partitioning tensors such as `self.cached_nrows` and\n `self.cached_value_rowids` if they have values.\n\n Args:\n new_values: Potentially ragged tensor to use as the `values` for the\n returned `RaggedTensor`. Must have `rank > 0`, and must have the same\n number of rows as `self.values`.\n\n Returns:\n A `RaggedTensor`. 
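      For example, a minimal sketch that rescales the flat `values` while
      reusing the existing row partition:

      >>> rt = tf.ragged.constant([[1, 2], [], [3]])
      >>> print(rt.with_values(rt.values * 10))
      <tf.RaggedTensor [[10, 20], [], [30]]>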
`result.rank = 1 + new_values.rank`.\n `result.ragged_rank = 1 + new_values.ragged_rank`\n \"\"\"\n new_values = _convert_to_ragged_tensor_values(new_values)\n new_values.shape.with_rank_at_least(1)\n self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1])\n if (isinstance(new_values, RaggedTensor) and\n self._row_partition.dtype != new_values.row_splits.dtype):\n if not ragged_config.auto_cast_partition_dtype():\n raise ValueError(\"self and new_values have mismatched row_splits \"\n \"dtypes; use RaggedTensor.with_row_splits_dtype() to \"\n \"convert them to compatible dtypes.\")\n new_values = new_values.with_row_splits_dtype(dtypes.int64)\n return self.with_row_splits_dtype(dtypes.int64).with_values(new_values)\n return RaggedTensor(\n values=new_values, row_partition=self._row_partition, internal=True)\n\n def with_flat_values(self, new_values):\n \"\"\"Returns a copy of `self` with `flat_values` replaced by `new_value`.\n\n Preserves cached row-partitioning tensors such as `self.cached_nrows` and\n `self.cached_value_rowids` if they have values.\n\n Args:\n new_values: Potentially ragged tensor that should replace\n `self.flat_values`. Must have `rank > 0`, and must have the same number\n of rows as `self.flat_values`.\n\n Returns:\n A `RaggedTensor`.\n `result.rank = self.ragged_rank + new_values.rank`.\n `result.ragged_rank = self.ragged_rank + new_values.ragged_rank`.\n \"\"\"\n if isinstance(self._values, RaggedTensor):\n return self.with_values(self.values.with_flat_values(new_values))\n else:\n new_values = _convert_to_ragged_tensor_values(new_values)\n return self.with_values(new_values)\n\n def with_row_splits_dtype(self, dtype):\n \"\"\"Returns a copy of this RaggedTensor with the given `row_splits` dtype.\n\n For RaggedTensors with multiple ragged dimensions, the `row_splits` for all\n nested `RaggedTensor` objects are cast to the given dtype.\n\n Args:\n dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`.\n\n Returns:\n A copy of this RaggedTensor, with the `row_splits` cast to the given\n type.\n \"\"\"\n dtype = dtypes.as_dtype(dtype)\n if dtype not in (dtypes.int32, dtypes.int64):\n raise ValueError(f\"Argument `row_splits` dtype must be int32 or int64. \"\n f\"Received {dtype}.\")\n if self._row_partition.dtype == dtype:\n return self\n current_values = self._values\n if isinstance(current_values, RaggedTensor):\n return RaggedTensor(\n values=current_values.with_row_splits_dtype(dtype),\n row_partition=self._row_partition.with_row_splits_dtype(dtype),\n internal=True)\n else:\n return RaggedTensor(\n values=current_values,\n row_partition=self._row_partition.with_row_splits_dtype(dtype),\n internal=True)\n\n def merge_dims(self, outer_axis, inner_axis):\n \"\"\"Merges outer_axis...inner_axis into a single dimension.\n\n Returns a copy of this RaggedTensor with the specified range of dimensions\n flattened into a single dimension, with elements in row-major order.\n\n #### Examples:\n\n >>> rt = tf.ragged.constant([[[1, 2], [3]], [[4, 5, 6]]])\n >>> print(rt.merge_dims(0, 1))\n <tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>\n >>> print(rt.merge_dims(1, 2))\n <tf.RaggedTensor [[1, 2, 3], [4, 5, 6]]>\n >>> print(rt.merge_dims(0, 2))\n tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32)\n\n To mimic the behavior of `np.flatten` (which flattens all dimensions), use\n `rt.merge_dims(0, -1). 
To mimic the behavior of `tf.layers.Flatten` (which\n flattens all dimensions except the outermost batch dimension), use\n `rt.merge_dims(1, -1)`.\n\n Args:\n outer_axis: `int`: The first dimension in the range of dimensions to\n merge. May be negative if `self.shape.rank` is statically known.\n inner_axis: `int`: The last dimension in the range of dimensions to merge.\n May be negative if `self.shape.rank` is statically known.\n\n Returns:\n A copy of this tensor, with the specified dimensions merged into a\n single dimension. The shape of the returned tensor will be\n `self.shape[:outer_axis] + [N] + self.shape[inner_axis + 1:]`, where `N`\n is the total number of slices in the merged dimensions.\n \"\"\"\n outer_axis = array_ops.get_positive_axis(\n outer_axis,\n self.shape.rank,\n axis_name=\"outer_axis\",\n ndims_name=\"rank(self)\")\n inner_axis = array_ops.get_positive_axis(\n inner_axis,\n self.shape.rank,\n axis_name=\"inner_axis\",\n ndims_name=\"rank(self)\")\n if not outer_axis <= inner_axis:\n raise ValueError(f\"Expected outer_axis ({outer_axis}) to be less than or \"\n f\"equal to inner_axis ({inner_axis}).\")\n return merge_dims(self, outer_axis, inner_axis)\n\n def _set_shape(self, shape):\n \"\"\"Updates the static shape of `self` to be `shape`.\n\n * If a dimension of `shape` has known rank, and is encoded via\n partitioning, then this will update the corresponding partition to\n define `_uniform_row_length` and `nrows`.\n * If a dimension of `shape` has a known rank, and is encoded as one\n of the `flat_values` dimensions, then `flat_values.set_shape()` will\n be used to update its shape.\n\n Warning: Using this method to assert an incorrect shape for a RaggedTensor\n (i.e., one that's not consistent with its actual shape) can cause\n segmentation faults and very difficult-to-diagnose behavior. Only use this\n method if you are certain that the shape is correct.\n\n Args:\n shape: `tf.TensorShape` specifying the shape for this `RaggedTensor`.\n \"\"\"\n # TODO(edloper): Refactor this to not directly access private members\n # of RowPartition.\n # pylint: disable=protected-access\n\n shape = tensor_shape.as_shape(shape)\n if shape.rank is None:\n return # Nothing to do.\n\n shape = shape.as_list()\n\n # Outermost dimension\n if shape[0] is not None:\n self._row_partition._row_splits.set_shape(shape[0] + 1)\n\n # Partitioned dimensions\n dtype = self._row_partition.dtype\n for i, partition in enumerate(self._nested_row_partitions):\n size = shape[i + 1]\n if size is not None:\n if partition._uniform_row_length is not None:\n old_row_length = tensor_util.constant_value(\n partition._uniform_row_length)\n if old_row_length is not None:\n if size == old_row_length:\n continue # already have shape info for this axis.\n else:\n raise ValueError(f\"Inconsistent size for axis {i + 1}: \"\n f\"{old_row_length} vs. 
{size}.\")\n partition._uniform_row_length = ops.convert_to_tensor(size, dtype)\n if partition._nrows is None:\n partition._nrows = array_ops.size(\n partition._row_splits, out_type=dtype) - 1\n\n # self.flat_values could be a CompositeTensor and doesn't have set_shape.\n if hasattr(self.flat_values, \"set_shape\"):\n # Inner dimensions\n flat_shape = tensor_shape.as_shape([None] + shape[self.ragged_rank + 1:])\n self.flat_values.set_shape(flat_shape)\n\n #=============================================================================\n # Tensor Type Conversions\n #=============================================================================\n\n @classmethod\n @dispatch.add_dispatch_support\n def from_tensor(cls,\n tensor,\n lengths=None,\n padding=None,\n ragged_rank=1,\n name=None,\n row_splits_dtype=dtypes.int64):\n \"\"\"Converts a `tf.Tensor` into a `RaggedTensor`.\n\n The set of absent/default values may be specified using a vector of lengths\n or a padding value (but not both). If `lengths` is specified, then the\n output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`. If\n 'lengths' is a list of lists or tuple of lists, those lists will be used\n as nested row lengths. If `padding` is specified, then any row *suffix*\n consisting entirely of `padding` will be excluded from the returned\n `RaggedTensor`. If neither `lengths` nor `padding` is specified, then the\n returned `RaggedTensor` will have no absent/default values.\n\n Examples:\n\n >>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])\n >>> tf.RaggedTensor.from_tensor(dt)\n <tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]>\n >>> tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3])\n <tf.RaggedTensor [[5], [], [6, 0, 0]]>\n\n >>> tf.RaggedTensor.from_tensor(dt, padding=0)\n <tf.RaggedTensor [[5, 7], [0, 3], [6]]>\n\n >>> dt = tf.constant([[[5, 0], [7, 0], [0, 0]],\n ... [[0, 0], [3, 0], [0, 0]],\n ... [[6, 0], [0, 0], [0, 0]]])\n >>> tf.RaggedTensor.from_tensor(dt, lengths=([2, 0, 3], [1, 1, 2, 0, 1]))\n <tf.RaggedTensor [[[5], [7]], [], [[6, 0], [], [0]]]>\n\n Args:\n tensor: The `Tensor` to convert. Must have rank `ragged_rank + 1` or\n higher.\n lengths: An optional set of row lengths, specified using a 1-D integer\n `Tensor` whose length is equal to `tensor.shape[0]` (the number of rows\n in `tensor`). If specified, then `output[row]` will contain\n `tensor[row][:lengths[row]]`. Negative lengths are treated as zero. You\n may optionally pass a list or tuple of lengths to this argument, which\n will be used as nested row lengths to construct a ragged tensor with\n multiple ragged dimensions.\n padding: An optional padding value. If specified, then any row suffix\n consisting entirely of `padding` will be excluded from the returned\n RaggedTensor. `padding` is a `Tensor` with the same dtype as `tensor`\n and with `shape=tensor.shape[ragged_rank + 1:]`.\n ragged_rank: Integer specifying the ragged rank for the returned\n `RaggedTensor`. Must be greater than zero.\n name: A name prefix for the returned tensors (optional).\n row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`\n tensor. One of `tf.int32` or `tf.int64`.\n\n Returns:\n A `RaggedTensor` with the specified `ragged_rank`. 
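      For example, negative entries in `lengths` are clamped to zero, as
      documented for that argument above (a small illustration):

      >>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
      >>> tf.RaggedTensor.from_tensor(dt, lengths=[2, -1, 3])
      <tf.RaggedTensor [[5, 7], [], [6, 0, 0]]>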
The shape of the\n returned ragged tensor is compatible with the shape of `tensor`.\n Raises:\n ValueError: If both `lengths` and `padding` are specified.\n \"\"\"\n row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n if lengths is not None and padding is not None:\n raise ValueError(\"Specify argument `lengths` or `padding`, but not both.\")\n if not isinstance(ragged_rank, int):\n raise TypeError(f\"Argument `ragged_rank` must be an int. \"\n f\"Received {ragged_rank}.\")\n if ragged_rank <= 0:\n raise ValueError(f\"Argument `ragged_rank` must be greater than 0. \"\n f\"Received {ragged_rank}.\")\n\n with ops.name_scope(name, \"RaggedFromTensor\", [tensor, lengths, padding]):\n tensor = ops.convert_to_tensor(tensor, name=\"tensor\")\n tensor.shape.with_rank_at_least(ragged_rank + 1)\n input_shape = array_ops.shape(tensor, out_type=row_splits_dtype)\n ncols = input_shape[1]\n\n # Handle nested row lengths.\n if (lengths is not None and isinstance(lengths, (list, tuple)) and\n len(lengths) and not isinstance(lengths[0], (int, float))):\n if ragged_rank not in (1, len(lengths)):\n # Note: we accept `ragged_rank=1` here because it's the default value;\n # i.e., if the user passes in a tuple of lengths, but doesn't specify\n # ragged_rank, then we should use that tuple to determine ragged_rank.\n # We only want to complain if they pass in an explicit ragged_rank\n # that doesn't match len(lengths).\n raise ValueError(f\"If Argument `lengths` is a tuple of row_lengths, \"\n f\"argument `ragged_rank` must be \"\n f\"len(lengths): {len(lengths)}. Received \"\n f\"ragged_rank: {ragged_rank}.\")\n # Rather than reconstructing the tensor mask directly, we can\n # recreate it as a boolean RaggedTensor, then densify that and use\n # that as the mask to clear out the unused data in the passed tensor.\n tensor.shape.with_rank_at_least(len(lengths) + 1)\n num_tokens = math_ops.reduce_sum(lengths[-1])\n ones_mask = array_ops.ones([num_tokens], dtype=dtypes.bool)\n ragged_mask = cls.from_nested_row_lengths(\n ones_mask, lengths, validate=False)\n dense_ragged_mask = ragged_mask.to_tensor(default_value=False)\n masked_data = array_ops.boolean_mask(tensor, dense_ragged_mask)\n return cls.from_nested_row_lengths(masked_data, lengths, validate=False)\n\n # Handle ragged_rank>1 via recursion:\n # If the output should have multiple ragged dimensions, then first\n # flatten the tensor to eliminate all but the last ragged dimension,\n # and recursively convert that flattened tensor. Then add on the splits\n # for the dimensions that we flattened out.\n if ragged_rank > 1:\n if tensor.shape.is_fully_defined():\n input_shape = tensor.shape.as_list()\n # The total number of elements in each dimension. 
E.g., if
          # input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total.
          dim_size = np.cumprod(input_shape)
          new_shape = [dim_size[ragged_rank - 1]] + input_shape[ragged_rank:]
        else:
          dim_size = math_ops.cumprod(input_shape)
          new_shape = array_ops.concat(
              [[dim_size[ragged_rank - 1]], input_shape[ragged_rank:]], axis=0)
        flattened = array_ops.reshape(tensor, new_shape)
        result = cls.from_tensor(
            flattened, lengths, padding, row_splits_dtype=row_splits_dtype)

        for axis in range(ragged_rank - 1, 0, -1):
          dim_len = tensor_shape.dimension_at_index(tensor.shape, axis).value
          if dim_len is None:
            dim_len = input_shape[axis]
          else:
            dim_len = constant_op.constant(dim_len, row_splits_dtype)
          result = RaggedTensor.from_uniform_row_length(
              values=result,
              uniform_row_length=dim_len,
              nrows=dim_size[axis - 1],
              validate=False)
        return result

      # If padding was specified, then use it to find row lengths.
      if padding is not None:
        padding = ops.convert_to_tensor(
            padding, name="padding", dtype=tensor.dtype)
        padding.shape.assert_is_compatible_with(tensor.shape[2:])

        # Find places where the padding is equal to the tensor. (This will
        # broadcast `padding` across the outermost 2 dimensions of `tensor`,
        # so `has_default_value.shape = tensor.shape`.)
        has_default_value = math_ops.equal(padding, tensor)

        # If the padding isn't a scalar, then require that all values in the
        # padding match each item in the tensor. After this block of code,
        # `has_default.shape = tensor.shape[:2]`. (Unfortunately, we can't just
        # use reduce_all for both cases, because when you pass an empty `axis`
        # list to reduce_all, it reduces all axes; but we want it to reduce no
        # axes -- i.e., to be a no-op.)
        tensor_rank = array_ops.rank(tensor)
        reduce_axis = math_ops.range(2, tensor_rank)
        has_default = control_flow_ops.cond(
            tensor_rank > 2,
            lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis),
            lambda: has_default_value)
        has_default.set_shape(tensor_shape.TensorShape([None, None]))
        has_default.set_shape(tensor.shape[:2])

        # Use has_default to find the length of each row: for each
        # non-default item in a row, calculate the length that the row needs to
        # have to include that item; and then take the max of those values
        # (across each row).
        has_nondefault = math_ops.logical_not(has_default)
        has_nondefault = math_ops.cast(has_nondefault, row_splits_dtype)
        length_for_nondefault_value = (
            has_nondefault *
            array_ops.expand_dims(math_ops.range(1, ncols + 1), 0))
        lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)

      if lengths is not None:
        # If we have lengths (either directly supplied, or computed from
        # paddings), then use those to construct splits; and then use masking
        # to get the corresponding values.
        lengths = ragged_util.convert_to_int_tensor(lengths, "lengths",
                                                    row_splits_dtype)
        lengths.shape.assert_has_rank(1)
        lengths = math_ops.minimum(lengths, ncols)
        lengths = math_ops.maximum(lengths, 0)
        limits = math_ops.cumsum(lengths)
        splits = array_ops.concat(
            [array_ops.zeros([1], row_splits_dtype), limits], axis=0)
        mask = array_ops.sequence_mask(lengths, maxlen=ncols)
        values = array_ops.boolean_mask(tensor, mask)
        return cls.from_row_splits(values, splits, validate=False)

      # If neither padding nor lengths were specified, then create a splits
      # vector that contains no default values, and reshape the input tensor
      # to form the values for the RaggedTensor.
      values_shape = 
array_ops.concat(\n [[input_shape[0] * input_shape[1]], input_shape[2:]], axis=0)\n values = array_ops.reshape(tensor, values_shape)\n const_nrows = tensor_shape.dimension_at_index(tensor.shape, 0).value\n const_ncols = tensor_shape.dimension_at_index(tensor.shape, 1).value\n if const_nrows is not None:\n nrows = constant_op.constant(const_nrows, row_splits_dtype)\n else:\n nrows = input_shape[0]\n if const_ncols is not None:\n ncols = constant_op.constant(const_ncols, row_splits_dtype)\n else:\n ncols = input_shape[1]\n return RaggedTensor.from_uniform_row_length(\n values=values, uniform_row_length=ncols, nrows=nrows, validate=False)\n\n def to_tensor(self, default_value=None, name=None, shape=None):\n \"\"\"Converts this `RaggedTensor` into a `tf.Tensor`.\n\n If `shape` is specified, then the result is padded and/or truncated to\n the specified shape.\n\n Examples:\n\n >>> rt = tf.ragged.constant([[9, 8, 7], [], [6, 5], [4]])\n >>> print(rt.to_tensor())\n tf.Tensor(\n [[9 8 7] [0 0 0] [6 5 0] [4 0 0]], shape=(4, 3), dtype=int32)\n >>> print(rt.to_tensor(shape=[5, 2]))\n tf.Tensor(\n [[9 8] [0 0] [6 5] [4 0] [0 0]], shape=(5, 2), dtype=int32)\n\n Args:\n default_value: Value to set for indices not specified in `self`. Defaults\n to zero. `default_value` must be broadcastable to\n `self.shape[self.ragged_rank + 1:]`.\n name: A name prefix for the returned tensors (optional).\n shape: The shape of the resulting dense tensor. In particular,\n `result.shape[i]` is `shape[i]` (if `shape[i]` is not None), or\n `self.bounding_shape(i)` (otherwise).`shape.rank` must be `None` or\n equal to `self.rank`.\n\n Returns:\n A `Tensor` with shape `ragged.bounding_shape(self)` and the\n values specified by the non-empty values in `self`. Empty values are\n assigned `default_value`.\n \"\"\"\n with ops.name_scope(name, \"RaggedToTensor\", [self, default_value, shape]):\n if default_value is not None:\n default_value = ops.convert_to_tensor(\n default_value, name=\"default_value\", dtype=self.dtype)\n type_tensor_pairs = _get_row_partition_type_tensor_pairs(self)\n row_partition_types = [x[0] for x in type_tensor_pairs]\n row_partition_tensors = [x[1] for x in type_tensor_pairs]\n if default_value is None:\n default_value = array_ops.zeros((), self.dtype)\n\n if (isinstance(shape, (list, tuple)) and\n any(isinstance(v, ops.Tensor) for v in shape) and\n all(isinstance(v, (int, ops.Tensor)) for v in shape)):\n shape = array_ops.stack(shape)\n\n shape_tensor = _shape_as_tensor(shape, row_partition_tensors[0].dtype)\n tensor = gen_ragged_conversion_ops.ragged_tensor_to_tensor(\n shape=shape_tensor,\n values=self.flat_values,\n default_value=default_value,\n row_partition_types=row_partition_types,\n row_partition_tensors=row_partition_tensors)\n\n ragged_shape = self.shape\n\n if ragged_shape.rank is not None and not isinstance(shape, ops.Tensor):\n # Merged self.shape and shape, favoring the second one as it takes\n # into account potential padding added to the output.\n shape = tensor_shape.as_shape(shape)\n if shape.rank is None:\n output_shape = ragged_shape\n else:\n # At this point we can assume that hshape.rank == ragged_shape.rank\n # because otherwise it would have failed earlier.\n output_shape = [\n s1 if s1 is not None else s2\n for (s1, s2) in zip(shape.as_list(), ragged_shape.as_list())\n ]\n tensor.set_shape(output_shape)\n\n return tensor\n\n @classmethod\n @dispatch.add_dispatch_support\n def from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64):\n \"\"\"Converts a 2D 
`tf.sparse.SparseTensor` to a `RaggedTensor`.

    Each row of the `output` `RaggedTensor` will contain the explicit values
    from the same row in `st_input`. `st_input` must be ragged-right. If it
    is not ragged-right, then an error will be generated.

    Example:

    >>> indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]]
    >>> st = tf.sparse.SparseTensor(indices=indices,
    ...                             values=[1, 2, 3, 4, 5],
    ...                             dense_shape=[4, 3])
    >>> tf.RaggedTensor.from_sparse(st).to_list()
    [[1, 2, 3], [4], [], [5]]

    Currently, only two-dimensional `SparseTensors` are supported.

    Args:
      st_input: The sparse tensor to convert.  Must have rank 2.
      name: A name prefix for the returned tensors (optional).
      row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
        tensor. One of `tf.int32` or `tf.int64`.

    Returns:
      A `RaggedTensor` with the same values as `st_input`.
      `output.ragged_rank = rank(st_input) - 1`.
      `output.shape = [st_input.dense_shape[0], None]`.
    Raises:
      ValueError: If the number of dimensions in `st_input` is not known
        statically, or is not two.
    """
    row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
    if not sparse_tensor.is_sparse(st_input):
      raise TypeError(f"Argument `st_input` must be of type SparseTensor, but "
                      f"is of type {type(st_input).__name__}.")
    with ops.name_scope(name, "RaggedFromSparse", [st_input]):
      st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
          st_input, name="st_input")

      if st_input.dense_shape.shape.ndims is None:
        static_rank_from_dense_shape = None
      else:
        static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value

      if st_input.indices.shape.ndims is None:
        static_rank_from_indices = None
      else:
        static_rank_from_indices = st_input.indices.shape.dims[1].value

      if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
        raise ValueError("rank(st_input) must be 2.")

      with ops.control_dependencies(
          _assert_sparse_indices_are_ragged_right(st_input.indices)):
        # Treat sparse row indices as segment ids to generate a splits tensor
        # that we can pair with the sparse tensor values.
(Ignore sparse column\n # indices.)\n segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype)\n num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype)\n return cls.from_value_rowids(\n st_input.values, segment_ids, num_segments, validate=False)\n\n def to_sparse(self, name=None):\n \"\"\"Converts this `RaggedTensor` into a `tf.sparse.SparseTensor`.\n\n Example:\n\n >>> rt = tf.ragged.constant([[1, 2, 3], [4], [], [5, 6]])\n >>> print(rt.to_sparse())\n SparseTensor(indices=tf.Tensor(\n [[0 0] [0 1] [0 2] [1 0] [3 0] [3 1]],\n shape=(6, 2), dtype=int64),\n values=tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32),\n dense_shape=tf.Tensor([4 3], shape=(2,), dtype=int64))\n\n Args:\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A SparseTensor with the same values as `self`.\n \"\"\"\n with ops.name_scope(name, \"RaggedToSparse\", [self]):\n result = gen_ragged_conversion_ops.ragged_tensor_to_sparse(\n self.nested_row_splits, self.flat_values, name=name)\n return sparse_tensor.SparseTensor(result.sparse_indices,\n result.sparse_values,\n result.sparse_dense_shape)\n\n @classmethod\n def _from_variant(cls,\n variant,\n dtype,\n output_ragged_rank,\n input_ragged_rank=None,\n row_splits_dtype=dtypes.int64,\n name=None):\n \"\"\"Converts a `variant` Tensor into a `RaggedTensor`.\n\n The input `variant` could be a scalar, meaning it encodes a single\n `RaggedTensor` with ragged_rank `output_ragged_rank`. Alternatively it could\n have an arbitrary rank, in which case each element is decoded into a\n `RaggedTensor` with ragged_rank `input_ragged_rank` and these are then\n stacked according to the input shape to output a single `RaggedTensor`\n with ragged_rank `output_ragged_rank`. If `input_ragged_rank` is not\n provided, it is inferred dynamically as `output_ragged_rank` -\n `rank(variant)`. If `input_ragged_rank` is provided, the following must be\n true: `output_ragged_rank` = `input_ragged_rank` + `rank(variant)`.\n\n Example:\n\n >>> rt = tf.ragged.constant([[0], [1, 2]])\n >>> et = rt._to_variant()\n >>> stacked_et = tf.stack([et, et])\n >>> tf.RaggedTensor._from_variant( # scalar input.\n ... et, dtype=tf.int32, output_ragged_rank=1).to_list()\n [[0], [1, 2]]\n >>> tf.RaggedTensor._from_variant( # batched input.\n ... stacked_et, dtype=tf.int32, output_ragged_rank=2).to_list()\n [[[0], [1, 2]], [[0], [1, 2]]]\n\n Args:\n variant: A `variant` Tensor representing an encoded (possibly\n nested-batched) `RaggedTensor`.\n dtype: The dtype of the encoded `RaggedTensor`.\n output_ragged_rank: The expected ragged rank of the output `RaggedTensor`.\n input_ragged_rank: The ragged rank of each encoded `RaggedTensor`. This is\n optional and inferred dynamically if not provided.\n row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. 
One\n of `tf.int32` or `tf.int64`.\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A `RaggedTensor` of dtype `dtype` and ragged rank `output_ragged_rank`.\n\n Raises:\n ValueError: If the input rank is known, `input_ragged_rank` is provided\n and `output_ragged_rank` = `input_ragged_rank` + `rank(variant)` does\n not hold.\n \"\"\"\n variant = ops.convert_to_tensor(\n variant, name=\"variant\", dtype=dtypes.variant)\n if (variant.shape.ndims is not None and input_ragged_rank is not None and\n output_ragged_rank != input_ragged_rank + variant.shape.ndims):\n raise ValueError(\n f\"Argument `output_ragged_rank` ({output_ragged_rank}) must be equal \"\n f\"to `input_ragged_rank` + `variant.shape.ndims` \"\n f\"({input_ragged_rank} + {variant.shape.ndims}).\")\n input_ragged_rank = -1 if input_ragged_rank is None else input_ragged_rank\n with ops.name_scope(\n name, \"RaggedFromVariant\",\n [variant, dtype, input_ragged_rank, output_ragged_rank]):\n result = gen_ragged_conversion_ops.ragged_tensor_from_variant(\n variant, input_ragged_rank, output_ragged_rank, dtype,\n row_splits_dtype, name)\n return cls.from_nested_row_splits(\n result.output_dense_values,\n result.output_nested_splits,\n validate=False)\n\n def _to_variant(self, batched_input=False, name=None):\n \"\"\"Converts this `RaggedTensor` into a `variant` Tensor.\n\n If `batched_input` is `True`, then the `RaggedTensor` is unbatched along the\n zero-th dimension, each component `RaggedTensor` is encoded into a scalar\n `variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.\n If `batched_input` is `False`, then the `RaggedTensor` is encoded as is and\n a scalar `variant` Tensor is returned.\n\n Example:\n >>> rt = tf.ragged.constant([[[0]], [[1]], [[2]]])\n >>> rt._to_variant().shape.as_list()\n []\n >>> rt._to_variant(batched_input=True).shape.as_list()\n [3]\n\n Args:\n batched_input: If `True`, the `RaggedTensor` is unbatched and converted to\n a `variant` vector. Set to `False` by default.\n name: A name prefix for the returned tensors (optional).\n\n Returns:\n A `variant` Tensor that encodes this `RaggedTensor`.\n \"\"\"\n with ops.name_scope(name, \"RaggedToVariant\", [self, batched_input]):\n return gen_ragged_conversion_ops.ragged_tensor_to_variant(\n self.nested_row_splits, self.flat_values, batched_input, name)\n\n #=============================================================================\n # String Encoding\n #=============================================================================\n def __repr__(self):\n if self._is_eager():\n # The np.array2string in _formatter provides a separator argument, but\n # doesn't handle recursive calls correctly. The np.printoptions handles\n # recursive calls correctly, but doesn't provide a separator argument.\n # Combines them together to print elements separated by comma, while\n # avoiding the redundant array prefixes and dtypes. 
For example,\n # the value of tf.ragged.constant([[1, 2], [3, 4]]) will look like\n #\n # [[1, 2],\n # [3, 4]]\n with np.printoptions(formatter={\"all\": _formatter}):\n value_text = _formatter(self.numpy())\n return f\"<tf.RaggedTensor {value_text}>\"\n else:\n return \"tf.RaggedTensor(values=%s, row_splits=%s)\" % (self.values,\n self.row_splits)\n\n #=============================================================================\n # Eager Execution Mode\n #=============================================================================\n\n def numpy(self):\n \"\"\"Returns a numpy `array` with the values for this `RaggedTensor`.\n\n Requires that this `RaggedTensor` was constructed in eager execution mode.\n\n Ragged dimensions are encoded using numpy `arrays` with `dtype=object` and\n `rank=1`, where each element is a single row.\n\n #### Examples\n\n In the following example, the value returned by `RaggedTensor.numpy()`\n contains three numpy `array` objects: one for each row (with `rank=1` and\n `dtype=int64`), and one to combine them (with `rank=1` and `dtype=object`):\n\n >>> tf.ragged.constant([[1, 2, 3], [4, 5]], dtype=tf.int64).numpy()\n array([array([1, 2, 3]), array([4, 5])], dtype=object)\n\n Uniform dimensions are encoded using multidimensional numpy `array`s. In\n the following example, the value returned by `RaggedTensor.numpy()` contains\n a single numpy `array` object, with `rank=2` and `dtype=int64`:\n\n >>> tf.ragged.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64).numpy()\n array([[1, 2, 3], [4, 5, 6]])\n\n Returns:\n A numpy `array`.\n \"\"\"\n if not self._is_eager():\n raise ValueError(\"RaggedTensor.numpy() is only supported in eager mode.\")\n values = self.values.numpy()\n splits = self.row_splits.numpy()\n rows = [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]\n if not rows:\n return np.zeros((0, 0) + values.shape[1:], dtype=values.dtype)\n # Note: if `rows` have ragged lengths, then they will be stored in a\n # np.ndarray with dtype=object and rank=1. If they have uniform lengths,\n # they will be combined into a single np.ndarray with dtype=row.dtype and\n # rank=row.rank+1.\n #\n # Manually set dtype as numpy now complains when given ragged rows.\n has_variable_length_rows = any(len(row) != len(rows[0]) for row in rows)\n dtype = np.object_ if has_variable_length_rows else None\n return np.array(rows, dtype=dtype)\n\n def to_list(self):\n \"\"\"Returns a nested Python `list` with the values for this `RaggedTensor`.\n\n Requires that `rt` was constructed in eager execution mode.\n\n Returns:\n A nested Python `list`.\n \"\"\"\n if not isinstance(self.row_splits, ops.EagerTensor):\n raise ValueError(\"to_list can only be used in eager mode.\")\n row_splits = self.row_splits.numpy().tolist()\n values = self.values\n\n if isinstance(values, RaggedTensor):\n return [\n values[row_splits[i]:row_splits[i + 1]].to_list()\n for i in range(len(row_splits) - 1)\n ]\n else:\n # Convert values to a Python list.\n if hasattr(values, \"numpy\"):\n values_as_list = values.numpy().tolist()\n elif hasattr(values, \"to_list\"):\n values_as_list = values.to_list()\n else:\n raise ValueError(\"values must be convertible to a list\")\n\n return [\n values_as_list[row_splits[i]:row_splits[i + 1]]\n for i in range(len(row_splits) - 1)\n ]\n\n def _eager_value(self):\n \"\"\"Returns a RaggedTensorValue for self. 
Requires self._is_eager()=true.\"\"\"\n value = self.flat_values.numpy()\n for row_splits in reversed(self.nested_row_splits):\n value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy())\n return value\n\n def _is_eager(self):\n \"\"\"Returns True if values & row_splits Tensors are all `EagerTensor`s.\"\"\"\n rt = self\n while isinstance(rt, RaggedTensor):\n if not isinstance(rt.row_splits, ops.EagerTensor):\n return False\n rt = rt.values\n return isinstance(rt, ops.EagerTensor)\n\n #=============================================================================\n # Operators\n #=============================================================================\n # To avoid circular dependencies, we define stub methods for operators here,\n # and then override them when the ragged_operators module is imported.\n\n def _overloaded_operator(name): # pylint: disable=no-self-argument\n\n def stub(*args, **kwargs):\n del args, kwargs\n raise ValueError(\n f\"You must import 'tensorflow.python.ops.ragged.ragged_ops' \"\n f\"before using RaggedTensor.{name}.\")\n\n return stub\n\n __getitem__ = _overloaded_operator(\"__getitem__\")\n __ge__ = _overloaded_operator(\"__ge__\")\n __gt__ = _overloaded_operator(\"__gt__\")\n __le__ = _overloaded_operator(\"__le__\")\n __lt__ = _overloaded_operator(\"__lt__\")\n __and__ = _overloaded_operator(\"__and__\")\n __rand__ = _overloaded_operator(\"__rand__\")\n __invert__ = _overloaded_operator(\"__invert__\")\n __ror__ = _overloaded_operator(\"__ror__\")\n __or__ = _overloaded_operator(\"__or__\")\n __xor__ = _overloaded_operator(\"__xor__\")\n __rxor__ = _overloaded_operator(\"__rxor__\")\n __abs__ = _overloaded_operator(\"__abs__\")\n __add__ = _overloaded_operator(\"__add__\")\n __radd__ = _overloaded_operator(\"__radd__\")\n __div__ = _overloaded_operator(\"__div__\")\n __rdiv__ = _overloaded_operator(\"__rdiv__\")\n __floordiv__ = _overloaded_operator(\"__floordiv__\")\n __rfloordiv__ = _overloaded_operator(\"__rfloordiv__\")\n __mod__ = _overloaded_operator(\"__mod__\")\n __rmod__ = _overloaded_operator(\"__rmod__\")\n __mul__ = _overloaded_operator(\"__mul__\")\n __rmul__ = _overloaded_operator(\"__rmul__\")\n __neg__ = _overloaded_operator(\"__neg__\")\n __pow__ = _overloaded_operator(\"__pow__\")\n __rpow__ = _overloaded_operator(\"__rpow__\")\n __sub__ = _overloaded_operator(\"__sub__\")\n __rsub__ = _overloaded_operator(\"__rsub__\")\n __truediv__ = _overloaded_operator(\"__truediv__\")\n __rtruediv__ = _overloaded_operator(\"__rtruediv__\")\n del _overloaded_operator\n\n #=============================================================================\n # Name Scope\n #=============================================================================\n\n # This private function is used by ops.name_scope to ensure that all of the\n # input tensors for the scope belong to the same graph. 
Defining this means\n # that you may include `RaggedTensor` objects in the name_scope `values`\n # list.\n def _as_graph_element(self):\n \"\"\"Convert `self` to a graph element.\"\"\"\n values = self.values\n while isinstance(values, RaggedTensor):\n values = values.values\n return values\n\n #=============================================================================\n # Composite Tensor\n #=============================================================================\n\n @property\n def _type_spec(self):\n return RaggedTensorSpec.from_value(self)\n\n def _shape_invariant_to_type_spec(self, shape):\n return RaggedTensorSpec(shape, self.dtype, self.ragged_rank,\n self.row_splits.dtype)\n\n def consumers(self):\n return self._consumers()\n\n\ndef is_ragged(value):\n \"\"\"Returns true if `value` is a ragged tensor or ragged tensor value.\"\"\"\n return isinstance(value,\n (RaggedTensor, ragged_tensor_value.RaggedTensorValue))\n\n\ndef match_row_splits_dtypes(*tensors, **kwargs):\n \"\"\"Return a copy of `tensors` with row_splits all having the same dtype.\n\n Args:\n *tensors: A list of Tensors or RaggedTensors.\n **kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors),\n where `dtype` is the data type used by row-splits, and `tensors` is the\n converted list of `Tensors` and `RaggedTensors`.\n\n Returns:\n The converted list of `Tensors` and `RaggedTensors`.\n \"\"\"\n return_dtype = kwargs.pop(\"return_dtype\", False)\n if kwargs:\n raise ValueError(f\"Unexpected keyword args {kwargs}.\")\n\n has_int32 = False\n has_int64 = False\n for tensor in tensors:\n if isinstance(tensor, RaggedTensor):\n if tensor.row_splits.dtype == dtypes.int32:\n has_int32 = True\n else:\n has_int64 = True\n\n if has_int32 and has_int64:\n if not ragged_config.auto_cast_partition_dtype():\n raise ValueError(\"Input RaggedTensors have mismatched row_splits dtypes; \"\n \"use RaggedTensor.with_row_splits_dtype() to convert \"\n \"them to compatible dtypes.\")\n dtype = dtypes.int64\n tensors = tuple(\n t.with_row_splits_dtype(dtypes.int64) if isinstance(t, RaggedTensor\n ) else t\n for t in tensors)\n\n elif has_int32:\n dtype = dtypes.int32\n else:\n dtype = dtypes.int64\n\n if return_dtype:\n return (dtype, tensors)\n else:\n return tensors\n\n\n#===============================================================================\n# RaggedTensorSpec\n#===============================================================================\n@tf_export(\"RaggedTensorSpec\")\n@type_spec.register(\"tf.RaggedTensorSpec\")\nclass RaggedTensorSpec(type_spec.BatchableTypeSpec):\n \"\"\"Type specification for a `tf.RaggedTensor`.\"\"\"\n\n __slots__ = [\n \"_shape\", \"_dtype\", \"_ragged_rank\", \"_row_splits_dtype\",\n \"_flat_values_spec\"\n ]\n\n @property\n def dtype(self):\n \"\"\"The `tf.dtypes.DType` specified by this type for the RaggedTensor.\n\n Examples:\n\n >>> rt = tf.ragged.constant([[\"a\"], [\"b\", \"c\"]], dtype=tf.string)\n >>> tf.type_spec_from_value(rt).dtype\n tf.string\n\n Returns:\n A `tf.dtypes.DType` of the values in the RaggedTensor.\n \"\"\"\n return self._dtype\n\n @property\n def shape(self):\n \"\"\"The statically known shape of the RaggedTensor.\n\n Examples:\n\n >>> rt = tf.ragged.constant([[0], [1, 2]])\n >>> tf.type_spec_from_value(rt).shape\n TensorShape([2, None])\n\n >>> rt = tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1)\n >>> tf.type_spec_from_value(rt).shape\n TensorShape([2, None, 2])\n\n Returns:\n A `tf.TensorShape` containing the statically known shape of 
the\n RaggedTensor. Ragged dimensions have a size of \`None\`.\n \"\"\"\n return self._shape\n\n @property\n def ragged_rank(self):\n \"\"\"The number of times the RaggedTensor's flat_values is partitioned.\n\n Defaults to \`shape.ndims - 1\`.\n\n Examples:\n\n >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])\n >>> tf.type_spec_from_value(values).ragged_rank\n 1\n\n >>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)\n >>> tf.type_spec_from_value(rt1).ragged_rank\n 2\n\n Returns:\n A Python \`int\` indicating the number of times the underlying \`flat_values\`\n Tensor has been partitioned to add a new dimension.\n I.e., \`tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank\`.\n \"\"\"\n return self._ragged_rank\n\n @property\n def row_splits_dtype(self):\n \"\"\"The \`tf.dtypes.DType\` of the RaggedTensor's \`row_splits\`.\n\n Examples:\n\n >>> rt = tf.ragged.constant([[1, 2, 3], [4]], row_splits_dtype=tf.int64)\n >>> tf.type_spec_from_value(rt).row_splits_dtype\n tf.int64\n\n Returns:\n A \`tf.dtypes.DType\` for the RaggedTensor's \`row_splits\` tensor. One\n of \`tf.int32\` or \`tf.int64\`.\n \"\"\"\n return self._row_splits_dtype\n\n @property\n def flat_values_spec(self):\n \"\"\"The \`TypeSpec\` of the flat_values of RaggedTensor.\n\n Returns:\n - The TypeSpec of flat_values.\n - None when the flat_values is a Tensor.\n \"\"\"\n return self._flat_values_spec\n\n @property\n def value_type(self):\n return RaggedTensor if self._ragged_rank > 0 else ops.Tensor\n\n def __init__(self,\n shape=None,\n dtype=dtypes.float32,\n ragged_rank=None,\n row_splits_dtype=dtypes.int64,\n flat_values_spec=None):\n \"\"\"Constructs a type specification for a \`tf.RaggedTensor\`.\n\n Args:\n shape: The shape of the RaggedTensor, or \`None\` to allow any shape. If a\n shape is specified, then all ragged dimensions must have size \`None\`.\n dtype: \`tf.DType\` of values in the RaggedTensor.\n ragged_rank: Python integer, the number of times the RaggedTensor's\n flat_values is partitioned. Defaults to \`shape.ndims - 1\`.\n row_splits_dtype: \`dtype\` for the RaggedTensor's \`row_splits\` tensor. One\n of \`tf.int32\` or \`tf.int64\`.\n flat_values_spec: TypeSpec for flat_values of the RaggedTensor. It shall be\n provided when the flat_values is a CompositeTensor rather than Tensor.\n If both \`dtype\` and \`flat_values_spec\` are provided, \`dtype\` must\n be the same as \`flat_values_spec.dtype\`. (experimental)\n \"\"\"\n self._shape = tensor_shape.as_shape(shape)\n self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n if flat_values_spec is not None:\n if dtype is None:\n dtype = flat_values_spec.dtype\n elif dtype != flat_values_spec.dtype:\n raise ValueError(\"dtype must be the same as flat_values_spec.dtype\")\n elif dtype is None:\n raise ValueError(\n \"At least one of dtype or flat_values_spec must be provided\")\n self._dtype = dtypes.as_dtype(dtype)\n self._flat_values_spec = flat_values_spec\n\n rank = self._shape.ndims\n if ragged_rank is None:\n if rank is None:\n raise ValueError(\"Must specify ragged_rank or \"\n \"a shape with a known rank.\")\n ragged_rank = rank - 1\n self._ragged_rank = ragged_rank\n if not isinstance(self._ragged_rank, int):\n raise TypeError(f\"Argument \`ragged_rank\` must be an int. 
\"\n f\"Received {ragged_rank}.\")\n\n if rank is not None:\n if ragged_rank >= rank:\n raise ValueError(f\"Argument \`ragged_rank\` ({ragged_rank}) must be less \"\n f\"than rank ({rank}).\")\n\n def is_compatible_with(self, spec_or_value):\n # RaggedTensor with ragged_rank 0 can be compatible with raw flat_values.\n if self._ragged_rank == 0:\n if self._flat_values_spec is None:\n if isinstance(spec_or_value, (ops.Tensor, tensor_spec.TensorSpec)):\n return tensor_spec.TensorSpec(\n self._shape, self._dtype).is_compatible_with(spec_or_value)\n elif not isinstance(spec_or_value, (RaggedTensor, RaggedTensorSpec)):\n return self._flat_values_spec.is_compatible_with(spec_or_value)\n return super(RaggedTensorSpec, self).is_compatible_with(spec_or_value)\n\n def _serialize(self):\n if self._flat_values_spec is None:\n return (self._shape, self._dtype, self._ragged_rank,\n self._row_splits_dtype)\n else:\n return (self._shape, self._dtype, self._ragged_rank,\n self._row_splits_dtype, self._flat_values_spec)\n\n @property\n def _component_specs(self):\n if self._ragged_rank == 0:\n if self._flat_values_spec is not None:\n return [self._flat_values_spec]\n else:\n return [tensor_spec.TensorSpec(self._shape, self._dtype)]\n\n flat_values_spec = self._flat_values_spec\n if flat_values_spec is None:\n flat_values_shape = tensor_shape.TensorShape([None]).concatenate(\n self._shape[self._ragged_rank + 1:])\n flat_values_spec = tensor_spec.TensorSpec(flat_values_shape, self._dtype)\n outer_dim = tensor_shape.dimension_at_index(self._shape, 0)\n outer_splits_shape = [None if outer_dim is None else outer_dim + 1]\n inner_splits_spec = tensor_spec.TensorSpec([None], self._row_splits_dtype)\n\n specs = ([\n flat_values_spec,\n tensor_spec.TensorSpec(outer_splits_shape, self._row_splits_dtype)\n ] + [inner_splits_spec for _ in range(self._ragged_rank - 1)])\n return specs\n\n def _to_components(self, value):\n if is_ragged(value):\n return [value.flat_values] + list(value.nested_row_splits)\n else:\n return [value]\n\n def _from_components(self, tensor_list):\n result = tensor_list[0]\n if (all(isinstance(t, np.ndarray) for t in tensor_list) and\n not tf2.enabled()):\n for row_splits in reversed(tensor_list[1:]):\n result = ragged_tensor_value.RaggedTensorValue(result, row_splits)\n else:\n if isinstance(tensor_list[0], np.ndarray):\n tensor_list = [ops.convert_to_tensor(t) for t in tensor_list]\n result = tensor_list[0]\n for row_splits in reversed(tensor_list[1:]):\n result = RaggedTensor(\n result,\n RowPartition.from_row_splits(row_splits, validate=False),\n internal=True)\n if self._shape.ndims is not None:\n if isinstance(result, RaggedTensor):\n result._set_shape(self._shape) # pylint: disable=protected-access\n # TODO(xjun): MaskedTensor doesn't implement set_shape.\n if self.flat_values_spec is not None and hasattr(result.flat_values,\n \"set_shape\"):\n result.flat_values.set_shape(self.flat_values_spec.shape)\n elif isinstance(result, ops.Tensor):\n result.set_shape(self._shape)\n return result\n\n # The RaggedTensorSpec tensor_list encoding uses to/from_variant ops\n # to (un)box the component tensors in a way that allows for batching &\n # unbatching.\n @property\n def _flat_tensor_specs(self):\n # NOTE(mishragaurav): The default flat shape of a boxed \`RaggedTensor\` is\n # \`[]\` (scalar), but a \`RaggedTensorSpec\` can also represent a batch of\n # boxed \`RaggedTensor\` objects with shape \`(...)\` (and batches of batches,\n # etc.), so the flat shape must be unknown.\n return 
[tensor_spec.TensorSpec(None, dtypes.variant)]\n\n def _to_tensor_list(self, value):\n # TODO(edloper): Update gen_ragged_conversion_ops that convert to and\n # from variant to include all of the row-partitioning tensors.\n if self._flat_values_spec is not None:\n raise ValueError(\"Customized value_type is not supported.\")\n ragged_rank = value.ragged_rank if isinstance(value, RaggedTensor) else 0\n if ragged_rank != self._ragged_rank:\n raise ValueError(f\"Ragged rank of value {ragged_rank} does not match \"\n f\"ragged rank of type {self._ragged_rank}.\")\n if ragged_rank == 0:\n return [\n gen_ragged_conversion_ops.ragged_tensor_to_variant(\n (), value, batched_input=False)\n ]\n # pylint: disable=protected-access\n return [value._to_variant(batched_input=False)]\n\n def _to_batched_tensor_list(self, value):\n if self._flat_values_spec is not None:\n raise ValueError(\"Customized value_type is not supported.\")\n ragged_rank = value.ragged_rank if isinstance(value, RaggedTensor) else 0\n if ragged_rank != self._ragged_rank:\n raise ValueError(f\"Ragged rank of value {ragged_rank} does not match \"\n f\"ragged rank of type {self._ragged_rank}.\")\n if ragged_rank == 0:\n # TODO(b/141789000) Update this to handle ragged_rank=0.\n raise ValueError(\n \"_to_batched_tensor_list doesn't support ragged_rank=0 yet\")\n # pylint: disable=protected-access\n return [value._to_variant(batched_input=True)]\n\n def _from_compatible_tensor_list(self, tensor_list):\n if self._flat_values_spec is not None:\n raise ValueError(\"Customized value_type is not supported.\")\n if self._ragged_rank < 0:\n raise ValueError(f\"Argument `ragged_rank` must be non-negative. \"\n f\"Received {self._ragged_rank}.\")\n result = RaggedTensor._from_variant( # pylint: disable=protected-access\n tensor_list[0],\n dtype=self._dtype,\n row_splits_dtype=self._row_splits_dtype,\n output_ragged_rank=self._ragged_rank)\n if self._shape.ndims is not None:\n if isinstance(result, RaggedTensor):\n result._set_shape(self._shape) # pylint: disable=protected-access\n # TODO(xjun): MaskedTensor doesn't implement set_shape.\n if self.flat_values_spec is not None and hasattr(self.flat_values,\n \"set_shape\"):\n result.flat_values.set_shape(self.flat_values_spec.shape)\n else:\n result.set_shape(self._shape)\n return result\n\n def _batch(self, batch_size):\n if self._flat_values_spec is not None:\n raise ValueError(\"Customized value_type is not supported.\")\n return RaggedTensorSpec(\n tensor_shape.TensorShape([batch_size]).concatenate(self._shape),\n self._dtype, self._ragged_rank + 1, self._row_splits_dtype)\n\n def _unbatch(self):\n if self._flat_values_spec is not None:\n raise ValueError(\"Customized value_type is not supported.\")\n # Note: Negative ragged_rank is allowed here because the dataset could be\n # subsequently batched again. If ragged_rank > 1, assume row_splits_dtype is\n # consistent. 
Errors are handled in\n # RaggedTensorSpec._from_compatible_tensor_list()\n return RaggedTensorSpec(self._shape[1:], self._dtype, self._ragged_rank - 1,\n self._row_splits_dtype)\n\n def _to_legacy_output_types(self):\n return self._dtype\n\n def _to_legacy_output_shapes(self):\n return self._shape\n\n def _to_legacy_output_classes(self):\n return self\n\n @classmethod\n def from_value(cls, value):\n if (isinstance(value, ragged_tensor_value.RaggedTensorValue) or\n isinstance(value.flat_values, ops.Tensor)):\n return cls(\n shape=value.shape,\n dtype=value.values.dtype,\n ragged_rank=value.ragged_rank,\n row_splits_dtype=value.row_splits.dtype)\n else:\n return cls(\n shape=value.shape,\n dtype=value.values.dtype,\n ragged_rank=value.ragged_rank,\n row_splits_dtype=value.row_splits.dtype,\n flat_values_spec=type_spec.type_spec_from_value(value.flat_values))\n\n\ntype_spec.register_type_spec_from_value_converter(\n ragged_tensor_value.RaggedTensorValue, RaggedTensorSpec.from_value)\n\n\n#===============================================================================\n# Convert value -> tensor\n#===============================================================================\ndef convert_to_tensor_or_ragged_tensor(value,\n dtype=None,\n preferred_dtype=None,\n name=None):\n \"\"\"Converts value to a `RaggedTensor` or `Tensor`.\n\n * If `value` is a `RaggedTensor`, then return it as-is.\n * If `value` is a `RaggedTensorValue`, return a corresponding constant\n `RaggedTensor`.\n * Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`.\n\n Args:\n value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has\n a registered `Tensor` conversion function.\n dtype: Optional element type for the returned tensor. If missing the type\n is inferred from the type of `value`.\n preferred_dtype: Optional element type for the returned tensor, used when\n dtype is None. 
This argument has no effect if \`value\` is already a\n tensor, or when conversion is not possible.\n name: Optional name to use if a new \`Tensor\` is created.\n\n Returns:\n A \`Tensor\` or \`RaggedTensor\`.\n \"\"\"\n if isinstance(value, RaggedTensor):\n if dtype and not dtype.is_compatible_with(value.dtype):\n raise ValueError(f\"Tensor conversion requested dtype {dtype.name} for \"\n f\"RaggedTensor with dtype {value.dtype.name}: {value}.\")\n return value\n elif isinstance(value, ragged_tensor_value.RaggedTensorValue):\n with ops.name_scope(name, \"ConvertToTensorOrRaggedTensor\", []):\n flat_values = ops.convert_to_tensor(\n value=value.flat_values,\n dtype=dtype,\n dtype_hint=preferred_dtype,\n name=\"flat_values\")\n return RaggedTensor.from_nested_row_splits(\n flat_values, value.nested_row_splits, validate=False)\n else:\n return ops.convert_to_tensor_v2_with_dispatch(\n value=value, dtype=dtype, dtype_hint=preferred_dtype, name=name)\n\n\ndef _convert_to_ragged_tensor_values(value):\n \"\"\"Converts value to supported RaggedTensor value.\n\n * If \`value\` is an object of supported value type, then return it as-is.\n * Otherwise convert it to Tensor or RaggedTensor.\n\n Args:\n value: An object of \`Tensor\`, \`RaggedTensor\` or registered RaggedTensor\n value types, or an object whose type has a registered \`Tensor\` conversion\n function.\n\n Returns:\n An object of \`Tensor\`, \`RaggedTensor\` or registered RaggedTensor\n value types.\n \"\"\"\n if _is_supported_ragged_values_type(value):\n return value\n else:\n return convert_to_tensor_or_ragged_tensor(value, name=\"values\")\n\n\n#===============================================================================\n# Register RaggedTensor for use with session.run.\n#===============================================================================\ndef _ragged_tensor_value_from_components(components):\n components = list(components)\n value = components.pop()\n while components:\n value = ragged_tensor_value.RaggedTensorValue(value, components.pop())\n return value\n\n\ndef _ragged_tensor_session_fetch(rt):\n components = rt.nested_row_splits + (rt.flat_values,)\n return (components, _ragged_tensor_value_from_components)\n\n\ndef _ragged_tensor_session_feed(feed_key, feed_val):\n key_components = feed_key.nested_row_splits + (feed_key.flat_values,)\n val_components = feed_val.nested_row_splits + (feed_val.flat_values,)\n return zip(key_components, val_components)\n\n\ndef _ragged_tensor_session_feed_for_partial_run(feed_key):\n return feed_key.nested_row_splits + (feed_key.flat_values,)\n\n\nsession.register_session_run_conversion_functions(\n RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed,\n _ragged_tensor_session_feed_for_partial_run)\n\n\n#===============================================================================\n# RaggedTensorType\n#===============================================================================\nclass RaggedTensorType:\n \"\"\"Encoding of a static type for a \`RaggedTensor\`.\n\n Use this type to express/declare that an output must have the type of\n \`RaggedTensor\`.\n \"\"\"\n\n def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):\n \"\"\"Initializes a RaggedTensorType object.\n\n Args:\n dtype: data type of the \`RaggedTensor\`'s inner values.\n ragged_rank: ragged_rank of the declared \`RaggedTensor\`.\n row_splits_dtype: data type for the \`RaggedTensor\`'s row splits.\n One of: \`tf.int32\` or \`tf.int64\`.\n \"\"\"\n row_splits_dtype = dtypes.as_dtype(row_splits_dtype)\n self._dtype = 
dtype\n self._ragged_rank = ragged_rank\n self._row_splits_dtype = row_splits_dtype\n\n dtype = property(lambda self: self._dtype)\n ragged_rank = property(lambda self: self._ragged_rank)\n row_splits_dtype = property(lambda self: self._row_splits_dtype)\n\n def __repr__(self):\n return \"RaggedTensorType(%r, %r, %r)\" % (self.dtype, self.ragged_rank,\n self.row_splits_dtype)\n\n\n#===============================================================================\n# Helper Functions\n#===============================================================================\ndef _assert_sparse_indices_are_ragged_right(indices):\n \"\"\"Checks that the given SparseTensor.indices tensor is ragged-right.\n\n Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right\n because the entry `[3, 1]` skips a cell.\n\n Args:\n indices: The SparseTensor indices to check.\n\n Returns:\n A list of control dependency op tensors.\n \"\"\"\n index_prefix = indices[:, :-1]\n index_suffix = indices[:, -1]\n\n # Check whether each index is starting a new row in the innermost dimension\n # (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).\n # (Note: this skips the first index; we will check that separately below.)\n index_prefix_changed = math_ops.reduce_any(\n math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)\n\n # Check two cases:\n # * For indices that start a new row: index_suffix[i] must be zero.\n # * For indices that continue a row: index_suffix[i] must be equal to\n # index_suffix[i-1]+1.\n index_ok = array_ops.where(\n index_prefix_changed, math_ops.equal(index_suffix[1:], 0),\n math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))\n\n # Also check that the very first index didn't skip any cells. The first\n # index starts a new row (by definition), so its suffix should be zero.\n sparse_indices_are_ragged_right = math_ops.logical_and(\n math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),\n math_ops.reduce_all(index_ok))\n\n message = [\n \"SparseTensor is not right-ragged\", \"SparseTensor.indices =\", indices\n ]\n return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]\n\n\[email protected](\"RaggedTensorToSparse\")\ndef _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad,\n sparse_values_grad,\n unused_sparse_shape_grad):\n \"\"\"Gradient for RaggedTensorToSparse.\"\"\"\n op_inputs_nested_row_splits = op.inputs[:-1]\n op_inputs_flat_values = op.inputs[-1]\n\n # No gradient for the RaggedTensor's nested_row_splits.\n nested_row_splits_gradient = [None] * len(op_inputs_nested_row_splits)\n\n # Gradient for the RaggedTensor's flat_values is formed by reshaping\n # the gradient for the SparseTensor's values.\n flat_values_shape = array_ops.shape(op_inputs_flat_values)\n flat_values_gradient = array_ops.reshape(sparse_values_grad,\n flat_values_shape)\n\n return nested_row_splits_gradient + [flat_values_gradient]\n\n\ndef _assert_monotonic_increasing(tensor, message=None):\n return check_ops.assert_non_negative(\n tensor[1:] - tensor[:-1], message=message)\n\n\ndef _assert_zero(tensor, message=None):\n return check_ops.assert_equal(\n tensor, constant_op.constant(0, dtype=tensor.dtype), message=message)\n\n\ndef _nrows(tensor, out_type=dtypes.int32):\n if isinstance(tensor, RaggedTensor):\n return tensor.nrows(out_type=out_type)\n else:\n return array_ops.shape(tensor, out_type=out_type)[0]\n\n\ndef merge_dims(value, outer_axis, inner_axis):\n \"\"\"Merges value[outer_axis...inner_axis] into a single dimension.\n\n See 
`RaggedTensor.merge_dims()` for more details. This helper differs from\n `RaggedTensor.merge_dims()` in that `value` may be a dense or ragged tensor.\n\n Args:\n value: A `RaggedTensor` or `Tensor`\n outer_axis: `int`\n inner_axis: `int`\n\n Returns:\n A flattened `RaggedTensor` or `Tensor`.\n \"\"\"\n if outer_axis == inner_axis:\n return value\n\n # Flatten outer dimensions of a RaggedTensor by just taking its values.\n while outer_axis == 0 and isinstance(value, RaggedTensor):\n value = value.values\n inner_axis -= 1\n if inner_axis == 0:\n return value\n\n # Flatten non-Ragged tensors using tf.reshape().\n if not isinstance(value, RaggedTensor):\n if value.shape.is_fully_defined():\n old_shape = value.shape.as_list()\n new_shape = old_shape[:outer_axis] + [-1] + old_shape[inner_axis + 1:]\n else:\n old_shape = array_ops.shape(value)\n new_shape = array_ops.concat(\n [old_shape[:outer_axis], [-1], old_shape[inner_axis + 1:]], axis=0)\n return array_ops.reshape(value, new_shape)\n\n # Handle outer_axis>1 via recursion.\n if outer_axis > 1:\n return value.with_values(\n merge_dims(value.values, outer_axis - 1, inner_axis - 1))\n\n # At this point, we know outer_axis == 1, and value is a RaggedTensor.\n # So we need to flatten the values and build a corresponding splits tensor.\n new_values = value.values\n new_splits = value.row_splits\n for axis in range(outer_axis, inner_axis):\n if isinstance(new_values, RaggedTensor):\n # Flatten a single ragged dimension.\n new_splits = array_ops.gather(new_values.row_splits, new_splits)\n new_values = new_values.values\n else:\n # Flatten all remaining dense dimensions.\n shape_split = inner_axis - axis + 1\n if new_values.shape.is_fully_defined():\n old_shape = new_values.shape.as_list()\n new_shape = [-1] + old_shape[shape_split:]\n flat_size = _prod(old_shape[1:shape_split])\n else:\n old_shape = array_ops.shape(new_values)\n new_shape = array_ops.concat([[-1], old_shape[shape_split:]], axis=0)\n flat_size = math_ops.cast(\n math_ops.reduce_prod(old_shape[1:shape_split]), new_splits.dtype)\n new_values = array_ops.reshape(new_values, new_shape)\n new_splits = new_splits * flat_size\n break\n return RaggedTensor.from_row_splits(new_values, new_splits)\n\n\ndef _prod(lst):\n \"\"\"Returns the product of the numbers in a list.\"\"\"\n return functools.reduce(operator.mul, lst, 1)\n\n\ndef _get_row_partition_type_tensor_pairs_tail(partition):\n \"\"\"Gets a row partition type tensor pair for the tail.\n\n If value_rowid is defined, then it is used. Otherwise, row_splits\n are used.\n\n Args:\n partition: a RowPartition.\n\n Returns:\n A list of (row_partition_type, row_partition_tensor) pairs.\n \"\"\"\n if partition.has_precomputed_value_rowids():\n return (\"VALUE_ROWIDS\", partition.value_rowids())\n else:\n return (\"ROW_SPLITS\", partition.row_splits())\n\n\ndef _get_row_partition_type_tensor_pairs(rt_input):\n \"\"\"Gets a list of the row partitions for rt_input.\n\n If value_rowids are defined, then they are used. Otherwise, row_splits\n are used. 
If the outermost level has value_rowids defined, then nrows is\n also added.\n\n Args:\n rt_input: a ragged tensor.\n\n Returns:\n A list of (row_partition_type, row_partition_tensor) pairs.\n \"\"\"\n partitions = rt_input._nested_row_partitions # pylint: disable=protected-access\n tail = [_get_row_partition_type_tensor_pairs_tail(x) for x in partitions[1:]]\n\n if partitions[0]._value_rowids is not None: # pylint: disable=protected-access\n return [(\"FIRST_DIM_SIZE\", partitions[0].nrows()),\n (\"VALUE_ROWIDS\", partitions[0].value_rowids())] + tail\n else:\n return [(\"ROW_SPLITS\", partitions[0].row_splits())] + tail\n\n\ndef _shape_as_tensor(shape, dtype):\n \"\"\"Takes shape and coerces it to a shape as a tensor.\n\n If the object is already a tensor, simply passes it on (result is guaranteed\n to be int64 or int32, but not necessarily dtype).\n If not, creates a tensor of type dtype.\n\n Result is a scalar equal to -1 if the shape is unknown_rank.\n Otherwise, it is a vector, where unknown dimensions are represented with a\n value of -1.\n\n In C++, see TensorShapeFromTensor for parsing shapes in kernels, and\n InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape, for\n use in the shape inference function.\n\n Args:\n shape: input to coerce from TensorShape, Tensor, None, List[Optional[Int]],\n Tuple[Optional[Int]].\n dtype: tf.int64 or tf.int32\n\n Returns:\n a scalar or vector tensor of dtype tf.int32 or tf.int64.\n \"\"\"\n if dtype != dtypes.int64 and dtype != dtypes.int32:\n raise ValueError(f\"Expected int64 or int32 for dtype: got {dtype}.\")\n\n if isinstance(shape, ops.Tensor):\n if shape.dtype != dtypes.int64 and shape.dtype != dtypes.int32:\n return math_ops.cast(shape, dtype)\n return shape\n shape = tensor_shape.as_shape(shape)\n if not shape:\n # Imply rank is unknown using a -1 scalar.\n return constant_op.constant(-1, dtype=dtype)\n shape = [(-1 if x is None else x) for x in shape.as_list()]\n # At this point, shape is List[Int].\n return constant_op.constant(shape, dtype=dtype)\n\n\ndef _nvals_uniform_row_length(values, uniform_row_length):\n \"\"\"Get the number of values for uniform row length constructor.\"\"\"\n const_nvals = tensor_shape.dimension_at_index(values.shape, 0).value\n if const_nvals is not None:\n nvals = constant_op.constant(const_nvals, uniform_row_length.dtype)\n elif isinstance(values, RaggedTensor):\n nvals = values.nrows(out_type=uniform_row_length.dtype)\n else:\n nvals = array_ops.shape(values, out_type=uniform_row_length.dtype)[0]\n return nvals\n\n\ndef _get_optional_partition_dtype(values):\n \"\"\"Returns the partition dtype, or None if none exists.\"\"\"\n if isinstance(values, RaggedTensor):\n # pylint: disable=protected-access\n return values._row_partition.dtype\n return None\n\n\n_SUPPORTED_RAGGED_VALUE_TYPES = (ops.Tensor, RaggedTensor)\n\n\n# TODO(edloper): Consider whether we should change the registry to be on\n# TypeSpecs rather than ValueTypes.\ndef _add_supported_value_type(cls):\n \"\"\"Register the \`cls\` as supported value type of RaggedTensor.\n\n The cls must be a subclass of CompositeTensor, and must support:\n - Properties:\n - x.shape\n - x.dtype\n - Methods:\n - x.__getitem__(idx) (method: returns a supported value type)\n - x.set_shape(shape)\n - Ops:\n - tf.shape(x) -- tf.shape(x)[0] must be a tf.Tensor.\n - tf.tile(x)\n - assert_rank_at_least(x)\n - tf.ones_like(x)\n - tf.gather(params=x, indices=Tensor)\n - tf.add(x, y)\n - tf.boolean_mask(x, ...)\n - @TODO(edloper): Complete this list\n\n 
Note: the following RaggedTensor, RaggedTensorSpec methods & ops are not\n currently supported unless `rt.values` is a RaggedTensor or a tf.Tensor:\n - rt.to_tensor()\n - rt.to_sparse_tensor()\n - rt._to_variant()\n - rt._from_variant()\n - tf.ragged.cross([rt])\n - tf.gather(params=x, indices=rt) # rt used for indices\n - RaggedTensorSpec methods:\n - _batch\n - _unbatch\n - _to_tensor_list\n - _to_batched_tensor_list\n - _from_compatible_tensor_list\n\n Args:\n cls: The type to be added to supported value types.\n \"\"\"\n if not issubclass(cls, composite_tensor.CompositeTensor):\n raise ValueError(f\"cls ({cls}) must be a subclass of CompositeTensor.\")\n if not hasattr(cls, \"shape\"):\n raise ValueError(\"cls must support the `shape` property.\")\n if not hasattr(cls, \"dtype\"):\n raise ValueError(\"cls must support the `dtype` property.\")\n global _SUPPORTED_RAGGED_VALUE_TYPES\n _SUPPORTED_RAGGED_VALUE_TYPES += (cls,)\n\n\ndef _is_supported_ragged_values_type(value):\n return isinstance(value, _SUPPORTED_RAGGED_VALUE_TYPES)\n\n\ndef _assert_is_supported_ragged_values_type(value):\n if not _is_supported_ragged_values_type(value):\n ok_types = \", \".join(cls.__name__ for cls in _SUPPORTED_RAGGED_VALUE_TYPES)\n raise TypeError(f\"type(values) must be one of: {ok_types}, got {value}.\")\n\n\ndef _formatter(x):\n \"\"\"Separate Numpy array elements with comma.\"\"\"\n if isinstance(x, np.ndarray):\n return np.array2string(x, separator=\", \")\n else:\n return str(x)\n\n# Type annotation indicating that a value is ragged. Includes RaggedTensor\n# as well as the (deprecated) RaggedTensorValue class from TF 1.x.\nRagged = typing.Union[RaggedTensor, ragged_tensor_value.RaggedTensorValue]\n\n# Type annotation indicating that a value is a ragged tensor, a dense tensor,\n# or a value that can be converted to a tensor (e.g. np.array).\n# TODO(edloper): Add Variable to TensorLike, and remove it from here.\nRaggedOrDense = typing.Union[Ragged, core_types.TensorLike]\n",
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for RaggedTensor supported value types.\"\"\"\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.python.eager import context\nfrom tensorflow.python.framework import composite_tensor\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.framework import type_spec\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import string_ops\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.ops.ragged import ragged_tensor\nfrom tensorflow.python.ops.ragged import ragged_tensor_test_ops as test_ops\nfrom tensorflow.python.ops.ragged.ragged_tensor import RaggedTensor\nfrom tensorflow.python.ops.ragged.ragged_tensor import RaggedTensorSpec\nfrom tensorflow.python.platform import googletest\nfrom tensorflow.python.util import dispatch\n\n\nclass WrappedTensor(composite_tensor.CompositeTensor):\n \"\"\"A class used to test extending RaggedTensor value type support.\n\n Simply wraps a `tf.Tensor` value.\n \"\"\"\n\n def __init__(self, value):\n if not isinstance(value, ops.Tensor):\n raise ValueError(\"Expect Tensor object, but get '%s'\" % value)\n self.value = value\n\n @property\n def shape(self):\n return self.value.shape\n\n @property\n def dtype(self):\n return self.value.dtype\n\n def __getitem__(self, idx):\n return WrappedTensor(self.value.__getitem__(idx))\n\n @property\n def _type_spec(self):\n return WrappedTensorSpec(type_spec.type_spec_from_value(self.value))\n\n def set_shape(self, shape):\n return self.value.set_shape(shape)\n\n\nclass WrappedTensorSpec(type_spec.TypeSpec):\n\n def __init__(self, value_spec):\n self._value_spec = value_spec\n\n @property\n def dtype(self):\n return self._value_spec.dtype\n\n @property\n def value_type(self):\n return WrappedTensor\n\n @property\n def shape(self):\n return self._value_spec.shape\n\n def _to_components(self, value):\n return value.value\n\n def _from_components(self, value):\n return WrappedTensor(value)\n\n def _component_specs(self):\n return self._value_spec\n\n def _serialize(self):\n return (self._value_spec,)\n\n\nclass WrappedTensorOpDispatcher(dispatch.GlobalOpDispatcher):\n \"\"\"Global op dispatcher for WrappedTensor.\"\"\"\n\n # For these ops, just return plain Tensors (not WrappedTensors).\n OPS_THAT_RETURN_UNTRACED_RESULTS = (array_ops.shape, array_ops.shape_v2,\n check_ops.assert_rank_at_least)\n\n def call_op(self, op, 
*args, **kwargs):\n return op(*args, **kwargs)\n\n def handle(self, op, args, kwargs):\n # Dispatcher only applies if at least one arg is a WrappedTensor.\n if not (any(self.is_wrapped_tensor_arg(x) for x in args) or\n any(self.is_wrapped_tensor_arg(x) for x in kwargs.values())):\n return self.NOT_SUPPORTED\n\n args = [self.unwrap(v) for v in args]\n kwargs = dict([(k, self.unwrap(v)) for (k, v) in kwargs.items()])\n value = self.call_op(op, *args, **kwargs)\n if op in self.OPS_THAT_RETURN_UNTRACED_RESULTS:\n return value\n else:\n return WrappedTensor(value)\n\n def unwrap(self, value):\n if isinstance(value, WrappedTensor):\n return value.value\n elif isinstance(value, (list, tuple)):\n return type(value)([self.unwrap(v) for v in value])\n else:\n return value\n\n def is_wrapped_tensor_arg(self, value):\n if isinstance(value, WrappedTensor):\n return True\n if isinstance(value, (list, tuple)):\n if any(isinstance(x, WrappedTensor) for x in value):\n return True\n return False\n\n\nWrappedTensorOpDispatcher().register()\nragged_tensor._add_supported_value_type(WrappedTensor)\n\n\n# pylint: disable=g-complex-comprehension\n@test_util.run_all_in_graph_and_eager_modes\nclass RaggedTensorSupportedValuesTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def assertAllTensorsEqual(self, list1, list2):\n self.assertLen(list1, len(list2))\n for (t1, t2) in zip(list1, list2):\n self.assertAllEqual(t1, t2)\n\n def testConstruction(self):\n tensor_values = constant_op.constant(\n ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])\n values = WrappedTensor(tensor_values)\n\n row_splits = constant_op.constant([0, 2, 2, 5, 6, 8], dtypes.int64)\n rt = RaggedTensor.from_row_splits(values, row_splits)\n self.assertIsInstance(rt.values, WrappedTensor)\n self.assertAllEqual(rt.values.value, tensor_values)\n self.assertAllEqual(rt.row_splits, row_splits)\n\n row_starts = constant_op.constant([0, 2, 2, 5, 6], dtypes.int64)\n rt = RaggedTensor.from_row_starts(values, row_starts)\n self.assertIsInstance(rt.values, WrappedTensor)\n self.assertAllEqual(rt.values.value, tensor_values)\n self.assertAllEqual(rt.row_starts(), row_starts)\n\n row_limits = constant_op.constant([2, 2, 5, 6, 8], dtypes.int64)\n rt = RaggedTensor.from_row_limits(values, row_limits)\n self.assertIsInstance(rt.values, WrappedTensor)\n self.assertAllEqual(rt.values.value, tensor_values)\n self.assertAllEqual(rt.row_limits(), row_limits)\n\n row_lengths = constant_op.constant([2, 0, 3, 1, 2], dtypes.int64)\n rt = RaggedTensor.from_row_lengths(values, row_lengths)\n self.assertIsInstance(rt.values, WrappedTensor)\n self.assertAllEqual(rt.values.value, tensor_values)\n self.assertAllEqual(rt.row_lengths(), row_lengths)\n\n rt = RaggedTensor.from_uniform_row_length(values, 4)\n self.assertIsInstance(rt.values, WrappedTensor)\n self.assertAllEqual(rt.values.value, tensor_values)\n self.assertAllEqual(rt.uniform_row_length, 4)\n\n def testWithValues(self):\n tensor_values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])\n values = WrappedTensor(tensor_values)\n nested_row_splits = [[0, 2, 5], [0, 2, 2, 5, 6, 7]]\n rt = RaggedTensor.from_nested_row_splits(values, nested_row_splits)\n\n tensor_int = constant_op.constant([1, 2, 3, 4, 5])\n rt_int = rt.with_values(tensor_int)\n self.assertAllEqual(rt_int.values, tensor_int)\n\n rt_wrapped_int = rt.with_values(WrappedTensor(tensor_int))\n self.assertIsInstance(rt_wrapped_int.values, WrappedTensor)\n self.assertAllEqual(rt_wrapped_int.values.value, tensor_int)\n\n def 
testWithFlatValues(self):\n tensor_values = constant_op.constant(['a', 'b', 'c', 'd', 'e', 'f', 'g'])\n values = WrappedTensor(tensor_values)\n nested_row_splits = [[0, 2, 5], [0, 2, 2, 5, 6, 7]]\n rt = RaggedTensor.from_nested_row_splits(values, nested_row_splits)\n\n tensor_int = constant_op.constant([1, 2, 3, 4, 5, 6, 7])\n rt_int = rt.with_flat_values(tensor_int)\n self.assertAllEqual(rt_int.flat_values, tensor_int)\n\n rt_wrapped_int = rt.with_flat_values(WrappedTensor(tensor_int))\n self.assertIsInstance(rt_wrapped_int.flat_values, WrappedTensor)\n self.assertAllEqual(rt_wrapped_int.flat_values.value, tensor_int)\n\n @parameterized.parameters(\n #=========================================================================\n # Test each unary op.\n #=========================================================================\n [{'x': ([[-2.0, 3.0], [-3.0]]), 'op': op}\n for op in test_ops.UNARY_FLOAT_OPS] +\n [{'x': ([[True, False], [True]]),\n 'op': op}\n for op in test_ops.UNARY_BOOL_OPS] +\n [{'x': [[18, 512], [12412]],\n 'x_dtype': dtypes.int32,\n 'op': op}\n for op in test_ops.UNARY_INT_OPS] +\n [{'x': ([['abcd', 'efgh'], ['aabbccdd']]),\n 'op': op}\n for op in test_ops.UNARY_STRING_OPS] +\n [\n {'op': clip_ops.clip_by_value,\n 'x': ([[-2.0, 3.0], [-3.0]]),\n 'clip_value_min': 0.1, 'clip_value_max': 4.0},\n {'op': math_ops.cast,\n 'x': ([[-2.0, 3.0], [-3.0]]),\n 'dtype': dtypes.int32},\n {'op': math_ops.saturate_cast,\n 'x': ([[-2.0, 3.0], [-3.0]]),\n 'dtype': dtypes.int32},\n {'op': string_ops.string_to_hash_bucket,\n 'x': (\n [['abcd', 'efgh'], ['aabbccdd']]),\n 'num_buckets': 1000},\n {'op': string_ops.string_to_hash_bucket_fast,\n 'x': (\n [['abcd', 'efgh'], ['aabbccdd']]),\n 'num_buckets': 1000},\n {'op': string_ops.string_to_hash_bucket_strong,\n 'x': (\n [['abcd', 'efgh'], ['aabbccdd']]),\n 'num_buckets': 1000,\n 'key': [1231, 12512]},\n {'op': string_ops.string_to_number,\n 'x': ([['-2.0', '3.0'], ['-3.0']])},\n {'op': string_ops.regex_full_match,\n 'x': ([['hello', '123'], ['1+1']]),\n 'pattern': r'\\w+'},\n {'op': string_ops.regex_replace,\n 'x': ([['hello', '123'], ['1+1']]),\n 'pattern': r'\\d',\n 'rewrite': '#'},\n {'op': string_ops.substr,\n 'x': ([['hello', '123'], ['1+1']]),\n 'pos': 2, 'len': 3},\n {'op': array_ops.check_numerics,\n 'x': ([[-2.0, 3.0], [-3.0]]),\n 'message': 'check-numerics'},\n {'op': nn_ops.dropout,\n 'x': ([[-2.0, 3.0], [-3.0]]),\n 'rate': 0.5,\n 'seed': 1},\n {'op': array_ops.expand_dims_v2,\n 'x': ([[-2.0, 3.0], [-3.0]]),\n 'axis': -1},\n ]) # pyformat: disable\n def testUnaryElementwiseOp(self,\n x,\n x_dtype=None,\n op=math_ops.abs,\n **extra_args):\n x = ragged_factory_ops.constant(x, x_dtype)\n wrapped_x = ragged_tensor.RaggedTensor.from_nested_row_splits(\n WrappedTensor(x.flat_values), x.nested_row_splits)\n test_util.random_seed.set_seed(1234)\n res = op(x, **extra_args)\n test_util.random_seed.set_seed(1234)\n wrapped_res = op(wrapped_x, **extra_args)\n self.assertIsInstance(wrapped_res.flat_values, WrappedTensor)\n self.assertAllEqual(wrapped_res.flat_values.value, res.flat_values)\n self.assertAllTensorsEqual(wrapped_res.nested_row_splits,\n res.nested_row_splits)\n\n @parameterized.parameters(\n #=========================================================================\n # Test each binary op.\n #=========================================================================\n [{'x': [[-2.0, 3.0], [-3.0]],\n 'y': [[5.0, 1.0], [12.0]],\n 'op': op}\n for op in test_ops.BINARY_FLOAT_OPS] +\n [{'x': [[-2, 3], [-3]],\n 'y': [[5, 1], [12]],\n 'op': 
op}\n for op in test_ops.BINARY_INT_OPS] +\n [{'x': [[True, True], [False]],\n 'y': [[False, True], [False]],\n 'op': op}\n for op in test_ops.BINARY_BOOL_OPS]\n ) # pyformat: disable\n def testBinaryElementwiseOp(self, x, y, op=math_ops.add):\n x = ragged_factory_ops.constant(x)\n y = ragged_factory_ops.constant(y)\n wrapped_x = ragged_tensor.RaggedTensor.from_nested_row_splits(\n WrappedTensor(x.flat_values), x.nested_row_splits)\n wrapped_y = ragged_tensor.RaggedTensor.from_nested_row_splits(\n WrappedTensor(y.flat_values), y.nested_row_splits)\n res = op(x, y)\n wrapped_res = op(wrapped_x, wrapped_y)\n self.assertIsInstance(wrapped_res.flat_values, WrappedTensor)\n self.assertAllEqual(wrapped_res.flat_values.value, res.flat_values)\n self.assertAllTensorsEqual(wrapped_res.nested_row_splits,\n res.nested_row_splits)\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass RaggedTensorSpecSupportedValuesTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n\n def assertAllTensorsEqual(self, list1, list2):\n self.assertLen(list1, len(list2))\n for (t1, t2) in zip(list1, list2):\n self.assertAllEqual(t1, t2)\n\n def testConstruction(self):\n flat_values_spec = WrappedTensorSpec(\n tensor_spec.TensorSpec(shape=(None, 5), dtype=dtypes.float32))\n spec1 = RaggedTensorSpec(\n shape=None,\n dtype=dtypes.float32,\n ragged_rank=1,\n row_splits_dtype=dtypes.int64,\n flat_values_spec=flat_values_spec)\n self.assertIsNone(spec1._shape.rank)\n self.assertEqual(spec1._dtype, dtypes.float32)\n self.assertEqual(spec1._row_splits_dtype, dtypes.int64)\n self.assertEqual(spec1._ragged_rank, 1)\n self.assertEqual(spec1._flat_values_spec, flat_values_spec)\n\n self.assertIsNone(spec1.shape.rank)\n self.assertEqual(spec1.dtype, dtypes.float32)\n self.assertEqual(spec1.row_splits_dtype, dtypes.int64)\n self.assertEqual(spec1.ragged_rank, 1)\n self.assertEqual(spec1.flat_values_spec, flat_values_spec)\n\n with self.assertRaisesRegex(\n ValueError, 'dtype must be the same as flat_values_spec.dtype'):\n spec1 = RaggedTensorSpec(\n shape=None,\n dtype=dtypes.float64,\n ragged_rank=1,\n row_splits_dtype=dtypes.int64,\n flat_values_spec=flat_values_spec)\n\n @parameterized.parameters([\n (RaggedTensorSpec(\n ragged_rank=1,\n flat_values_spec=tensor_spec.TensorSpec(None, dtypes.float32)),\n (tensor_shape.TensorShape(None), dtypes.float32, 1, dtypes.int64,\n tensor_spec.TensorSpec(None, dtypes.float32))),\n (RaggedTensorSpec(\n shape=(5, None, 5),\n ragged_rank=1,\n dtype=dtypes.float64,\n flat_values_spec=tensor_spec.TensorSpec(\n (5,), dtypes.float64)), (tensor_shape.TensorShape(\n (5, None, 5)), dtypes.float64, 1, dtypes.int64,\n tensor_spec.TensorSpec((5,),\n dtypes.float64))),\n ])\n def testSerialize(self, rt_spec, expected):\n serialization = rt_spec._serialize()\n # TensorShape has an unconventional definition of equality, so we can't use\n # assertEqual directly here. 
But repr() is deterministic and lossless for\n # the expected values, so we can use that instead.\n self.assertEqual(repr(serialization), repr(expected))\n\n @parameterized.parameters([\n (RaggedTensorSpec(\n ragged_rank=0,\n shape=[5, 3],\n flat_values_spec=WrappedTensorSpec(\n tensor_spec.TensorSpec([5, 3], dtypes.float32))),\n [WrappedTensorSpec(tensor_spec.TensorSpec([5, 3], dtypes.float32))]),\n (RaggedTensorSpec(\n ragged_rank=1,\n flat_values_spec=WrappedTensorSpec(\n tensor_spec.TensorSpec([None, 3], dtypes.float32))),\n [\n WrappedTensorSpec(tensor_spec.TensorSpec([None, 3], dtypes.float32)),\n tensor_spec.TensorSpec([None], dtypes.int64),\n ]),\n (RaggedTensorSpec(\n ragged_rank=2,\n dtype=dtypes.float64,\n flat_values_spec=WrappedTensorSpec(\n tensor_spec.TensorSpec([None, 3], dtypes.float64))),\n [\n WrappedTensorSpec(tensor_spec.TensorSpec([None, 3], dtypes.float64)),\n tensor_spec.TensorSpec([None], dtypes.int64),\n tensor_spec.TensorSpec([None], dtypes.int64),\n ]),\n (RaggedTensorSpec(\n shape=[5, None, None],\n dtype=dtypes.string,\n flat_values_spec=WrappedTensorSpec(\n tensor_spec.TensorSpec([None, 3], dtypes.string))),\n [\n WrappedTensorSpec(tensor_spec.TensorSpec([None, 3], dtypes.string)),\n tensor_spec.TensorSpec([6], dtypes.int64),\n tensor_spec.TensorSpec([None], dtypes.int64),\n ]),\n ])\n def testComponentSpecs(self, rt_spec, expected):\n self.assertEqual(rt_spec._component_specs, expected)\n\n @parameterized.parameters([\n {\n 'rt_spec':\n RaggedTensorSpec(\n shape=[3, None, None],\n ragged_rank=1,\n flat_values_spec=WrappedTensorSpec(\n tensor_spec.TensorSpec(None, dtype=dtypes.float32))),\n 'flat_values': [[1.0, 2.0], [3.0, 4.0]],\n 'nested_row_splits': [[0, 1, 1, 2]],\n },\n {\n 'rt_spec':\n RaggedTensorSpec(\n shape=[2, None, None],\n flat_values_spec=WrappedTensorSpec(\n tensor_spec.TensorSpec(None, dtype=dtypes.float32))),\n 'flat_values': [1.0, 2.0, 3.0, 4.0],\n 'nested_row_splits': [[0, 2, 4], [0, 2, 3, 3, 4]],\n },\n ])\n def testToFromComponents(self, rt_spec, flat_values, nested_row_splits):\n wrapped_tensor = WrappedTensor(constant_op.constant(flat_values))\n rt = RaggedTensor.from_nested_row_splits(wrapped_tensor, nested_row_splits)\n components = rt_spec._to_components(rt)\n self.assertIsInstance(components[0], WrappedTensor)\n self.assertAllEqual(components[0].value, wrapped_tensor.value)\n self.assertAllTensorsEqual(components[1:], nested_row_splits)\n rt_reconstructed = rt_spec._from_components(components)\n self.assertIsInstance(rt_reconstructed.flat_values, WrappedTensor)\n self.assertAllEqual(rt_reconstructed.flat_values.value,\n wrapped_tensor.value)\n self.assertAllTensorsEqual(rt_reconstructed.nested_row_splits,\n rt.nested_row_splits)\n self.assertEqual(rt_reconstructed.dtype, rt.dtype)\n\n def testIsCompatibleWith(self):\n spec1 = RaggedTensorSpec([32, None, None],\n dtypes.float32,\n 2,\n flat_values_spec=WrappedTensorSpec(\n tensor_spec.TensorSpec([None, None],\n dtypes.float32)))\n spec2 = RaggedTensorSpec(\n None,\n dtypes.float32,\n 2,\n flat_values_spec=WrappedTensorSpec(\n tensor_spec.TensorSpec(None, dtypes.float32)))\n spec3 = RaggedTensorSpec(\n None,\n dtypes.int32,\n 1,\n flat_values_spec=WrappedTensorSpec(\n tensor_spec.TensorSpec(None, dtypes.int32)))\n spec4 = RaggedTensorSpec([None],\n dtypes.int32,\n 0,\n flat_values_spec=WrappedTensorSpec(\n tensor_spec.TensorSpec(None, dtypes.int32)))\n spec5 = RaggedTensorSpec([None], dtypes.int32, 0)\n\n self.assertTrue(spec1.is_compatible_with(spec2))\n 
self.assertFalse(spec1.is_compatible_with(spec3))\n self.assertFalse(spec1.is_compatible_with(spec4))\n self.assertFalse(spec2.is_compatible_with(spec3))\n self.assertFalse(spec2.is_compatible_with(spec4))\n self.assertFalse(spec3.is_compatible_with(spec4))\n self.assertFalse(spec4.is_compatible_with(spec5))\n value = constant_op.constant([1, 2, 3])\n self.assertFalse(spec4.is_compatible_with(value))\n self.assertTrue(spec4.is_compatible_with(WrappedTensor(value)))\n\n def testToList(self):\n with context.eager_mode():\n tensor_values = constant_op.constant(\n ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])\n row_splits = constant_op.constant([0, 2, 2, 5, 6, 8], dtypes.int64)\n values = WrappedTensor(tensor_values)\n rt = RaggedTensor.from_row_splits(values, row_splits)\n expected = ragged_factory_ops.constant([['a', 'b'], [], ['c', 'd', 'e'],\n ['f'], ['g', 'h']]).to_list()\n\n with self.subTest('Raise on unsupported'):\n with self.assertRaisesRegex(\n ValueError,\n 'values must be convertible to a list',\n ):\n _ = rt.to_list()\n\n with self.subTest('Value with numpy method'):\n\n class WrappedTensorWithNumpy(WrappedTensor):\n\n def numpy(self):\n return self.value.numpy()\n\n values = WrappedTensorWithNumpy(tensor_values)\n rt = RaggedTensor.from_row_splits(values, row_splits)\n self.assertEqual(rt.to_list(), expected)\n\n with self.subTest('Value with to_list method'):\n\n class WrappedTensorWithToList(WrappedTensor):\n\n def to_list(self):\n return self.value.numpy().tolist()\n\n values = WrappedTensorWithToList(tensor_values)\n rt = RaggedTensor.from_row_splits(values, row_splits)\n self.assertEqual(rt.to_list(), expected)\n\n\nif __name__ == '__main__':\n googletest.main()\n"
] |
[
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.math_ops.reduce_max",
"tensorflow.python.ops.array_ops.sequence_mask",
"tensorflow.python.ops.gen_ragged_conversion_ops.ragged_tensor_to_variant",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.ops.ragged.row_partition.RowPartition.from_row_splits",
"tensorflow.python.ops.math_ops.not_equal",
"tensorflow.python.framework.type_spec.type_spec_from_value",
"tensorflow.python.ops.control_flow_ops.Assert",
"tensorflow.python.ops.math_ops.logical_not",
"tensorflow.python.ops.array_ops.rank",
"tensorflow.python.ops.math_ops.reduce_all",
"tensorflow.python.framework.ops.RegisterGradient",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.ops.math_ops.reduce_prod",
"tensorflow.python.ops.array_ops.gather",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.ops.array_ops.size",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.framework.type_spec.register_type_spec_from_value_converter",
"numpy.array2string",
"tensorflow.python.ops.check_ops.assert_non_negative",
"numpy.zeros",
"tensorflow.python.ops.check_ops.assert_rank_at_least",
"tensorflow.python.framework.tensor_shape.dimension_at_index",
"tensorflow.python.framework.sparse_tensor.convert_to_tensor_or_sparse_tensor",
"tensorflow.python.ops.math_ops.cast",
"tensorflow.python.ops.math_ops.minimum",
"tensorflow.python.ops.math_ops.cumprod",
"tensorflow.python.ops.gen_ragged_conversion_ops.ragged_tensor_to_tensor",
"tensorflow.python.framework.type_spec.register",
"tensorflow.python.ops.array_ops.get_positive_axis",
"tensorflow.python.ops.math_ops.equal",
"tensorflow.python.client.session.register_session_run_conversion_functions",
"tensorflow.python.framework.dtypes.as_dtype",
"numpy.cumprod",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.framework.ops.convert_to_tensor",
"numpy.array",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.framework.sparse_tensor.is_sparse",
"tensorflow.python.ops.ragged.ragged_config.auto_cast_partition_dtype",
"tensorflow.python.ops.math_ops.range",
"tensorflow.python.ops.gen_ragged_conversion_ops.ragged_tensor_to_sparse",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.ops.array_ops.boolean_mask",
"tensorflow.python.ops.ragged.ragged_util.convert_to_int_tensor",
"numpy.printoptions",
"tensorflow.python.ops.math_ops.cumsum",
"tensorflow.python.ops.gen_ragged_conversion_ops.ragged_tensor_from_variant",
"tensorflow.python.ops.array_ops.reshape",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.tf2.enabled",
"tensorflow.python.framework.ops.convert_to_tensor_v2_with_dispatch",
"tensorflow.python.ops.ragged.ragged_tensor_value.RaggedTensorValue",
"tensorflow.python.framework.tensor_shape.as_shape",
"tensorflow.python.ops.math_ops.reduce_sum",
"tensorflow.python.framework.constant_op.constant"
],
[
"tensorflow.python.eager.context.eager_mode",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_nested_row_splits",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.framework.tensor_spec.TensorSpec",
"tensorflow.python.framework.constant_op.constant",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensorSpec",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_lengths",
"tensorflow.python.ops.ragged.ragged_tensor._add_supported_value_type",
"tensorflow.python.framework.type_spec.type_spec_from_value",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_limits",
"tensorflow.python.platform.googletest.main",
"tensorflow.python.framework.test_util.random_seed.set_seed",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_uniform_row_length",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_starts",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_row_splits"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
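The ragged_tensor.py source captured in the rows above distinguishes the eager-mode accessors (numpy() and to_list()) from the graph-mode representation built on row_splits. Below is a minimal sketch of that behavior, assuming a TensorFlow 2.x install with eager execution enabled; it uses only public APIs that appear in the file itself (tf.RaggedTensor.from_row_splits, tf.ragged.constant):

# Minimal sketch (assumed environment: TensorFlow 2.x, eager execution).
import tensorflow as tf

# Three rows of lengths 2, 0, and 3, encoded by row_splits boundaries.
rt = tf.RaggedTensor.from_row_splits(values=[1, 2, 3, 4, 5],
                                     row_splits=[0, 2, 2, 5])

print(rt.to_list())   # [[1, 2], [], [3, 4, 5]] -- one Python list per row

# Ragged rows come back as a rank-1 numpy array with dtype=object,
# exactly as the numpy() docstring above describes.
print(rt.numpy())

# Uniform rows collapse into an ordinary rank-2 ndarray instead.
print(tf.ragged.constant([[1, 2], [3, 4]]).numpy())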
zfergus2/Seam-Minimization-Service
|
[
"2cfce0e4e29d3b40b0b8626994a6e6b89d5303cd"
] |
[
"seam_erasure/points_in_triangle.py"
] |
[
"\"\"\"\nUtility file for testing if points are in a given triangle.\n\nWritten by Zachary Ferguson\n\"\"\"\n\nimport numpy\n\n\ndef points_in_triangle(tri, points, tol=1e-8):\n \"\"\"\n Test if the points are inside the triangle.\n\n Input:\n tri - the triangle as a matrix where the rows are the xy points.\n points - the points as a matrix where the rows are the xy points.\n Returns a vector of boolean values.\n \"\"\"\n # B is the transformation from xy to barycentric coordinates\n B = numpy.vstack([tri.T, numpy.ones(3)])\n\n vecs = numpy.vstack([points.T, numpy.ones((1, points.shape[0]))])\n\n # Convert the grid from XY locations to barycentric coordinates.\n # This will only fail of the triangle is degenerate.\n try:\n coords = numpy.linalg.solve(B, vecs)\n except Exception:\n return numpy.zeros(points.shape[0]).astype(bool)\n\n return numpy.all(coords >= -tol, axis=0)\n"
] |
[
[
"numpy.all",
"numpy.linalg.solve",
"numpy.zeros",
"numpy.ones"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
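The points_in_triangle helper in the row above solves a 3x3 linear system to convert query points into barycentric coordinates, accepting a point when all three coordinates are at least -tol. A short usage sketch follows, assuming the repository is on the import path so the function is importable from the module path recorded in this row (seam_erasure/points_in_triangle.py); the triangle and query points are made-up illustrative data:

# Usage sketch; the triangle and points are illustrative, not from the repo.
import numpy
from seam_erasure.points_in_triangle import points_in_triangle

# Unit right triangle: rows are the xy vertices.
tri = numpy.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])

# One interior point, one point on the hypotenuse, one exterior point.
points = numpy.array([[0.25, 0.25], [0.5, 0.5], [1.0, 1.0]])

print(points_in_triangle(tri, points))  # -> [ True  True False]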
minotm/NTA
|
[
"cc8aba4ea46fe40ce621f1314d9798f54de41d5b"
] |
[
"preprocessing/her2_split_and_imbalance.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated 2022\n\n@author: Mason Minot\n\"\"\"\nfrom Levenshtein import distance as levenshtein_distance\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\n\nprint('Now Executing Trastuzumab Train/Val/Test Splitting...')\n\n\"\"\"\nThis script serves to split the trastuzumab (her2) data into training, validation, and testing sets. The train\nand validation sets are selected to be an edit distance of 7 or less from the wild type trastuzumab. The \ntest set is an edit distance of 8 or greater from the wild type\n\nInputs\n----------\nlabeled trastuzumab data from Mason et. al 2021 github repo: https://github.com/dahjan/DMS_opt\nmHER_H3_AgPos.csv\nmHER_H3_AgNeg.csv\n\nOutputs\n-------\ncsv files containing training, validation, and testing sets\n\"\"\" \n\ndef add_LD_to_df(antigen_ID, data_frame):\n '''\n Function to add Edit Distance (Levenshtein Distance) from wt for each sequence to dataframe\n \n Parameters\n ----------\n antigen_ID : str\n corresponds to the antigen identity. in this case, her2\n data_frame : pandas.DataFrame\n dataframe containing all sequence & label data\n\n Returns\n -------\n data_frame : pandas.DataFrame\n input dataframe with an added column containing the Levenshtein Distance from the wild type\n for each sequence in the dataframe\n '''\n \n if antigen_ID == 'her2':\n wt_str = 'WGGDGFYAMK'\n LD_arr = []\n for i in range(len(data_frame)):\n LD_arr.append( levenshtein_distance(wt_str, data_frame['AASeq'].iloc[i]) )\n data_frame['LD'] = LD_arr\n \n return data_frame\n\ndef class_balance_binary(data_frame):\n '''\n Function to class balance dataset\n \n Parameters\n ----------\n\n data_frame : pandas.DataFrame\n dataframe containing all sequence & label data\n\n Returns\n -------\n data_frame : pandas.DataFrame\n class balanced dataframe. 
number of positive examples is equal to the number of negatives\n    '''\n    positives = data_frame[data_frame['AgClass'] == 1].copy()\n    negatives = data_frame[data_frame['AgClass'] == 0].copy()\n    min_list = min([len(ls) for ls in [positives, negatives]])\n    positives = positives[: int(np.round(min_list))] \n    negatives = negatives[: int(np.round(min_list))] \n    return positives, negatives\n\n\nher2_path_local = '../data/her2/'\n\npos = pd.read_csv(her2_path_local + 'mHER_H3_AgPos.csv')\nneg = pd.read_csv(her2_path_local + 'mHER_H3_AgNeg.csv')\n\ndef combine_df_list_and_shuffle(df_list, keep = False):\n    '''\n    combines two dataframes, drops duplicates, & shuffles\n    \n    Parameters\n    ----------\n\n    data_frame : pandas.DataFrame\n        dataframe containing all sequence & label data\n    keep: bool\n        whether or not to keep duplicates\n\n    Returns\n    -------\n    data_frame : pandas.DataFrame\n        combined, shuffled dataframe\n    '''\n    frames = df_list\n    common_cols = list(set.intersection(*(set(df.columns) for df in frames)))\n    combined_df = pd.concat([df[common_cols] for df in frames], ignore_index=True).drop_duplicates(subset='AASeq', keep=keep)\n    np.random.seed(0)\n    combined_df = combined_df.reindex(np.random.permutation(combined_df.index))\n    return combined_df\n\nall_data_frames = [pos.copy(), neg.copy()]\ndata_frame = combine_df_list_and_shuffle(all_data_frames, keep = False)\ndata_frame = add_LD_to_df('her2', data_frame)\n\n\nselected_LD_split = 7\ntrain_df = data_frame[data_frame['LD'] <= selected_LD_split]\ntest_df_initial = data_frame[data_frame['LD'] > selected_LD_split]\n\n#Function to drop duplicates from two dataframes\ndef drop_test_seqs(train_df, test_df, seq_name):\n    '''\n    Function serves as a check to prevent data leakage between training & test or training & val sets\n\n    Parameters\n    ----------\n    train_df : pandas.DataFrame\n        train dataframe\n    test_df : pandas.DataFrame\n        test dataframe\n    seq_name : str\n        corresponds to the dataframe column name containing sequences.\n\n    Returns\n    -------\n    out_df : pandas.DataFrame\n        train dataframe without test sequences\n    '''\n    train_df = train_df.copy()\n    train_df['df'] = 'train'\n    test_df_copy = test_df.copy()\n    test_df_copy['df'] = 'test'\n    frames = [train_df.copy(),test_df_copy]\n    common_cols = list(set.intersection(*(set(df.columns) for df in frames)))\n    concat_df = pd.concat([df[common_cols] for df in frames], ignore_index=True)\n    concat_df = concat_df.drop_duplicates(subset=[seq_name],keep=False)\n    out_df = concat_df[concat_df['df'] == 'train']\n    return out_df\n\ntrain_df = drop_test_seqs(train_df, test_df_initial, 'AASeq')\n\ndef drop_and_rename_columns(df):\n    df = df.copy()\n    df = df.rename(columns = {'AASeq': 'aaseq', 'AgClass': 'target'})\n    df = df.drop(columns = ['Unnamed: 0', 'Fraction', 'NucSeq', 'Count', 'df'])\n    return df\n    \n#Balance test set & save to csv \ntest_df = test_df_initial.copy()\ntest_df['df'] = 'test' #add to df to facilitate using the function below\ntest_df = drop_and_rename_columns(test_df)\ntest_positives = test_df[test_df['target'] == 1]\ntest_negs = test_df[test_df['target'] == 0].sample(n = int(len(test_positives)), random_state = 1)\ntest_df = test_positives.append(test_negs,ignore_index = True)\ntest_df = test_df.reindex(np.random.permutation(test_df.index))\n\nout_path = '../data/her2/'\ntest_df.to_csv(out_path + 'her2_test.csv', index=False)\n\n\ntrain_df = drop_and_rename_columns(train_df)\n\n#Class balance full training data set & shuffle dataframe\ninitial_train_pos = train_df[train_df['target'] == 1]\ninitial_train_neg = 
train_df[train_df['target'] == 0]\n\ninitial_train_neg = initial_train_neg[initial_train_neg['LD'] != 3] #drop the single LD 3 seq from df. required for sklearn train test stratifying later in script\ninitial_train_pos = initial_train_pos[initial_train_pos['LD'] != 2] #drop the two LD 2 seq from df. required for sklearn train test stratifying later in script\n\nminlen = min([len(initial_train_pos),len(initial_train_neg) ])\ninitial_train_pos = initial_train_pos.sample(n = minlen, random_state = 1)\ninitial_train_neg = initial_train_neg.sample(n = minlen, random_state = 1)\n\n\ntrain_df = pd.DataFrame()\ntrain_df = initial_train_pos.append(initial_train_neg, ignore_index = True)\ntrain_df = train_df.sample(n = int(len(train_df)), random_state = 1)\n\n#Batch training & val sets with different quantities of class imbalance using positives as the minority class\nclass_imbalance_qty_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0] \n\n\ntrain_df_master_copy = train_df.copy()\nfor imbal_qty in class_imbalance_qty_list:\n #artificially increase class imbalance in training set by downsampling positives\n train_positives = train_df_master_copy[train_df_master_copy['target'] == 1]\n train_negs = train_df_master_copy[train_df_master_copy['target'] == 0]\n \n #new downsampling method using sklearn & edit distance\n if imbal_qty != 1.0:\n train_positives, x_discard, y_train, y_discard = train_test_split(train_positives, train_positives['target'], test_size = 1 - imbal_qty,\n random_state = 1, shuffle = True, stratify = train_positives['LD']) \n elif imbal_qty == 1.0:\n train_truncated = train_positives \n \n #split val set from training & maintain LD distribution per class\n train_positives, val_positives, y_train, y_val = train_test_split(train_positives, train_positives['target'], test_size = 1 - 0.8,\n random_state = 1, shuffle = True, stratify = train_positives['LD'])\n \n train_negs, val_negs, y_train, y_val = train_test_split(train_negs, train_negs['target'], test_size = 1 - 0.8,\n random_state = 1, shuffle = True, stratify = train_negs['LD'])\n\n train_df = train_positives.append(train_negs,ignore_index = True)\n train_df = train_df.reindex(np.random.permutation(train_df.index))\n\n val_df = val_positives.append(val_negs,ignore_index = True)\n val_df = val_df.reindex(np.random.permutation(val_df.index))\n \n\n train_df = drop_test_seqs(train_df, val_df, 'aaseq')\n train_df = train_df.drop(columns = ['df'])\n train_df = train_df.drop(columns = ['LD'])\n val_df = val_df.drop(columns = ['LD'])\n \n out_str_train = out_path + 'her2_train_imbal_' + str(imbal_qty) + '.csv'\n out_str_val = out_path + 'her2_val_imbal_' + str(imbal_qty) + '.csv'\n train_df.to_csv(out_str_train, index=False)\n val_df.to_csv(out_str_val, index=False)\n"
] |
[
[
"pandas.concat",
"pandas.read_csv",
"numpy.random.seed",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"numpy.round",
"numpy.random.permutation"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
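
The her2 script above builds its final frames with DataFrame.append, which pandas deprecated in 1.4 and removed in 2.0, one of the versions listed for this row; a minimal sketch of the pd.concat equivalent, with illustrative frames:

import pandas as pd
a = pd.DataFrame({'aaseq': ['WGGDGFYAMK'], 'target': [1]})
b = pd.DataFrame({'aaseq': ['WAGDGFYAMK'], 'target': [0]})
combined = pd.concat([a, b], ignore_index=True)  # drop-in replacement for a.append(b, ignore_index=True)
print(len(combined))                             # 2
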
jdvelasq/techminer-api
|
[
"d2bb7d20c326f2fe7cc06d7005dfb3f2053ea1da",
"d2bb7d20c326f2fe7cc06d7005dfb3f2053ea1da"
] |
[
"techminer2/slope_chart.py",
"techminer2/lotka_law.py"
] |
[
"\"\"\"\nSlope Chart\n===============================================================================\n\"\"\"\nimport textwrap\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n# from .plots.multiindex2text import multindex2text\n\nTEXTLEN = 35\n\n\ndef slope_chart(\n matrix,\n figsize=(6, 6),\n cmap=\"Greys\",\n cmap_by=\"Reds\",\n fontsize=9,\n):\n\n matrix = matrix.copy()\n if isinstance(matrix.columns, pd.MultiIndex):\n matrix.columns = multindex2text(matrix.columns)\n\n if isinstance(matrix.index, pd.MultiIndex):\n matrix.index = multindex2text(matrix.index)\n\n matplotlib.rc(\"font\", size=fontsize)\n\n fig = plt.Figure(figsize=figsize)\n ax = fig.subplots()\n cmap = plt.cm.get_cmap(cmap)\n cmap_by = plt.cm.get_cmap(cmap_by)\n\n m = len(matrix.index)\n n = len(matrix.columns)\n maxmn = max(m, n)\n yleft = (maxmn - m) / 2.0 + np.linspace(0, m, m)\n yright = (maxmn - n) / 2.0 + np.linspace(0, n, n)\n\n ax.vlines(\n x=1,\n ymin=-1,\n ymax=maxmn + 1,\n color=\"gray\",\n alpha=0.7,\n linewidth=1,\n linestyles=\"dotted\",\n )\n\n ax.vlines(\n x=3,\n ymin=-1,\n ymax=maxmn + 1,\n color=\"gray\",\n alpha=0.7,\n linewidth=1,\n linestyles=\"dotted\",\n )\n\n #\n # Dibuja los ejes para las conexiones\n #\n ax.scatter(x=[1] * m, y=yleft, s=1)\n ax.scatter(x=[3] * n, y=yright, s=1)\n\n #\n # Dibuja las conexiones\n #\n maxlink = matrix.max().max()\n minlink = matrix.values.ravel()\n minlink = min([v for v in minlink if v > 0])\n for idx, index in enumerate(matrix.index):\n for icol, col in enumerate(matrix.columns):\n link = matrix.loc[index, col]\n if link > 0:\n ax.plot(\n [1, 3],\n [yleft[idx], yright[icol]],\n c=\"k\",\n linewidth=0.5 + 4 * (link - minlink) / (maxlink - minlink),\n alpha=0.5 + 0.5 * (link - minlink) / (maxlink - minlink),\n )\n\n #\n # Sizes\n #\n left_sizes = [int(t.split(\" \")[-1].split(\":\")[0]) for t in matrix.index]\n right_sizes = [int(t.split(\" \")[-1].split(\":\")[0]) for t in matrix.columns]\n\n min_size = min(left_sizes + right_sizes)\n max_size = max(left_sizes + right_sizes)\n\n left_sizes = [\n 150 + 2000 * (t - min_size) / (max_size - min_size) for t in left_sizes\n ]\n right_sizes = [\n 150 + 2000 * (t - min_size) / (max_size - min_size) for t in right_sizes\n ]\n\n #\n # Colors\n #\n left_colors = [int(t.split(\" \")[-1].split(\":\")[1]) for t in matrix.index]\n right_colors = [int(t.split(\" \")[-1].split(\":\")[1]) for t in matrix.columns]\n\n min_color = min(left_colors + right_colors)\n max_color = max(left_colors + right_colors)\n\n left_colors = [\n cmap_by(0.1 + 0.9 * (t - min_color) / (max_color - min_color))\n for t in left_colors\n ]\n right_colors = [\n cmap(0.1 + 0.9 * (t - min_color) / (max_color - min_color))\n for t in right_colors\n ]\n\n ax.scatter(\n x=[1] * m,\n y=yleft,\n s=left_sizes,\n c=left_colors,\n zorder=10,\n linewidths=1,\n edgecolors=\"k\",\n )\n\n for idx, text in enumerate(matrix.index):\n ax.plot([0.7, 1.0], [yleft[idx], yleft[idx]], \"-\", c=\"grey\")\n\n for idx, text in enumerate(matrix.index):\n ax.text(\n 0.7,\n yleft[idx],\n text,\n fontsize=10,\n ha=\"right\",\n va=\"center\",\n zorder=10,\n bbox=dict(\n facecolor=\"w\",\n alpha=1.0,\n edgecolor=\"gray\",\n boxstyle=\"round,pad=0.5\",\n ),\n )\n\n #\n # right y-axis\n #\n\n ax.scatter(\n x=[3] * n,\n y=yright,\n s=right_sizes,\n c=right_colors,\n zorder=10,\n linewidths=1,\n edgecolors=\"k\",\n )\n\n for idx, text in enumerate(matrix.columns):\n ax.plot([3.0, 3.3], [yright[idx], yright[idx]], \"-\", c=\"grey\")\n\n for 
idx, text in enumerate(matrix.columns):\n ax.text(\n 3.3,\n yright[idx],\n text,\n fontsize=10,\n ha=\"left\",\n va=\"center\",\n bbox=dict(\n facecolor=\"w\",\n alpha=1.0,\n edgecolor=\"gray\",\n boxstyle=\"round,pad=0.5\",\n ),\n zorder=11,\n )\n\n #\n # Figure size\n #\n # expand_ax_limits(ax)\n ax.invert_yaxis()\n ax.axis(\"off\")\n fig.set_tight_layout(True)\n\n return fig\n",
"\"\"\"\nLotka's Law\n===============================================================================\n\n\n>>> from techminer2 import *\n>>> directory = \"/workspaces/techminer2/data/\"\n>>> file_name = \"/workspaces/techminer2/sphinx/images/lotka.png\"\n>>> lotka_law(directory=directory).savefig(file_name)\n\n.. image:: images/lotka.png\n :width: 700px\n :align: center\n\n\n>>> lotka_law(directory=directory, plot=False)\n Num Authors % ... Acum Num Documents % Acum Num Documents\n0 1 0.16 % ... 5 2.04%\n1 2 0.31 % ... 11 4.49%\n2 35 5.48 % ... 49 20.0%\n3 601 94.05 % ... 245 100.0%\n<BLANKLINE>\n[4 rows x 9 columns]\n\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom .load_filtered_documents import load_filtered_documents\nfrom .column_indicators import column_indicators\n\n\ndef _lotka_core_authors(directory=\"./\"):\n \"\"\"\n Returns a dataframe with the core analysis.\n\n Parameters\n ----------\n dirpath_or_records: str or list\n path to the directory or the records object.\n\n Returns\n -------\n pandas.DataFrame\n Dataframe with the core authors of the records\n \"\"\"\n documents = load_filtered_documents(directory)\n documents = documents.copy()\n\n z = column_indicators(column=\"authors\", sep=\"; \", directory=directory)[\n \"num_documents\"\n ]\n\n authors_dict = {\n author: num_docs for author, num_docs in zip(z.index, z) if not pd.isna(author)\n }\n\n #\n # Num Authors x Documents written per Author\n #\n # z = z[[\"num_documents\"]]\n z = z.to_frame(name=\"num_documents\")\n z = z.groupby([\"num_documents\"]).size()\n w = [str(round(100 * a / sum(z), 2)) + \" %\" for a in z]\n z = pd.DataFrame(\n {\"Num Authors\": z.tolist(), \"%\": w, \"Documents written per Author\": z.index}\n )\n z = z.sort_values([\"Documents written per Author\"], ascending=False)\n z[\"Acum Num Authors\"] = z[\"Num Authors\"].cumsum()\n z[\"% Acum\"] = [\n str(round(100 * a / sum(z[\"Num Authors\"]), 2)) + \" %\"\n for a in z[\"Acum Num Authors\"]\n ]\n # ---- remove explode ------------------------------------------------------------>>>\n # m = explode(documents[[\"authors\", \"record_no\"]], \"authors\", sep=\"; \")\n m = documents[[\"authors\", \"record_no\"]].copy()\n m[\"authors\"] = m[\"authors\"].str.split(\";\")\n m = m.explode(\"authors\")\n m[\"authors\"] = m[\"authors\"].str.strip()\n # <<<--------------------------------------------------------------------------------\n m = m.dropna()\n m[\"Documents_written\"] = m.authors.map(lambda w: authors_dict[w])\n\n n = []\n for k in z[\"Documents written per Author\"]:\n s = m.query(\"Documents_written >= \" + str(k))\n s = s[[\"record_no\"]]\n s = s.drop_duplicates()\n n.append(len(s))\n\n k = []\n for index in range(len(n) - 1):\n k.append(n[index + 1] - n[index])\n k = [n[0]] + k\n z[\"Num Documents\"] = k\n z[\"% Num Documents\"] = [str(round(i / max(n) * 100, 2)) + \"%\" for i in k]\n z[\"Acum Num Documents\"] = n\n z[\"% Acum Num Documents\"] = [str(round(i / max(n) * 100, 2)) + \"%\" for i in n]\n\n z = z[\n [\n \"Num Authors\",\n \"%\",\n \"Acum Num Authors\",\n \"% Acum\",\n \"Documents written per Author\",\n \"Num Documents\",\n \"% Num Documents\",\n \"Acum Num Documents\",\n \"% Acum Num Documents\",\n ]\n ]\n\n return z.reset_index(drop=True)\n\n\ndef lotka_law(\n cmap=\"Greys\",\n figsize=(6, 6),\n directory=\"./\",\n plot=True,\n):\n \"\"\"\n Returns a dataframe with the core analysis.\n\n Parameters\n ----------\n dirpath_or_records: str or list\n path to the directory or the records object.\n\n Returns\n 
-------\n    pandas.DataFrame\n        Dataframe with the core authors of the records\n    \"\"\"\n\n    fig, ax_ = plt.subplots(figsize=figsize)\n    cmap = plt.cm.get_cmap(cmap)\n    color = cmap(0.6)\n\n    data = _lotka_core_authors(directory)\n\n    if plot is False:\n        return data\n\n    percentage_authors = data[\"%\"].map(lambda w: float(w[:-2])).tolist()\n    percentage_authors.reverse()\n    documents_written = data[\"Documents written per Author\"].tolist()\n    documents_written.reverse()\n    total_authors = data[\"Num Authors\"].max()\n    theoretical = [total_authors / float(x * x) for x in documents_written]\n    total_theoretical = sum(theoretical)\n    perc_theoretical_authors = [w / total_theoretical * 100 for w in theoretical]\n\n    ax_.plot(\n        documents_written,\n        percentage_authors,\n        linestyle=\"-\",\n        linewidth=2,\n        color=\"k\",\n    )\n    ax_.fill_between(\n        documents_written,\n        percentage_authors,\n        color=color,\n        alpha=0.6,\n    )\n\n    ax_.plot(\n        documents_written,\n        perc_theoretical_authors,\n        linestyle=\":\",\n        linewidth=4,\n        color=\"k\",\n    )\n\n    for side in [\"top\", \"right\", \"left\", \"bottom\"]:\n        ax_.spines[side].set_visible(False)\n\n    ax_.grid(axis=\"y\", color=\"gray\", linestyle=\":\")\n    ax_.grid(axis=\"x\", color=\"gray\", linestyle=\":\")\n    ax_.tick_params(axis=\"x\", labelsize=7)\n    ax_.tick_params(axis=\"y\", labelsize=7)\n    ax_.set_ylabel(\"% of Authors\", fontsize=9)\n    ax_.set_xlabel(\"Documents written per Author\", fontsize=9)\n\n    ax_.set_title(\n        \"Frequency distribution of scientific productivity\",\n        fontsize=10,\n        color=\"dimgray\",\n        loc=\"left\",\n    )\n\n    fig.set_tight_layout(True)\n\n    return fig\n"
] |
[
[
"matplotlib.pyplot.cm.get_cmap",
"matplotlib.pyplot.Figure",
"matplotlib.rc",
"numpy.linspace"
],
[
"pandas.isna",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.cm.get_cmap"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
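
Both techminer2 modules above fetch colormaps through plt.cm.get_cmap, which matplotlib deprecated in 3.7; a minimal sketch of the registry lookup that replaces it, assuming matplotlib >= 3.6 where matplotlib.colormaps is available:

import matplotlib
cmap = matplotlib.colormaps["Greys"]  # modern spelling of plt.cm.get_cmap("Greys")
print(cmap(0.6))                      # an RGBA tuple, like the fill color used for the Lotka curve
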
andrewkouri/lidar-bonnetal
|
[
"a0b5c6aba530701084ac66a02532689ed580f934"
] |
[
"train/tasks/semantic/dataset/kitti/parser.py"
] |
[
"import os\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\nfrom common.laserscan import LaserScan, SemLaserScan\n\nEXTENSIONS_SCAN = ['.bin']\nEXTENSIONS_LABEL = ['.label']\n\n\ndef is_scan(filename):\n return any(filename.endswith(ext) for ext in EXTENSIONS_SCAN)\n\n\ndef is_label(filename):\n return any(filename.endswith(ext) for ext in EXTENSIONS_LABEL)\n\n\nclass SemanticKitti(Dataset):\n\n def __init__(self, root, # directory where data is\n sequences, # sequences for this data (e.g. [1,3,4,6])\n labels, # label dict: (e.g 10: \"car\")\n color_map, # colors dict bgr (e.g 10: [255, 0, 0])\n learning_map, # classes to learn (0 to N-1 for xentropy)\n learning_map_inv, # inverse of previous (recover labels)\n sensor, # sensor to parse scans from\n max_points=150000, # max number of points present in dataset\n gt=True): # send ground truth?\n # save deats\n self.root = os.path.join(root, \"sequences\")\n self.sequences = sequences\n self.labels = labels\n self.color_map = color_map\n self.learning_map = learning_map\n self.learning_map_inv = learning_map_inv\n self.sensor = sensor\n self.sensor_img_H = sensor[\"img_prop\"][\"height\"]\n self.sensor_img_W = sensor[\"img_prop\"][\"width\"]\n self.sensor_img_means = torch.tensor(sensor[\"img_means\"],\n dtype=torch.float)\n self.sensor_img_stds = torch.tensor(sensor[\"img_stds\"],\n dtype=torch.float)\n self.sensor_fov_up = sensor[\"fov_up\"]\n self.sensor_fov_down = sensor[\"fov_down\"]\n self.max_points = max_points\n self.gt = gt\n\n # get number of classes (can't be len(self.learning_map) because there\n # are multiple repeated entries, so the number that matters is how many\n # there are for the xentropy)\n self.nclasses = len(self.learning_map_inv)\n\n # sanity checks\n\n # make sure directory exists\n if os.path.isdir(self.root):\n print(\"Sequences folder exists! Using sequences from %s\" % self.root)\n else:\n raise ValueError(\"Sequences folder doesn't exist! 
Exiting...\")\n\n # make sure labels is a dict\n assert (isinstance(self.labels, dict))\n\n # make sure color_map is a dict\n assert (isinstance(self.color_map, dict))\n\n # make sure learning_map is a dict\n assert (isinstance(self.learning_map, dict))\n\n # make sure sequences is a list\n assert (isinstance(self.sequences, list))\n\n # placeholder for filenames\n self.scan_files = []\n self.label_files = []\n\n # fill in with names, checking that all sequences are complete\n for seq in self.sequences:\n # to string\n seq = '{0:02d}'.format(int(seq))\n\n print(\"parsing seq {}\".format(seq))\n\n # get paths for each\n scan_path = os.path.join(self.root, seq, \"velodyne\")\n label_path = os.path.join(self.root, seq, \"labels\")\n\n # get files\n scan_files = [os.path.join(dp, f) for dp, dn, fn in os.walk(\n os.path.expanduser(scan_path)) for f in fn if is_scan(f)]\n label_files = [os.path.join(dp, f) for dp, dn, fn in os.walk(\n os.path.expanduser(label_path)) for f in fn if is_label(f)]\n\n # check all scans have labels\n if self.gt:\n assert (len(scan_files) == len(label_files))\n\n # extend list\n self.scan_files.extend(scan_files)\n self.label_files.extend(label_files)\n\n # sort for correspondance\n self.scan_files.sort()\n self.label_files.sort()\n\n print(\"Using {} scans from sequences {}\".format(len(self.scan_files),\n self.sequences))\n\n def __getitem__(self, index):\n # get item in tensor shape\n scan_file = self.scan_files[index]\n if self.gt:\n label_file = self.label_files[index]\n\n # open a semantic laserscan\n if self.gt:\n scan = SemLaserScan(self.color_map,\n project=True,\n H=self.sensor_img_H,\n W=self.sensor_img_W,\n fov_up=self.sensor_fov_up,\n fov_down=self.sensor_fov_down)\n else:\n scan = LaserScan(project=True,\n H=self.sensor_img_H,\n W=self.sensor_img_W,\n fov_up=self.sensor_fov_up,\n fov_down=self.sensor_fov_down)\n\n # open and obtain scan\n scan.open_scan(scan_file)\n if self.gt:\n scan.open_label(label_file)\n # map unused classes to used classes (also for projection)\n scan.sem_label = self.map(scan.sem_label, self.learning_map)\n scan.proj_sem_label = self.map(scan.proj_sem_label, self.learning_map)\n\n # make a tensor of the uncompressed data (with the max num points)\n unproj_n_points = scan.points.shape[0]\n unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)\n unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)\n unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)\n unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)\n unproj_remissions = torch.full([self.max_points], -1.0, dtype=torch.float)\n unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)\n if self.gt:\n unproj_labels = torch.full([self.max_points], -1.0, dtype=torch.int32)\n unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)\n else:\n unproj_labels = []\n\n # get points and labels\n proj_range = torch.from_numpy(scan.proj_range).clone()\n proj_xyz = torch.from_numpy(scan.proj_xyz).clone()\n proj_remission = torch.from_numpy(scan.proj_remission).clone()\n proj_mask = torch.from_numpy(scan.proj_mask)\n if self.gt:\n proj_labels = torch.from_numpy(scan.proj_sem_label).clone()\n proj_labels = proj_labels * proj_mask\n else:\n proj_labels = []\n proj_x = torch.full([self.max_points], -1, dtype=torch.long)\n proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)\n proj_y = torch.full([self.max_points], -1, dtype=torch.long)\n proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)\n proj = 
torch.cat([proj_range.unsqueeze(0).clone(),\n proj_xyz.clone().permute(2, 0, 1),\n proj_remission.unsqueeze(0).clone()])\n proj = (proj - self.sensor_img_means[:, None, None]\n ) / self.sensor_img_stds[:, None, None]\n proj = proj * proj_mask.float()\n\n # get name and sequence\n path_norm = os.path.normpath(scan_file)\n path_split = path_norm.split(os.sep)\n path_seq = path_split[-3]\n path_name = path_split[-1].replace(\".bin\", \".label\")\n # print(\"path_norm: \", path_norm)\n # print(\"path_seq\", path_seq)\n # print(\"path_name\", path_name)\n\n # return\n return proj, proj_mask, proj_labels, unproj_labels, path_seq, path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, proj_remission, unproj_remissions, unproj_n_points\n\n def __len__(self):\n return len(self.scan_files)\n\n @staticmethod\n def map(label, mapdict):\n # put label from original values to xentropy\n # or vice-versa, depending on dictionary values\n # make learning map a lookup table\n maxkey = 0\n for key, data in mapdict.items():\n if isinstance(data, list):\n nel = len(data)\n else:\n nel = 1\n if key > maxkey:\n maxkey = key\n # +100 hack making lut bigger just in case there are unknown labels\n if nel > 1:\n lut = np.zeros((maxkey + 100, nel), dtype=np.int32)\n else:\n lut = np.zeros((maxkey + 100), dtype=np.int32)\n for key, data in mapdict.items():\n try:\n lut[key] = data\n except IndexError:\n print(\"Wrong key \", key)\n # do the mapping\n return lut[label]\n\n\nclass Parser:\n # standard conv, BN, relu\n def __init__(self,\n root, # directory for data\n train_sequences, # sequences to train\n valid_sequences, # sequences to validate.\n test_sequences, # sequences to test (if none, don't get)\n labels, # labels in data\n color_map, # color for each label\n learning_map, # mapping for training labels\n learning_map_inv, # recover labels from xentropy\n sensor, # sensor to use\n max_points, # max points in each scan in entire dataset\n batch_size, # batch size for train and val\n workers, # threads to load data\n gt=True, # get gt?\n shuffle_train=True): # shuffle training set?\n super(Parser, self).__init__()\n\n # if I am training, get the dataset\n self.root = root\n self.train_sequences = train_sequences\n self.valid_sequences = valid_sequences\n self.test_sequences = test_sequences\n self.labels = labels\n self.color_map = color_map\n self.learning_map = learning_map\n self.learning_map_inv = learning_map_inv\n self.sensor = sensor\n self.max_points = max_points\n self.batch_size = batch_size\n self.workers = workers\n self.gt = gt\n self.shuffle_train = shuffle_train\n\n # number of classes that matters is the one for xentropy\n self.nclasses = len(self.learning_map_inv)\n\n # Data loading code\n self.train_dataset = SemanticKitti(root=self.root,\n sequences=self.train_sequences,\n labels=self.labels,\n color_map=self.color_map,\n learning_map=self.learning_map,\n learning_map_inv=self.learning_map_inv,\n sensor=self.sensor,\n max_points=max_points,\n gt=self.gt)\n\n self.trainloader = torch.utils.data.DataLoader(self.train_dataset,\n batch_size=self.batch_size,\n shuffle=self.shuffle_train,\n num_workers=self.workers,\n pin_memory=True,\n drop_last=True)\n assert len(self.trainloader) > 0\n self.trainiter = iter(self.trainloader)\n\n self.valid_dataset = SemanticKitti(root=self.root,\n sequences=self.valid_sequences,\n labels=self.labels,\n color_map=self.color_map,\n learning_map=self.learning_map,\n learning_map_inv=self.learning_map_inv,\n sensor=self.sensor,\n 
max_points=max_points,\n                                       gt=self.gt)\n\n    self.validloader = torch.utils.data.DataLoader(self.valid_dataset,\n                                                   batch_size=self.batch_size,\n                                                   shuffle=False,\n                                                   num_workers=self.workers,\n                                                   pin_memory=True,\n                                                   drop_last=True)\n    assert len(self.validloader) > 0\n    self.validiter = iter(self.validloader)\n\n    if self.test_sequences:\n      self.test_dataset = SemanticKitti(root=self.root,\n                                        sequences=self.test_sequences,\n                                        labels=self.labels,\n                                        color_map=self.color_map,\n                                        learning_map=self.learning_map,\n                                        learning_map_inv=self.learning_map_inv,\n                                        sensor=self.sensor,\n                                        max_points=max_points,\n                                        gt=False)\n\n      self.testloader = torch.utils.data.DataLoader(self.test_dataset,\n                                                    batch_size=self.batch_size,\n                                                    shuffle=False,\n                                                    num_workers=self.workers,\n                                                    pin_memory=True,\n                                                    drop_last=True)\n      assert len(self.testloader) > 0\n      self.testiter = iter(self.testloader)\n\n  def get_train_batch(self):\n    scans = next(self.trainiter)\n    return scans\n\n  def get_train_set(self):\n    return self.trainloader\n\n  def get_valid_batch(self):\n    scans = next(self.validiter)\n    return scans\n\n  def get_valid_set(self):\n    return self.validloader\n\n  def get_test_batch(self):\n    scans = next(self.testiter)\n    return scans\n\n  def get_test_set(self):\n    return self.testloader\n\n  def get_train_size(self):\n    return len(self.trainloader)\n\n  def get_valid_size(self):\n    return len(self.validloader)\n\n  def get_test_size(self):\n    return len(self.testloader)\n\n  def get_n_classes(self):\n    return self.nclasses\n\n  def get_original_class_string(self, idx):\n    return self.labels[idx]\n\n  def get_xentropy_class_string(self, idx):\n    return self.labels[self.learning_map_inv[idx]]\n\n  def to_original(self, label):\n    # put label in original values\n    return SemanticKitti.map(label, self.learning_map_inv)\n\n  def to_xentropy(self, label):\n    # put label in xentropy values\n    return SemanticKitti.map(label, self.learning_map)\n\n  def to_color(self, label):\n    # put label in original values\n    label = SemanticKitti.map(label, self.learning_map_inv)\n    # put label in color\n    return SemanticKitti.map(label, self.color_map)\n"
] |
[
[
"torch.full",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.tensor",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
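
The SemanticKitti.map helper above compresses a sparse label dictionary into a flat numpy lookup table; a tiny self-contained sketch of the same trick with made-up label ids:

import numpy as np
learning_map = {0: 0, 10: 1, 11: 2}                      # raw label -> training id (illustrative values)
lut = np.zeros(max(learning_map) + 100, dtype=np.int32)  # +100 guard for unknown labels, as in parser.py
for raw, train_id in learning_map.items():
    lut[raw] = train_id
print(lut[np.array([10, 0, 11, 10])])                    # [1 0 2 1]
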
sarahsester/q_hackathon
|
[
"963dcfbe8e3fa8bda954f4fc6db8a238f1d8a720"
] |
[
"src/teacher_school_allocation.py"
] |
[
"from ortools.linear_solver import pywraplp\nimport pandas as pd\nimport numpy as np\n\n\ndef create_cost_matrix(distances, pref_big_school, pref_rural):\n cost_matrix = distances + 10 * pref_big_school + 10 * pref_rural\n return cost_matrix\n\n\ndef find_optimal_allocation(df_schools, distances, pref_big_school, pref_rural, number_teachers, number_schools):\n # Create cost matrix\n cost_matrix = create_cost_matrix(distances, pref_big_school, pref_rural)\n\n # Create the mip solver with the SCIP backend.\n solver = pywraplp.Solver.CreateSolver('SCIP')\n\n # x[t,s] is an array of 0-1 variables, which will be 1 if teacher t is assigned to school s.\n x = {}\n for t in range(number_teachers):\n for s in range(number_schools):\n x[t, s] = solver.IntVar(0, 1, '')\n\n # Constraint 1: Each teacher is assigned to one school.\n for t in range(number_teachers):\n solver.Add(solver.Sum([x[t, s] for s in range(number_schools)]) == 1)\n\n # Constraint 2: Each school is assigned to minimum x teachers.\n for s in range(number_schools):\n solver.Add(solver.Sum([x[t, s] for t in range(number_teachers)]) >= df_schools['min_number_of_teachers'][s])\n\n # Constraint 3: Each school is assigned to maximal x+20 teachers.\n for s in range(number_schools):\n solver.Add(\n solver.Sum([x[t, s] for t in range(number_teachers)]) <= df_schools['min_number_of_teachers'][s] + 20)\n\n # Constraint 4: Each teacher has a maximum cost of 100.\n for t in range(number_teachers):\n solver.Add(solver.Sum([cost_matrix[t][s] * x[t, s] for s in range(number_schools)]) <= 100)\n\n # Objective\n objective_terms = []\n for t in range(number_teachers):\n for s in range(number_schools):\n objective_terms.append(cost_matrix[t][s] * x[t, s])\n solver.Minimize(solver.Sum(objective_terms))\n\n # Solve\n status = solver.Solve()\n\n df = pd.DataFrame(columns=['iteration', 'teacher', 'school', 'cost', 'dist'])\n\n # Save costs for further iterations\n costs_per_teacher = []\n\n # Print solution.\n if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:\n print(f'Total cost = {solver.Objective().Value()}\\n')\n for t in range(number_teachers):\n for s in range(number_schools):\n # Test if x[t,s] is 1 (with tolerance for floating point arithmetic).\n if x[t, s].solution_value() > 0.5:\n print(f'Teacher {t} assigned to school {s}. 
Cost={cost_matrix[t][s]}')\n df = df.append({'iteration': 1, 'teacher': t, 'school': s, 'cost': cost_matrix[t][s],\n 'dist': distances[t][s],\n 'pref_school_size_unsatisfied': pref_big_school[t][s],\n 'pref_urban_rural_unsatisfied': pref_rural[t][s]},\n ignore_index=True)\n costs_per_teacher.append(cost_matrix[t][s])\n\n adapted_costs = cost_matrix * np.array(costs_per_teacher)[:, np.newaxis] / 10\n\n return df, adapted_costs\n\n\ndef find_optimal_allocation_it2(df_schools, distances, pref_big_school, pref_rural, number_teachers, number_schools,\n adapted_cost_matrix):\n # Create cost matrix\n cost_matrix = create_cost_matrix(distances, pref_big_school, pref_rural)\n\n # Create the mip solver with the SCIP backend.\n solver = pywraplp.Solver.CreateSolver('SCIP')\n\n # x[t,s] is an array of 0-1 variables, which will be 1 if teacher t is assigned to school s.\n x = {}\n for t in range(number_teachers):\n for s in range(number_schools):\n x[t, s] = solver.IntVar(0, 1, '')\n\n # Constraint 1: Each teacher is assigned to one school.\n for t in range(number_teachers):\n solver.Add(solver.Sum([x[t, s] for s in range(number_schools)]) == 1)\n\n # Constraint 2: Each school is assigned to minimum x teachers.\n for s in range(number_schools):\n solver.Add(solver.Sum([x[t, s] for t in range(number_teachers)]) >= df_schools['min_number_of_teachers'][s])\n\n # Constraint 3: Each school is assigned to maximal x+20 teachers.\n for s in range(number_schools):\n solver.Add(\n solver.Sum([x[t, s] for t in range(number_teachers)]) <= df_schools['min_number_of_teachers'][s] + 20)\n\n # Constraint 4: Each teacher has a maximum cost of 100.\n for t in range(number_teachers):\n solver.Add(solver.Sum([cost_matrix[t][s] * x[t, s] for s in range(number_schools)]) <= 100)\n\n # Objective\n objective_terms = []\n for t in range(number_teachers):\n for s in range(number_schools):\n objective_terms.append(adapted_cost_matrix[t][s] * x[t, s])\n solver.Minimize(solver.Sum(objective_terms))\n\n # Solve\n status = solver.Solve()\n\n df = pd.DataFrame(columns=['iteration', 'teacher', 'school', 'cost', 'dist'])\n\n # Print solution.\n if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:\n print(f'Total cost = {solver.Objective().Value()}\\n')\n for t in range(number_teachers):\n for s in range(number_schools):\n # Test if x[t,s] is 1 (with tolerance for floating point arithmetic).\n if x[t, s].solution_value() > 0.5:\n print(f'Teacher {t} assigned to school {s}. 
Cost={cost_matrix[t][s]}')\n df = df.append({'iteration': 2, 'teacher': t, 'school': s, 'cost': cost_matrix[t][s],\n 'dist': distances[t][s],\n 'pref_school_size_unsatisfied': pref_big_school[t][s],\n 'pref_urban_rural_unsatisfied': pref_rural[t][s]},\n ignore_index=True)\n\n return df\n\n\nif __name__ == '__main__':\n nb_of_teachers = 761\n nb_of_schools = 58\n\n # Get school data\n df_schools = pd.read_csv('../data/school_dataset.csv')\n\n # Get cost matrix\n distances = pd.read_pickle('../data/geopy_distance_matrix_Waldorfschule.pkl')\n # distances = np.random.rand(nb_of_teachers, nb_of_schools) * 200\n pref_big_school = pd.read_pickle(r'../data/preference_big_school_Waldorfschule.pkl')\n pref_rural = pd.read_pickle(r'../data/preference_rural_Waldorfschule.pkl')\n\n df, adapted_costs = find_optimal_allocation(df_schools, distances, pref_big_school, pref_rural,\n number_teachers=nb_of_teachers, number_schools=nb_of_schools)\n print(df)\n print(df.groupby(['school']).count()['teacher'])\n print(f'Average costs: {df[\"cost\"].mean()}.')\n print(f'Teacher {df[\"cost\"].argmin()} has minimum costs ({df[\"cost\"].min()}).')\n print(f'Teacher {df[\"cost\"].argmax()} has maximal costs ({df[\"cost\"].max()}).')\n\n print(adapted_costs)\n\n df2 = find_optimal_allocation_it2(df_schools, distances, pref_big_school, pref_rural, number_teachers=nb_of_teachers,\n number_schools=nb_of_schools, adapted_cost_matrix=adapted_costs)\n print(df2)\n print(df2.groupby(['school']).count()['teacher'])\n print(f'Average costs: {df2[\"cost\"].mean()}.')\n print(f'Teacher {df2[\"cost\"].argmin()} has minimum costs ({df2[\"cost\"].min()}).')\n print(f'Teacher {df2[\"cost\"].argmax()} has maximal costs ({df2[\"cost\"].max()}).')\n\n df_all = df.append(df2)\n df_all.to_csv('../data/results.csv', index=False)\n"
] |
[
[
"numpy.array",
"pandas.read_csv",
"pandas.read_pickle",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
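
A minimal sketch of the 0-1 assignment model the script above builds with OR-Tools, shrunk to two teachers and two schools with toy costs (all values illustrative):

from ortools.linear_solver import pywraplp

cost = [[10, 4], [6, 8]]  # toy cost[t][s]
solver = pywraplp.Solver.CreateSolver('SCIP')
x = {(t, s): solver.IntVar(0, 1, '') for t in range(2) for s in range(2)}
for t in range(2):        # each teacher is assigned to exactly one school
    solver.Add(solver.Sum([x[t, s] for s in range(2)]) == 1)
solver.Minimize(solver.Sum([cost[t][s] * x[t, s] for t in range(2) for s in range(2)]))
if solver.Solve() == pywraplp.Solver.OPTIMAL:
    print([ts for ts in x if x[ts].solution_value() > 0.5])  # [(0, 1), (1, 0)]
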
rdesarz/rrtstar
|
[
"0e2737fbb7bb7e45789d606e6c6c2b7ce5824f65"
] |
[
"rrtstar/sampling.py"
] |
[
"import random\n\nimport numpy as np\n\nfrom rrtstar.geometry import Zone2d, Point2d\n\n\ndef generate_new_sample_uniform(planification_zone: Zone2d) -> Point2d:\n x = np.random.uniform(planification_zone.x_min, planification_zone.x_max, 1)\n y = np.random.uniform(planification_zone.y_min, planification_zone.y_max, 1)\n return Point2d(x[0], y[0])\n\n\ndef generate_new_sample_biased(goal: Point2d) -> Point2d:\n x, y = np.random.multivariate_normal(goal.to_array(), [[10, 0], [0, 10]]).T\n return Point2d(x, y)\n\n\ndef generate_new_sample_biased_towards_goal(\n planification_zone: Zone2d, goal: Point2d, goal_sample_rate: int\n) -> Point2d:\n # There is a probability to generate a sample that is the goal.\n # Therefore, the tree is biased to grow towards the goal.\n if random.randint(0, 100) > goal_sample_rate:\n return Point2d(\n random.uniform(planification_zone.x_min, planification_zone.x_max),\n random.uniform(planification_zone.y_min, planification_zone.y_max),\n )\n else:\n return Point2d(goal.x, goal.y)"
] |
[
[
"numpy.random.uniform"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
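
generate_new_sample_biased_towards_goal above steers a fixed share of draws straight to the goal; a quick sketch of that bias in isolation (the rate is illustrative):

import random
goal_sample_rate = 10  # percent of draws sent to the goal, as in sampling.py
draws = ['goal' if random.randint(0, 100) <= goal_sample_rate else 'uniform' for _ in range(1000)]
print(draws.count('goal'))  # roughly 110: randint(0, 100) is inclusive, so 11 of 101 outcomes pick the goal
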
Bobyuan1015/KDA
|
[
"ce442922deb93b1bfe2ad7c418f1c63f5c40e000"
] |
[
"helper/webSearchCloseWords.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n File Name: url_content.py\n Description : the main detail logic of auditing url sms\n Author : yuanfang\n date: 2019/12/13\n\"\"\"\n\nfrom lxml import html\nimport pandas as pd\nimport sys\nimport os\nimport pathlib\nimport re\nimport requests\n\nfrom helper.cut import func_timer\n\nproject_path = str(pathlib.Path(os.path.abspath(os.curdir)))\nsys.path.append(project_path)\nprint(sys.path)\n\nheaders = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}\n\nxpaths=['/html/body/div[1]/div[2]/div[2]/p[11]//text()',\n '/html/body/div[1]/div[2]/div[2]/p[6]//text()',\n '/html/body/div[1]/div[2]/div[2]/p[8]//text()',\n '/html/body/div[1]/div[2]/div[2]/p[13]//text()',\n '/html/body/div[1]/div[2]/div[2]/p[2]//text()']\n\n\ndef web_search(text,closeWords=None):\n \"\"\"Get the synonyms from a very official chinese synonym web\n :param text: a chinese word phrase. type: str\n :return: a list of close words to the param word, then join the list into a list type:str\n \"\"\"\n\n if len(get_chinese(text)) <1:\n return '0'\n if closeWords != '0':\n return closeWords\n def getci(text):\n tree = html.fromstring(text)\n close_words = []\n for xpath_ in xpaths:\n text = tree.xpath(xpath_)\n if len(text) > 0:\n for ci in text:\n close_words.extend(ci.split())\n print('close:',close_words)\n return list(set(close_words))\n\n print('web_search ->', text)\n while True: # 一直循环,知道访问站点成功\n try:\n page = requests.get('https://kmcha.com/similar/' + text, headers=headers, timeout=2)\n # print(page.text)\n close_words = match_ci(page.text)\n # print(close_words)\n # print(' 近义词:',content)\n\n return ','.join(close_words)\n # print('response:',response)\n # response = requests.get(url)\n # content = response.content.decode()\n # print('content:', content)\n # return test_remove_redundants(content)\n\n except requests.exceptions.ConnectionError:\n print('ConnectionError -- please wait 3 seconds')\n return '0'\n # time.sleep(3)\n except requests.exceptions.ChunkedEncodingError:\n print('ChunkedEncodingError -- please wait 3 seconds')\n # time.sleep(3)\n return '0'\n except Exception as e:\n print('Unfortunitely -- An Unknow Error Happened, Please wait 3 seconds e:', e)\n # time.sleep(3)\n return '0'\n\n\n\n@func_timer\ndef web_search_close_keys(file):\n df = pd.read_csv(file)\n df['close_words'] = '0'\n df.fillna('0')\n df['close_words'] = df.apply(lambda row: web_search(row['finale_all_keys'], row['close_words']), axis=1)\n df.to_csv('keys_.csv',index=False)\n\ndef get_chinese(content):\n \"\"\"\n pick chinese only from a text\n :param text: type: str\n :return: chines text type: str\n\n \"\"\"\n print('content:',content)\n return re.sub('[^\\u4e00-\\u9fff]+', '', content)\n\ndef remove_redundant(text):\n words = text.split('的同义词')\n return list(set(words))\n\nstops=['的']\ndef match_ci(text):\n start='的相似词'\n end='热门查询'\n close_words=[]\n if start in text and end in text:\n start_index = text.find(start)+len(start)\n end_index = text.find(end)\n ci_sentences = text[start_index:end_index]\n temp = [close_words.extend(remove_redundant(get_chinese(s.strip()))) for s in ci_sentences.split(' ')]\n cis = [ci for ci in close_words if len(ci) > 0 and ci not in stops]\n return cis\n\n\n# df = pd.read_csv('key.csv')\n# print(type(df))\n# print(df.columns)\n# # df.drop(df[df.keys.isnull()].index,inplace=True)\n# df['closed_words'] = df['keys'].apply(web_search)\n# df.to_csv('done_keys.csv',index=False)\n"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
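
web_search above gives each requests failure its own except branch that returns '0'; a minimal sketch collapsing them onto the shared base class (the query word is illustrative):

import requests

try:
    page = requests.get('https://kmcha.com/similar/词语', timeout=2)
    page.raise_for_status()
except requests.RequestException as exc:  # base class of ConnectionError, ChunkedEncodingError, Timeout, ...
    print('request failed:', exc)
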
N-Lambin/TFE-EEG
|
[
"0308e790a8f1045c85cbc1ec9e054e6136af58a3"
] |
[
"Python/miscellaneous/plt.py"
] |
[
"import matplotlib.pyplot as plt\nimport csv\nimport numpy as np\nfrom scipy import signal\n\nfileDir = 'winkLeft\\\\winkLeft10Data\\\\'\nfileName = 'AF3.csv'\nfs = 128\nx = []\n\nwith open(\".\\\\csv\\\\csvCleanData\\\\\" + fileDir + fileName, \"r\") as csvfile:\n csv_reader = csv.reader(csvfile, delimiter = '\\r')\n\n for row in csv_reader:\n x.append(int(row[0]))\n\n x = np.array(x)\n f, fd = signal.periodogram(x, fs, nfft=64)\n plt.semilogy(f, fd, 'r')\n\nfileDir = 'neutral\\\\neutral10Data\\\\'\nfileName = 'AF3.csv'\nfs = 128\nx = []\n\nwith open(\".\\\\csv\\\\csvCleanData\\\\\" + fileDir + fileName, \"r\") as csvfile:\n csv_reader = csv.reader(csvfile, delimiter = '\\r')\n\n for row in csv_reader:\n x.append(int(row[0]))\n\n x = np.array(x)\n f, fd = signal.periodogram(x, fs, nfft=64)\n plt.semilogy(f, fd, 'b')\n plt.show()"
] |
[
[
"matplotlib.pyplot.semilogy",
"scipy.signal.periodogram",
"numpy.array",
"matplotlib.pyplot.show"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
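
A self-contained sketch of the periodogram call used in plt.py above, swapping the CSV channel for a synthetic 10 Hz tone so the expected peak is known in advance:

import numpy as np
from scipy import signal

fs = 128                        # sampling rate used above (Hz)
t = np.arange(4 * fs) / fs
x = np.sin(2 * np.pi * 10 * t)  # synthetic 10 Hz component in place of the AF3 channel
f, pxx = signal.periodogram(x, fs, nfft=64)
print(f[np.argmax(pxx)])        # 10.0: with fs=128 and nfft=64 the frequency bins fall every 2 Hz
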
TUM-LMF/MTLCC-pytorch
|
[
"894a470be2fb4b9e2e0b9e20e8684131ffdb5577"
] |
[
"src/models/convlstm/convlstm.py"
] |
[
"import torch.nn as nn\nfrom torch.autograd import Variable\nimport torch\n\n\nclass ConvLSTMCell(nn.Module):\n\n def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias):\n \"\"\"\n Initialize ConvLSTM cell.\n \n Parameters\n ----------\n input_size: (int, int)\n Height and width of input tensor as (height, width).\n input_dim: int\n Number of channels of input tensor.\n hidden_dim: int\n Number of channels of hidden state.\n kernel_size: (int, int)\n Size of the convolutional kernel.\n bias: bool\n Whether or not to add the bias.\n \"\"\"\n\n super(ConvLSTMCell, self).__init__()\n\n self.height, self.width = input_size\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n\n self.kernel_size = kernel_size\n self.padding = kernel_size[0] // 2, kernel_size[1] // 2\n self.bias = bias\n \n self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,\n out_channels=4 * self.hidden_dim,\n kernel_size=self.kernel_size,\n padding=self.padding,\n bias=self.bias)\n\n def forward(self, input_tensor, cur_state):\n \n h_cur, c_cur = cur_state\n \n combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis\n \n combined_conv = self.conv(combined)\n cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1) \n i = torch.sigmoid(cc_i)\n f = torch.sigmoid(cc_f)\n o = torch.sigmoid(cc_o)\n g = torch.tanh(cc_g)\n\n c_next = f * c_cur + i * g\n h_next = o * torch.tanh(c_next)\n \n return h_next, c_next\n\n def init_hidden(self, batch_size):\n return (Variable(torch.zeros(batch_size, self.hidden_dim, self.height, self.width)).cuda(),\n Variable(torch.zeros(batch_size, self.hidden_dim, self.height, self.width)).cuda())\n\n\nclass ConvLSTM(nn.Module):\n\n def __init__(self, input_size, input_dim, hidden_dim, kernel_size, num_layers,\n batch_first=False, bias=True, return_all_layers=False):\n super(ConvLSTM, self).__init__()\n\n self._check_kernel_size_consistency(kernel_size)\n\n # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers\n kernel_size = self._extend_for_multilayer(kernel_size, num_layers)\n hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)\n if not len(kernel_size) == len(hidden_dim) == num_layers:\n raise ValueError('Inconsistent list length.')\n\n self.height, self.width = input_size\n\n self.input_dim = input_dim\n self.hidden_dim = hidden_dim\n self.kernel_size = kernel_size\n self.num_layers = num_layers\n self.batch_first = batch_first\n self.bias = bias\n self.return_all_layers = return_all_layers\n\n cell_list = []\n for i in range(0, self.num_layers):\n cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i-1]\n\n cell_list.append(ConvLSTMCell(input_size=(self.height, self.width),\n input_dim=cur_input_dim,\n hidden_dim=self.hidden_dim[i],\n kernel_size=self.kernel_size[i],\n bias=self.bias))\n\n self.cell_list = nn.ModuleList(cell_list)\n\n def forward(self, input_tensor, hidden_state=None):\n \"\"\"\n \n Parameters\n ----------\n input_tensor: todo \n 5-D Tensor either of shape (t, b, c, h, w) or (b, t, c, h, w)\n hidden_state: todo\n None. 
todo implement stateful\n            \n        Returns\n        -------\n        last_state_list, layer_output\n        \"\"\"\n        if not self.batch_first:\n            # (t, b, c, h, w) -> (b, t, c, h, w)\n            input_tensor = input_tensor.permute(1, 0, 2, 3, 4)\n\n        # Implement stateful ConvLSTM\n        if hidden_state is not None:\n            raise NotImplementedError()\n        else:\n            hidden_state = self._init_hidden(batch_size=input_tensor.size(0))\n\n        layer_output_list = []\n        last_state_list = []\n\n        seq_len = input_tensor.size(1)\n        cur_layer_input = input_tensor\n\n        for layer_idx in range(self.num_layers):\n\n            h, c = hidden_state[layer_idx]\n            output_inner = []\n            for t in range(seq_len):\n\n                h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],\n                                                 cur_state=[h, c])\n                output_inner.append(h)\n\n            layer_output = torch.stack(output_inner, dim=1)\n            cur_layer_input = layer_output\n\n            layer_output_list.append(layer_output)\n            last_state_list.append([h, c])\n\n        if not self.return_all_layers:\n            layer_output_list = layer_output_list[-1:]\n            last_state_list = last_state_list[-1:]\n\n        return layer_output_list, last_state_list\n\n    def _init_hidden(self, batch_size):\n        init_states = []\n        for i in range(self.num_layers):\n            init_states.append(self.cell_list[i].init_hidden(batch_size))\n        return init_states\n\n    @staticmethod\n    def _check_kernel_size_consistency(kernel_size):\n        if not (isinstance(kernel_size, tuple) or\n                (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):\n            raise ValueError('`kernel_size` must be tuple or list of tuples')\n\n    @staticmethod\n    def _extend_for_multilayer(param, num_layers):\n        if not isinstance(param, list):\n            param = [param] * num_layers\n        return param\n"
] |
[
[
"torch.sigmoid",
"torch.cat",
"torch.zeros",
"torch.nn.ModuleList",
"torch.nn.Conv2d",
"torch.tanh",
"torch.split",
"torch.stack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
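
A usage sketch for the ConvLSTM module above with illustrative shapes; note that ConvLSTMCell.init_hidden pins its states to CUDA, so this assumes the class is in scope and a GPU is available:

import torch

# assumes the ConvLSTM class defined in the row above has been imported
model = ConvLSTM(input_size=(16, 16), input_dim=3, hidden_dim=[8],
                 kernel_size=(3, 3), num_layers=1, batch_first=True).cuda()
x = torch.randn(2, 5, 3, 16, 16).cuda()  # (batch, time, channels, height, width)
layer_outputs, last_states = model(x)
print(layer_outputs[0].shape)            # torch.Size([2, 5, 8, 16, 16])
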
huakeda1/Basic-algorithm-and-framework-study-for-AI
|
[
"8776dc500772a6c1f28be9c4a426ed9eca2ec775"
] |
[
"search_engine/search_engine_with_rank.py"
] |
[
"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport jieba\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom functools import reduce\nimport numpy as np\nimport os\nimport re\nfrom scipy.spatial.distance import cosine\n\n\n# In[2]:\n\n\ncsv_file='dataset/news.csv'\nif os.path.exists(csv_file):\n news=pd.read_csv(csv_file,encoding='gb18030',nrows=20000)\n news['content']=news['content'].fillna('')\n news['cut_words']=news['content'].apply(lambda x:' '.join(list(jieba.cut(x))))\n news['cut_words'].to_csv('dataset/news_content.csv')\n print('news csv has been successfully processed')\n\n\n# In[3]:\n\n\ndef reduce_and(vectors):\n return reduce(lambda a,b:a&b,vectors)\n\n\n# In[4]:\n\n\nclass RetrievalEngine:\n def __init__(self,corpus):\n # token_pattern is set to be r\"(?u)\\b\\w\\w+\\b\" by default which can only accept words longer than two.\n # token_pattern is set to be r\"(?u)\\b\\w+\\b\" which can accept single word or alpha.\n # vocabulary can give words which will be used to build matrix\n # max_df can filter words which have higher exist frequency in all docs\n # tf is decided only by current doc, tf equals frequency in single doc.\n # idf is decided by how many docs have this word and how many docs are given here.\n # idf equals to 1+log((total_docs)/(docs_contain_thisword)) or 1+log((1+total_docs)/(1+docs_contain_thisword))\n # tfidf means tf*idf.\n self.vectorizer=TfidfVectorizer(token_pattern=r\"(?u)\\b\\w+\\b\",max_df=1.0,stop_words=[],vocabulary=None,use_idf=True,smooth_idf=True)\n self.vectorizer.fit(corpus)\n self.corpus=corpus\n self.d2w=self.vectorizer.transform(corpus).toarray()\n self.w2d=self.d2w.transpose()\n def get_words_id(self,words):\n ids=[self.vectorizer.vocabulary_[w] for w in words if w in self.vectorizer.vocabulary_]\n return ids\n def get_w2d_vectors(self,words):\n vectors=self.w2d[self.get_words_id(words)]\n return vectors\n # get the idnexes of docs which have all the specific words\n def get_combined_common_indices(self,words):\n try:\n indices=reduce_and([set(np.where(v)[0]) for v in self.get_w2d_vectors(words)])\n return indices\n except Exception as e:\n return []\n def get_sorted_indices(self,words):\n indices=self.get_combined_common_indices(words)\n query_vector=self.vectorizer.transform(words).toarray()[0]\n sorted_indices=sorted(indices,key=lambda indice:cosine(query_vector,self.d2w[indice]),reverse=True)\n return sorted_indices\n def get_requested_text(self,words):\n sorted_indices=self.get_sorted_indices(words)\n output=[self.corpus[indice] for indice in sorted_indices]\n return output\n \n\n\n# In[5]:\n\n\ncorpus=[\" \".join(list(jieba.cut(\"我爱吃香蕉\"))),\" \".join(list(jieba.cut(\"你爱吃苹果\"))),\" \".join(list(jieba.cut(\"苹果没有香蕉吃得好\")))]\nretrieval_engine=RetrievalEngine(corpus)\nprint(retrieval_engine.w2d)\nprint(retrieval_engine.vectorizer.vocabulary_)\nwords=list(jieba.cut(\"喜欢水果\"))\nprint(retrieval_engine.get_words_id(words))\n\nprint(retrieval_engine.get_w2d_vectors(words))\n\nprint(retrieval_engine.get_combined_common_indices(words))\nprint(retrieval_engine.get_sorted_indices(words))\nprint(retrieval_engine.get_requested_text(words))\n\n\n# In[ ]:\n\n\n\n\n"
] |
[
[
"numpy.where",
"pandas.read_csv",
"scipy.spatial.distance.cosine",
"sklearn.feature_extraction.text.TfidfVectorizer"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
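
The comment block in RetrievalEngine above turns on the custom token_pattern; a tiny sketch of why the default pattern would drop the single-character Chinese tokens (corpus is illustrative):

from sklearn.feature_extraction.text import TfidfVectorizer

corpus = ['我 爱 吃 香蕉', '你 爱 吃 苹果']
default_vec = TfidfVectorizer().fit(corpus)                             # default r"(?u)\b\w\w+\b"
custom_vec = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b").fit(corpus)
print(sorted(default_vec.vocabulary_))  # ['苹果', '香蕉'] -- single characters are dropped
print(sorted(custom_vec.vocabulary_))   # ['你', '吃', '我', '爱', '苹果', '香蕉']
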
LordDarkula/eva
|
[
"93433bc88f361c277690c9e31f1b8de657f25823"
] |
[
"test/expression/test_logical.py"
] |
[
"# coding=utf-8\n# Copyright 2018-2020 EVA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport unittest\nimport pandas as pd\nfrom mock import Mock\n\nfrom eva.expression.abstract_expression import ExpressionType\nfrom eva.expression.comparison_expression import ComparisonExpression\nfrom eva.expression.logical_expression import LogicalExpression\nfrom eva.expression.constant_value_expression import ConstantValueExpression\nfrom eva.expression.tuple_value_expression import TupleValueExpression\nfrom eva.models.storage.batch import Batch\n\n\nclass LogicalExpressionsTest(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def test_logical_and(self):\n const_exp1 = ConstantValueExpression(1)\n const_exp2 = ConstantValueExpression(1)\n\n comparison_expression_left = ComparisonExpression(\n ExpressionType.COMPARE_EQUAL,\n const_exp1,\n const_exp2\n )\n const_exp1 = ConstantValueExpression(2)\n const_exp2 = ConstantValueExpression(1)\n comparison_expression_right = ComparisonExpression(\n ExpressionType.COMPARE_GREATER,\n const_exp1,\n const_exp2\n )\n logical_expr = LogicalExpression(\n ExpressionType.LOGICAL_AND,\n comparison_expression_left,\n comparison_expression_right\n )\n self.assertEqual(\n [True], logical_expr.evaluate(None).frames[0].tolist())\n\n def test_logical_or(self):\n const_exp1 = ConstantValueExpression(1)\n const_exp2 = ConstantValueExpression(1)\n\n comparison_expression_left = ComparisonExpression(\n ExpressionType.COMPARE_EQUAL,\n const_exp1,\n const_exp2\n )\n const_exp1 = ConstantValueExpression(1)\n const_exp2 = ConstantValueExpression(2)\n comparison_expression_right = ComparisonExpression(\n ExpressionType.COMPARE_GREATER,\n const_exp1,\n const_exp2\n )\n logical_expr = LogicalExpression(\n ExpressionType.LOGICAL_OR,\n comparison_expression_left,\n comparison_expression_right\n )\n self.assertEqual(\n [True],\n logical_expr.evaluate(None).frames[0].tolist()\n )\n\n def test_logical_not(self):\n const_exp1 = ConstantValueExpression(0)\n const_exp2 = ConstantValueExpression(1)\n\n comparison_expression_right = ComparisonExpression(\n ExpressionType.COMPARE_GREATER,\n const_exp1,\n const_exp2\n )\n logical_expr = LogicalExpression(\n ExpressionType.LOGICAL_NOT,\n None,\n comparison_expression_right\n )\n self.assertEqual(\n [True],\n logical_expr.evaluate(None).frames[0].tolist()\n )\n\n def test_short_circuiting_and_complete(self):\n # tests whether right-hand side is bypassed completely with and\n tup_val_exp_l = TupleValueExpression(col_name=0)\n tup_val_exp_r = TupleValueExpression(col_name=1)\n\n comp_exp_l = ComparisonExpression(\n ExpressionType.COMPARE_EQUAL,\n tup_val_exp_l,\n tup_val_exp_r\n )\n comp_exp_r = Mock(spec=ComparisonExpression)\n\n logical_exp = LogicalExpression(\n ExpressionType.LOGICAL_AND,\n comp_exp_l,\n comp_exp_r\n )\n\n tuples = Batch(pd.DataFrame(\n {0: [1, 2, 3], 1: [4, 5, 6]}))\n self.assertEqual(\n [False, False, False],\n logical_exp.evaluate(tuples).frames[0].tolist()\n )\n 
comp_exp_r.evaluate.assert_not_called()\n\n def test_short_circuiting_or_complete(self):\n # tests whether right-hand side is bypassed completely with or\n tup_val_exp_l = TupleValueExpression(col_name=0)\n tup_val_exp_r = TupleValueExpression(col_name=1)\n\n comp_exp_l = ComparisonExpression(\n ExpressionType.COMPARE_EQUAL,\n tup_val_exp_l,\n tup_val_exp_r\n )\n comp_exp_r = Mock(spec=ComparisonExpression)\n\n logical_exp = LogicalExpression(\n ExpressionType.LOGICAL_OR,\n comp_exp_l,\n comp_exp_r\n )\n\n tuples = Batch(pd.DataFrame(\n {0: [1, 2, 3], 1: [1, 2, 3]}))\n self.assertEqual(\n [True, True, True],\n logical_exp.evaluate(tuples).frames[0].tolist()\n )\n comp_exp_r.evaluate.assert_not_called()\n\n def test_short_circuiting_and_partial(self):\n # tests whether right-hand side is partially executed with and\n tup_val_exp_l = TupleValueExpression(col_name=0)\n tup_val_exp_r = TupleValueExpression(col_name=1)\n\n comp_exp_l = ComparisonExpression(\n ExpressionType.COMPARE_EQUAL,\n tup_val_exp_l,\n tup_val_exp_r\n )\n comp_exp_r = Mock(spec=ComparisonExpression)\n comp_exp_r.evaluate = Mock(return_value=Mock(frames=[[True], [False]]))\n\n logical_exp = LogicalExpression(\n ExpressionType.LOGICAL_AND,\n comp_exp_l,\n comp_exp_r\n )\n\n tuples = Batch(pd.DataFrame(\n {0: [1, 2, 3, 4], 1: [1, 2, 5, 6]}))\n self.assertEqual(\n [True, False, False, False],\n logical_exp.evaluate(tuples).frames[0].tolist()\n )\n comp_exp_r.evaluate.assert_called_once_with(tuples, mask=[0, 1])\n\n def test_short_circuiting_or_partial(self):\n # tests whether right-hand side is partially executed with or\n tup_val_exp_l = TupleValueExpression(col_name=0)\n tup_val_exp_r = TupleValueExpression(col_name=1)\n\n comp_exp_l = ComparisonExpression(\n ExpressionType.COMPARE_EQUAL,\n tup_val_exp_l,\n tup_val_exp_r\n )\n comp_exp_r = Mock(spec=ComparisonExpression)\n comp_exp_r.evaluate = Mock(return_value=Mock(frames=[[True], [False]]))\n\n logical_exp = LogicalExpression(\n ExpressionType.LOGICAL_OR,\n comp_exp_l,\n comp_exp_r\n )\n\n tuples = Batch(pd.DataFrame(\n {0: [1, 2, 3, 4], 1: [5, 6, 3, 4]}))\n self.assertEqual(\n [True, False, True, True],\n logical_exp.evaluate(tuples).frames[0].tolist()\n )\n comp_exp_r.evaluate.assert_called_once_with(tuples, mask=[0, 1])\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
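
The bypass tests above assert short-circuiting with Mock.assert_not_called; a minimal standalone sketch of the same pattern, using the stdlib unittest.mock rather than the mock package imported in the row:

from unittest.mock import Mock

right_side = Mock()
left_results = [False, False, False]     # the left-hand side already decides everything
if any(left_results):                    # short-circuit: the right-hand side is never evaluated
    right_side.evaluate()
right_side.evaluate.assert_not_called()  # passes, mirroring test_short_circuiting_and_complete
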
rravenel/furuta_pendulum
|
[
"b2f2a3bb8c6f2676671a24c6f9ea4d8e6479835f",
"b2f2a3bb8c6f2676671a24c6f9ea4d8e6479835f"
] |
[
"bldc/odrive/drag_profiler.py",
"simple/src/run.py"
] |
[
"import odrive\nimport matplotlib.pyplot as plt\nimport time\n\n'''\nMeasure internal drag/friction. Spin it up and clock it slowing down.\n\n1465rpm / 154 rad/s / 24.4Hz: -356 rad/s^2\n2197rpm / 230 rad/s / 36.6Hz: -378 rad/s^2\n2930rpm / 307 rad/s / 48.8Hz: -342 rad/s^2\n3663rpm / 383 rad/s / 61.0Hz: -324 rad/s^2\n\n'''\n# max is 600,000\nv_target = 500000\nt_sample = 5\nstill_count = 20\nc2rad = 1303.8\nt_cut = 1.05\nv_sample = 25\n\nprint(\"Connecting...\")\nd = odrive.find_any()\nprint(\"Connected\")\nx = d.axis0\n\nx.controller.config.control_mode = 1\nx.controller.current_setpoint = 0\n\ndef test():\n\tx.controller.config.control_mode = 2\n\tx.controller.vel_setpoint = v_target\n\ttime.sleep(0.5)\n\n\tx.controller.config.control_mode = 1\n\tx.controller.current_setpoint = 0\n\n\tv_last = 0\n\tt_start = time.time()\n\tt_last = t_start\n\tnow = t_start\n\tzero_v_count = 0\n\twhile now - t_start < t_sample:\n\t\tif zero_v_count >= still_count:\n\t\t\tbreak\n\n\t\tv = x.encoder.vel_estimate\n\t\tv = v / c2rad\n\t\tnow = time.time()\n\t\tdv = v - v_last\n\t\tdt = now - t_last\n\t\ta = dv/dt\n\n\t\tbuf_t.append(now)\t\n\t\tbuf_v.append(v)\n\t\tbuf_a.append(a)\n\n\t\tv_last = v\n\t\tt_last = now\n\n\t\tif 0 == int(v):\n\t\t\tzero_v_count += 1\n\t\telse:\n\t\t\tzero_v_count = 0\n\n\nbuf_t = []\nbuf_v = []\nbuf_a = []\n\ncount = 1\nfor i in range(count):\n\ttest()\n\n# throw out first sample from v = 0\nbuf_t = buf_t[1:]\nbuf_v = buf_v[1:]\nbuf_a = buf_a[1:]\n\ndata = []\n\ndrag_map_v = []\ndrag_map = []\nbuf_seg = []\n\nt_start = buf_t[0]\nfor i in range(len(buf_t)):\n\tt = buf_t[i] - t_start\n\tv = buf_v[i]\n\ta = int(buf_a[i])\n\n\tprint(\"#%d:\\tt: %fs\\tv: %frad/s\\ta: %drad/s2\" % (i, t, v, a))\n\n\tif t > 0.05 and t < t_cut:\n\t\tdata.append(a)\n\n\tbuf_seg.append(a)\n\tif i > 0 and 0 == i%v_sample:\n\t\tv_diff = buf_v[i-10] - v\n\t\tdrag_map_v.append(v + v_diff/2)\n\t\tdrag_map.append(sum(buf_seg)/len(buf_seg))\n\t\tbuf_seg = []\n\t\tprint(\"\\tv: %f\\td: %f\" % (drag_map_v[-1], drag_map[-1]))\n\n\t# alter for rendering\n\tbuf_t[i] = t\n\tbuf_v[i] = 25 * v\n\ndrag = sum(data) / len(data)\nprint(\"Acceleration due to drag: %frad/s2\" % (drag))\n\n#plt.plot(buf_t, len(buf_t) * [0])\n#plt.plot(buf_t, buf_a)\n#plt.plot(buf_t, buf_v)\nplt.plot(drag_map_v, len(drag_map) * [0])\nplt.plot(drag_map_v, drag_map)\nplt.show()\n",
"import sys\nimport re\nimport multiprocessing\nimport os.path as osp\nimport gym\nfrom collections import defaultdict\nimport tensorflow as tf\nimport numpy as np\n\nfrom baselines.common.vec_env import VecFrameStack, VecNormalize, VecEnv\nfrom baselines.common.vec_env.vec_video_recorder import VecVideoRecorder\nfrom baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, make_env\nfrom baselines.common.tf_util import get_session\nfrom baselines import logger\nfrom importlib import import_module\n\ntry:\n from mpi4py import MPI\nexcept ImportError:\n MPI = None\n\ntry:\n import pybullet_envs\nexcept ImportError:\n pybullet_envs = None\n\ntry:\n import roboschool\nexcept ImportError:\n roboschool = None\n\n_game_envs = defaultdict(set)\nfor env in gym.envs.registry.all():\n # TODO: solve this with regexes\n env_type = env.entry_point.split(':')[0].split('.')[-1]\n _game_envs[env_type].add(env.id)\n\n# reading benchmark names directly from retro requires\n# importing retro here, and for some reason that crashes tensorflow\n# in ubuntu\n_game_envs['retro'] = {\n 'BubbleBobble-Nes',\n 'SuperMarioBros-Nes',\n 'TwinBee3PokoPokoDaimaou-Nes',\n 'SpaceHarrier-Nes',\n 'SonicTheHedgehog-Genesis',\n 'Vectorman-Genesis',\n 'FinalFight-Snes',\n 'SpaceInvaders-Snes',\n}\n\n\ndef train(args, extra_args):\n env_type, env_id = get_env_type(args)\n print('env_type: {}'.format(env_type))\n\n total_timesteps = int(args.num_timesteps)\n seed = args.seed\n\n learn = get_learn_function(args.alg)\n alg_kwargs = get_learn_function_defaults(args.alg, env_type)\n alg_kwargs.update(extra_args)\n\n env = build_env(args)\n if args.save_video_interval != 0:\n env = VecVideoRecorder(env, osp.join(logger.get_dir(), \"videos\"), record_video_trigger=lambda x: x % args.save_video_interval == 0, video_length=args.save_video_length)\n\n if args.network:\n alg_kwargs['network'] = args.network\n else:\n if alg_kwargs.get('network') is None:\n alg_kwargs['network'] = get_default_network(env_type)\n\n print('Training {} on {}:{} with arguments \\n{}'.format(args.alg, env_type, env_id, alg_kwargs))\n\n model = learn(\n env=env,\n seed=seed,\n total_timesteps=total_timesteps,\n **alg_kwargs\n )\n\n return model, env\n\n\ndef build_env(args):\n ncpu = multiprocessing.cpu_count()\n if sys.platform == 'darwin': ncpu //= 2\n nenv = args.num_env or ncpu\n alg = args.alg\n seed = args.seed\n\n env_type, env_id = get_env_type(args)\n\n if env_type in {'atari', 'retro'}:\n if alg == 'deepq':\n env = make_env(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': True})\n elif alg == 'trpo_mpi':\n env = make_env(env_id, env_type, seed=seed)\n else:\n frame_stack_size = 4\n env = make_vec_env(env_id, env_type, nenv, seed, gamestate=args.gamestate, reward_scale=args.reward_scale)\n env = VecFrameStack(env, frame_stack_size)\n\n else:\n config = tf.ConfigProto(allow_soft_placement=True,\n intra_op_parallelism_threads=1,\n inter_op_parallelism_threads=1)\n config.gpu_options.allow_growth = True\n get_session(config=config)\n\n flatten_dict_observations = alg not in {'her'}\n env = make_vec_env(env_id, env_type, args.num_env or 1, seed, reward_scale=args.reward_scale, flatten_dict_observations=flatten_dict_observations)\n\n if env_type == 'mujoco':\n env = VecNormalize(env, use_tf=True)\n\n return env\n\n\ndef get_env_type(args):\n env_id = args.env\n\n if args.env_type is not None:\n return args.env_type, env_id\n\n # Re-parse the gym registry, since we could have new envs since last time.\n for env in 
gym.envs.registry.all():\n env_type = env.entry_point.split(':')[0].split('.')[-1]\n _game_envs[env_type].add(env.id) # This is a set so add is idempotent\n\n if env_id in _game_envs.keys():\n env_type = env_id\n env_id = [g for g in _game_envs[env_type]][0]\n else:\n env_type = None\n for g, e in _game_envs.items():\n if env_id in e:\n env_type = g\n break\n if ':' in env_id:\n env_type = re.sub(r':.*', '', env_id)\n assert env_type is not None, 'env_id {} is not recognized in env types'.format(env_id, _game_envs.keys())\n\n return env_type, env_id\n\n\ndef get_default_network(env_type):\n if env_type in {'atari', 'retro'}:\n return 'cnn'\n else:\n return 'mlp'\n\ndef get_alg_module(alg, submodule=None):\n submodule = submodule or alg\n try:\n # first try to import the alg module from baselines\n alg_module = import_module('.'.join(['baselines', alg, submodule]))\n except ImportError:\n # then from rl_algs\n alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))\n\n return alg_module\n\n\ndef get_learn_function(alg):\n return get_alg_module(alg).learn\n\n\ndef get_learn_function_defaults(alg, env_type):\n try:\n alg_defaults = get_alg_module(alg, 'defaults')\n kwargs = getattr(alg_defaults, env_type)()\n except (ImportError, AttributeError):\n kwargs = {}\n return kwargs\n\n\n\ndef parse_cmdline_kwargs(args):\n '''\n convert a list of '='-spaced command-line arguments to a dictionary, evaluating python objects when possible\n '''\n def parse(v):\n\n assert isinstance(v, str)\n try:\n return eval(v)\n except (NameError, SyntaxError):\n return v\n\n return {k: parse(v) for k,v in parse_unknown_args(args).items()}\n\n\ndef configure_logger(log_path, **kwargs):\n if log_path is not None:\n logger.configure(log_path)\n else:\n logger.configure(**kwargs)\n\n\ndef main(args):\n # configure logger, disable logging in child MPI processes (with rank > 0)\n\n arg_parser = common_arg_parser()\n args, unknown_args = arg_parser.parse_known_args(args)\n extra_args = parse_cmdline_kwargs(unknown_args)\n\n if MPI is None or MPI.COMM_WORLD.Get_rank() == 0:\n rank = 0\n configure_logger(args.log_path)\n else:\n rank = MPI.COMM_WORLD.Get_rank()\n configure_logger(args.log_path, format_strs=[])\n\n model, env = train(args, extra_args)\n\n if args.save_path is not None and rank == 0:\n save_path = osp.expanduser(args.save_path)\n model.save(save_path)\n\n if args.play:\n logger.log(\"Running trained model\")\n obs = env.reset()\n\n state = model.initial_state if hasattr(model, 'initial_state') else None\n dones = np.zeros((1,))\n\n episode_rew = np.zeros(env.num_envs) if isinstance(env, VecEnv) else np.zeros(1)\n while True:\n if state is not None:\n actions, _, state, _ = model.step(obs,S=state, M=dones)\n else:\n actions, _, _, _ = model.step(obs)\n\n obs, rew, done, _ = env.step(actions)\n #print(\"cos: %.4f\\tsin: %.4f\\tvel: %.4f\\tu: %.4f\" % (obs[0][0], obs[0][1], obs[0][2], actions[0]))\n \n episode_rew += rew\n env.render()\n done_any = done.any() if isinstance(done, np.ndarray) else done\n if done_any:\n for i in np.nonzero(done)[0]:\n print('episode_rew={}'.format(episode_rew[i]))\n episode_rew[i] = 0\n\n env.close()\n\n return model\n\nif __name__ == '__main__':\n main(sys.argv)\n "
] |
[
[
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show"
],
[
"tensorflow.ConfigProto",
"numpy.zeros",
"numpy.nonzero"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
arassadin/SYQ
|
[
"d30e6f0053ada3ad504038698a8756425594aa22",
"b17744c2aba3aba7e7e72decb3b8a02792d33b54"
] |
[
"tensorpack/train/input_data.py",
"tensorpack/callbacks/dump.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: input_data.py\n# Author: Yuxin Wu <[email protected]>\n\nimport tensorflow as tf\nimport threading\nfrom abc import ABCMeta, abstractmethod\nimport six\n\nfrom ..dataflow import DataFlow, RepeatedData\nfrom ..tfutils.summary import add_moving_summary\nfrom ..utils import logger\nfrom ..callbacks.concurrency import StartProcOrThread\n\n__all__ = ['QueueInput', 'FeedfreeInput', 'TensorInput',\n 'DummyConstantInput']\n\[email protected]_metaclass(ABCMeta)\nclass InputData(object):\n pass\n\nclass FeedInput(InputData):\n def __init__(self, ds):\n assert isinstance(ds, DataFlow), ds\n self.ds = ds\n\n def size(self):\n return self.ds.size()\n\n def _setup(self, trainer):\n self.input_vars = trainer.model.get_input_vars()\n rds = RepeatedData(self.ds, -1)\n rds.reset_state()\n self.data_producer = rds.get_data()\n\n def next_feed(self):\n data = next(self.data_producer)\n feed = dict(zip(self.input_vars, data))\n return feed\n\nclass FeedfreeInput(InputData):\n def get_input_tensors(self):\n return self._get_input_tensors()\n\n @abstractmethod\n def _get_input_tensors(self):\n \"\"\"\n always create and return a list of new input tensors\n \"\"\"\n\nclass EnqueueThread(threading.Thread):\n def __init__(self, trainer, queue, ds, input_placehdrs):\n super(EnqueueThread, self).__init__()\n self.name = 'EnqueueThread'\n self.daemon = True\n\n self.dataflow = ds\n self.queue = queue\n\n self.sess = trainer.sess\n self.coord = trainer.coord\n self.placehdrs = input_placehdrs\n\n self.op = self.queue.enqueue(self.placehdrs)\n self.close_op = self.queue.close(cancel_pending_enqueues=True)\n self.size_op = self.queue.size()\n add_moving_summary(tf.cast(\n self.size_op, tf.float32, name='input_queue_size'))\n\n def run(self):\n self.dataflow.reset_state()\n with self.sess.as_default():\n try:\n while True:\n for dp in self.dataflow.get_data():\n if self.coord.should_stop():\n return\n feed = dict(zip(self.placehdrs, dp))\n #print 'qsize:', self.sess.run([self.op, self.size_op], feed_dict=feed)[1]\n self.op.run(feed_dict=feed)\n except tf.errors.CancelledError as e:\n pass\n except Exception:\n logger.exception(\"Exception in EnqueueThread:\")\n finally:\n self.coord.request_stop()\n try:\n self.sess.run(self.close_op)\n except RuntimeError: # session already closed\n pass\n logger.info(\"Enqueue Thread Exited.\")\n\nclass QueueInput(FeedfreeInput):\n def __init__(self, ds, queue=None):\n \"\"\"\n :param ds: a `DataFlow` instance\n :param queue: a `tf.QueueBase` instance to be used to buffer datapoints.\n Defaults to a FIFO queue of size 50.\n \"\"\"\n assert isinstance(ds, DataFlow), ds\n self.queue = queue\n self.ds = ds\n\n def size(self):\n return self.ds.size()\n\n def _setup(self, trainer):\n self.input_placehdrs = trainer.model.get_input_vars()\n assert len(self.input_placehdrs) > 0, \\\n \"QueueInput can only be used with input placeholders!\"\n if self.queue is None:\n self.queue = tf.FIFOQueue(\n 50, [x.dtype for x in self.input_placehdrs],\n name='input_queue')\n self.thread = EnqueueThread(\n trainer, self.queue, self.ds, self.input_placehdrs)\n trainer.config.callbacks.append(StartProcOrThread(self.thread))\n\n def _get_input_tensors(self):\n ret = self.queue.dequeue(name='input_deque')\n if isinstance(ret, tf.Tensor): # only one input\n ret = [ret]\n assert len(ret) == len(self.input_placehdrs)\n for qv, v in zip(ret, self.input_placehdrs):\n qv.set_shape(v.get_shape())\n\n # test the overhead of queue\n #with tf.device('/gpu:0'):\n 
#ret = [tf.Variable(tf.random_normal([128,224,224,3],\n #dtype=tf.float32), trainable=False),\n #tf.Variable(tf.ones([128], dtype=tf.int32), trainable=False)]\n return ret\n\nclass DummyConstantInput(QueueInput):\n \"\"\" only for debugging performance issues \"\"\"\n def __init__(self, ds, shapes):\n super(DummyConstantInput, self).__init__(ds)\n self.shapes = shapes\n logger.warn(\"Using dummy input for debug!\")\n\n def _get_input_tensors(self):\n placehdrs = self.input_placehdrs\n assert len(self.shapes) == len(placehdrs)\n ret = []\n for idx, p in enumerate(placehdrs):\n with tf.device('/gpu:0'):\n ret.append(tf.get_variable('dummy-' + p.op.name,\n shape=self.shapes[idx], dtype=p.dtype, trainable=False,\n initializer=tf.constant_initializer()))\n return ret\n\nclass TensorInput(FeedfreeInput):\n def __init__(self, get_tensor_fn, size=None):\n self.get_tensor_fn = get_tensor_fn\n self._size = size\n\n def size(self):\n if self._size is None:\n raise ValueError(\"size of TensorInput is undefined!\")\n return self._size\n\n def _setup(self, trainer):\n pass\n\n def _get_input_tensors(self):\n return self.get_tensor_fn()\n",
"# -*- coding: UTF-8 -*-\n# File: dump.py\n# Author: Yuxin Wu <[email protected]>\n\nimport os\nimport cv2\nimport numpy as np\n\nfrom .base import Callback\nfrom ..utils import logger\nfrom ..tfutils import get_op_var_name\n\n__all__ = ['DumpParamAsImage']\n\nclass DumpParamAsImage(Callback):\n \"\"\"\n Dump a variable to image(s) after every epoch to logger.LOG_DIR.\n \"\"\"\n def __init__(self, var_name, prefix=None, map_func=None, scale=255, clip=False):\n \"\"\"\n :param var_name: the name of the variable.\n :param prefix: the filename prefix for saved images. Default is the op name.\n :param map_func: map the value of the variable to an image or list of\n images of shape [h, w] or [h, w, c]. If None, will use identity\n :param scale: a multiplier on pixel values, applied after map_func. default to 255\n :param clip: whether to clip the result to [0, 255]\n \"\"\"\n op_name, self.var_name = get_op_var_name(var_name)\n self.func = map_func\n if prefix is None:\n self.prefix = op_name\n else:\n self.prefix = prefix\n self.log_dir = logger.LOG_DIR\n self.scale = scale\n self.clip = clip\n\n def _before_train(self):\n # TODO might not work for multiGPU?\n self.var = self.graph.get_tensor_by_name(self.var_name)\n\n def _trigger_epoch(self):\n val = self.trainer.sess.run(self.var)\n if self.func is not None:\n val = self.func(val)\n if isinstance(val, list):\n for idx, im in enumerate(val):\n self._dump_image(im, idx)\n else:\n self._dump_image(val)\n\n def _dump_image(self, im, idx=None):\n assert im.ndim in [2, 3], str(im.ndim)\n fname = os.path.join(\n self.log_dir,\n self.prefix + '-ep{:03d}{}.png'.format(\n self.epoch_num, '-' + str(idx) if idx else ''))\n res = im * self.scale\n if self.clip:\n res = np.clip(res, 0, 255)\n cv2.imwrite(fname, res.astype('uint8'))\n\n"
] |
[
[
"tensorflow.constant_initializer",
"tensorflow.device",
"tensorflow.cast",
"tensorflow.FIFOQueue"
],
[
"numpy.clip"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
myychal/hypernet
|
[
"778e9c1a2f27ab1c664bb6d8ea49c65d0c7bdade",
"778e9c1a2f27ab1c664bb6d8ea49c65d0c7bdade"
] |
[
"python_research/preprocessing/attribute_profiles/max_tree/attribute_matrix_construction.py",
"python_research/experiments/multiple_feature_learning/utils/unbalanced_data.py"
] |
[
"import numpy as np\nfrom .attributes_incrementally import StandardDeviation, LengthOfDiagonal, \\\n FirstHuMoment, Area\nfrom ..utils.data_types import Pixel\n\n\ndef construct_area_matrix(image: np.ndarray) -> np.ndarray:\n matrix = np.ones(image.shape, dtype=Area)\n image_width = image.shape[1]\n for index, _ in enumerate(image.flatten()):\n x = index % image_width\n y = int(index / image_width)\n matrix[y, x] = Area()\n return matrix\n\n\ndef construct_std_dev_matrix(image: np.ndarray) -> np.ndarray:\n image_width = image.shape[1]\n std_dev_matrix = np.zeros(image.shape, dtype=StandardDeviation)\n for index, pixel_value in enumerate(image.flatten()):\n x = index % image_width\n y = int(index / image_width)\n std_dev_matrix[y, x] = StandardDeviation(value=pixel_value)\n return std_dev_matrix\n\n\ndef construct_length_of_diagonal_matrix(image: np.ndarray) -> np.ndarray:\n width = image.shape[1]\n image_size = image.size\n matrix = np.zeros(image.shape, dtype=LengthOfDiagonal)\n for index in range(0, image_size):\n x = index % width\n y = int(index / width)\n matrix[y, x] = LengthOfDiagonal(x, x, y, y)\n return matrix\n\n\ndef construct_first_hu_moment_matrix(image: np.ndarray) -> np.ndarray:\n width = image.shape[1]\n max_ = float(np.amax(image))\n min_ = float(np.amin(image))\n matrix = np.zeros(image.shape, dtype=FirstHuMoment)\n for index, pixel_value in enumerate(image.flatten()):\n x = index % width\n y = int(index / width)\n norm_pixel_value = (float(pixel_value) - min_) / (max_ - min_)\n matrix[y, x] = FirstHuMoment(Pixel(x, y, norm_pixel_value))\n return matrix\n\n\nmatrix_constructs = {\n 'area': construct_area_matrix,\n 'stddev': construct_std_dev_matrix,\n 'diagonal': construct_length_of_diagonal_matrix,\n 'moment': construct_first_hu_moment_matrix\n}\n\n\ndef construct_matrix(attribute_name: str, image: np.ndarray) -> np.ndarray:\n return matrix_constructs[attribute_name](image)\n",
"import numpy as np\nfrom copy import copy\nfrom random import shuffle\nfrom collections import OrderedDict\nfrom keras.utils import to_categorical\n\nBACKGROUND_LABEL = 0\n\n\nclass UnbalancedData:\n def __init__(self, file_path, gt_path, samples_number):\n self.x = np.load(file_path)\n self.y = np.load(gt_path)\n self.x_train = None\n self.y_train = None\n self.x_val = []\n self.y_val = []\n self.x_test = None\n self.y_test = None\n self.construct_train_val_sets(samples_number)\n self.normalize_sets()\n\n def reshape_data(self):\n data = []\n labels = []\n for i, row in enumerate(self.x):\n for j, pixel in enumerate(row):\n if self.y[i, j] != BACKGROUND_LABEL:\n sample = copy(self.x[i, j, :])\n data.append(sample.reshape((sample.shape[-1], 1)))\n labels.append(self.y[i, j])\n return np.array(data), np.array(labels)\n\n def construct_train_val_sets(self, samples_number):\n data, labels = self.reshape_data()\n samples_count = len(data)\n indexes = [index for index in range(samples_count)]\n shuffle(indexes)\n train_indexes = indexes[:samples_number]\n test_indexes = indexes[samples_number:]\n self.x_train = data[train_indexes]\n self.y_train = labels[train_indexes]\n val_indexes = dict.fromkeys(np.unique(self.y_train))\n indexes = []\n for label in val_indexes:\n label_indexes = np.where(self.y_train == label)[0]\n label_indexes = list(label_indexes[:int(len(label_indexes) * 0.1)])\n indexes += label_indexes\n self.x_val = self.x_train[indexes, ...]\n self.y_val = self.y_train[indexes]\n self.x_train = np.delete(self.x_train, indexes, axis=0)\n self.y_train = np.delete(self.y_train, indexes, axis=0)\n self.x_test = data[test_indexes]\n self.y_test = labels[test_indexes]\n\n train_labels = np.concatenate((self.y_train, self.y_val), axis=0)\n self.counts = OrderedDict.fromkeys(np.delete(np.unique(self.y), BACKGROUND_LABEL), 0)\n for sample in train_labels:\n self.counts[sample] += 1\n\n def normalize_sets(self):\n min_ = np.min(self.x_train) if np.min(self.x_train) < np.min(self.x_val) else np.min(self.x_val)\n max_ = np.max(self.x_train) if np.max(self.x_train) > np.max(self.x_val) else np.max(self.x_val)\n self.x_train = (self.x_train.astype(np.float64) - min_) / (max_ - min_)\n self.x_val = (self.x_val.astype(np.float64) - min_) / (max_ - min_)\n self.x_test = (self.x_test.astype(np.float64) - min_) / (max_ - min_)\n self.y_train = to_categorical(self.y_train - 1, len(np.unique(self.y)) - 1)\n self.y_val = to_categorical(self.y_val - 1, len(np.unique(self.y)) - 1)\n self.y_test = to_categorical(self.y_test - 1, len(np.unique(self.y)) - 1)"
] |
[
[
"numpy.amin",
"numpy.amax",
"numpy.zeros",
"numpy.ones"
],
[
"numpy.unique",
"numpy.min",
"numpy.concatenate",
"numpy.max",
"numpy.delete",
"numpy.load",
"numpy.array",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kitstar/nni
|
[
"c5c0fa2e2dede71d2797a8bafa85c90f59d311f8",
"141f24d42d2e86ace3774d931bfab58dca0ef1ad"
] |
[
"src/sdk/pynni/tests/test_trial.py",
"examples/trials/kaggle-tgs-salt/loader.py"
] |
[
"# Copyright (c) Microsoft Corporation. All rights reserved.\n#\n# MIT License\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and\n# associated documentation files (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge, publish, distribute,\n# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or\n# substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT\n# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT\n# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n# ==================================================================================================\n\n\nimport nni\nimport nni.platform.test as test_platform\nimport nni.trial\n\nimport numpy as np\nfrom unittest import TestCase, main\n\n\nclass TrialTestCase(TestCase):\n def setUp(self):\n self._trial_params = { 'msg': 'hi', 'x': 123, 'dict': { 'key': 'value', 'y': None } }\n nni.trial._params = { 'parameter_id': 'test_param', 'parameters': self._trial_params }\n\n def test_get_next_parameter(self):\n self.assertEqual(nni.get_next_parameter(), self._trial_params)\n\n def test_report_intermediate_result(self):\n nni.report_intermediate_result(123)\n self.assertEqual(test_platform.get_last_metric(), {\n 'parameter_id': 'test_param',\n 'trial_job_id': 'test_trial_job_id',\n 'type': 'PERIODICAL',\n 'sequence': 0,\n 'value': 123\n })\n\n def test_report_final_result_simple(self):\n self._test_report_final_result(123, 123)\n\n def test_report_final_result_object(self):\n obj = ['obj1', {'key1': 'v1', 'k2': None}, 233, 0.456]\n self._test_report_final_result(obj, obj)\n\n def test_report_final_result_numpy(self):\n self._test_report_final_result(np.float32(0.25), 0.25)\n\n def test_report_final_result_nparray(self):\n arr = np.array([[1, 2, 3], [4, 5, 6]])\n nni.report_final_result(arr)\n out = test_platform.get_last_metric()\n self.assertEqual(len(arr), 2)\n self.assertEqual(len(arr[0]), 3)\n self.assertEqual(len(arr[1]), 3)\n self.assertEqual(arr[0][0], 1)\n self.assertEqual(arr[0][1], 2)\n self.assertEqual(arr[0][2], 3)\n self.assertEqual(arr[1][0], 4)\n self.assertEqual(arr[1][1], 5)\n self.assertEqual(arr[1][2], 6)\n\n def _test_report_final_result(self, in_, out):\n nni.report_final_result(in_)\n self.assertEqual(test_platform.get_last_metric(), {\n 'parameter_id': 'test_param',\n 'trial_job_id': 'test_trial_job_id',\n 'type': 'FINAL',\n 'sequence': 0,\n 'value': out\n })\n\n\nif __name__ == '__main__':\n main()\n",
"# Copyright (c) Microsoft Corporation\n# All rights reserved.\n#\n# MIT License\n#\n# Permission is hereby granted, free of charge,\n# to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish, distribute, sublicense,\n# and/or sell copies of the Software, and\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport os, cv2, glob\nimport numpy as np\nfrom PIL import Image\n\nimport torch\nimport torch.utils.data as data\nfrom torchvision import datasets, models, transforms\nfrom utils import read_masks, get_test_meta, get_nfold_split\nimport augmentation as aug\nfrom settings import *\n\nclass ImageDataset(data.Dataset):\n def __init__(self, train_mode, meta, augment_with_target=None,\n image_augment=None, image_transform=None, mask_transform=None):\n self.augment_with_target = augment_with_target\n self.image_augment = image_augment\n self.image_transform = image_transform\n self.mask_transform = mask_transform\n\n self.train_mode = train_mode\n self.meta = meta\n \n self.img_ids = meta[ID_COLUMN].values\n self.salt_exists = meta['salt_exists'].values\n self.is_train = meta['is_train'].values\n \n if self.train_mode:\n self.mask_filenames = meta[Y_COLUMN].values\n\n def __getitem__(self, index):\n base_img_fn = '{}.png'.format(self.img_ids[index])\n if self.is_train[index]: #self.train_mode:\n img_fn = os.path.join(TRAIN_IMG_DIR, base_img_fn)\n else:\n img_fn = os.path.join(TEST_IMG_DIR, base_img_fn)\n img = self.load_image(img_fn)\n\n if self.train_mode:\n base_mask_fn = '{}.png'.format(self.img_ids[index])\n if self.is_train[index]:\n mask_fn = os.path.join(TRAIN_MASK_DIR, base_mask_fn)\n else:\n mask_fn = os.path.join(TEST_DIR, 'masks', base_mask_fn)\n mask = self.load_image(mask_fn, True)\n img, mask = self.aug_image(img, mask)\n return img, mask, self.salt_exists[index]\n else:\n img = self.aug_image(img)\n return [img]\n\n def aug_image(self, img, mask=None):\n if mask is not None:\n if self.augment_with_target is not None:\n img, mask = self.augment_with_target(img, mask)\n if self.image_augment is not None:\n img = self.image_augment(img)\n if self.mask_transform is not None:\n mask = self.mask_transform(mask)\n if self.image_transform is not None:\n img = self.image_transform(img)\n return img, mask\n else:\n if self.image_augment is not None:\n img = self.image_augment(img)\n if self.image_transform is not None:\n img = self.image_transform(img)\n return img\n\n def load_image(self, img_filepath, grayscale=False):\n image = Image.open(img_filepath, 'r')\n if not grayscale:\n image = image.convert('RGB')\n else:\n image = image.convert('L').point(lambda x: 0 if x < 128 else 1, 'L')\n return image\n\n def __len__(self):\n return len(self.img_ids)\n\n 
def collate_fn(self, batch):\n imgs = [x[0] for x in batch]\n inputs = torch.stack(imgs)\n\n if self.train_mode:\n masks = [x[1] for x in batch]\n labels = torch.stack(masks)\n\n salt_target = [x[2] for x in batch]\n return inputs, labels, torch.FloatTensor(salt_target)\n else:\n return inputs\n\ndef mask_to_tensor(x):\n x = np.array(x).astype(np.float32)\n x = np.expand_dims(x, axis=0)\n x = torch.from_numpy(x)\n return x\n\nimg_transforms = [\n transforms.Grayscale(num_output_channels=3),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ]\n\ndef get_tta_transforms(index, pad_mode):\n tta_transforms = {\n 0: [],\n 1: [transforms.RandomHorizontalFlip(p=2.)],\n 2: [transforms.RandomVerticalFlip(p=2.)],\n 3: [transforms.RandomHorizontalFlip(p=2.), transforms.RandomVerticalFlip(p=2.)]\n }\n if pad_mode == 'resize':\n return transforms.Compose([transforms.Resize((H, W)), *(tta_transforms[index]), *img_transforms])\n else:\n return transforms.Compose([*(tta_transforms[index]), *img_transforms])\n\ndef get_image_transform(pad_mode):\n if pad_mode == 'resize':\n return transforms.Compose([transforms.Resize((H, W)), *img_transforms])\n else:\n return transforms.Compose(img_transforms)\n\ndef get_mask_transform(pad_mode):\n if pad_mode == 'resize':\n return transforms.Compose(\n [\n transforms.Resize((H, W)),\n transforms.Lambda(mask_to_tensor),\n ]\n )\n else:\n return transforms.Compose(\n [\n transforms.Lambda(mask_to_tensor),\n ]\n )\n\ndef get_img_mask_augments(pad_mode, depths_channel=False):\n if depths_channel:\n affine_aug = aug.RandomAffineWithMask(5, translate=(0.1, 0.), scale=(0.9, 1.1), shear=None)\n else:\n affine_aug = aug.RandomAffineWithMask(15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=None)\n\n if pad_mode == 'resize':\n img_mask_aug_train = aug.Compose([\n aug.RandomHFlipWithMask(),\n affine_aug\n ])\n img_mask_aug_val = None\n else:\n img_mask_aug_train = aug.Compose([\n aug.PadWithMask((28, 28), padding_mode=pad_mode),\n aug.RandomHFlipWithMask(),\n affine_aug,\n aug.RandomResizedCropWithMask(H, scale=(1., 1.), ratio=(1., 1.))\n ])\n img_mask_aug_val = aug.PadWithMask((13, 13, 14, 14), padding_mode=pad_mode)\n\n return img_mask_aug_train, img_mask_aug_val\n\ndef get_train_loaders(ifold, batch_size=8, dev_mode=False, pad_mode='edge', meta_version=1, pseudo_label=False, depths=False):\n train_shuffle = True\n train_meta, val_meta = get_nfold_split(ifold, nfold=10, meta_version=meta_version)\n\n if pseudo_label:\n test_meta = get_test_meta()\n train_meta = train_meta.append(test_meta, sort=True)\n\n if dev_mode:\n train_shuffle = False\n train_meta = train_meta.iloc[:10]\n val_meta = val_meta.iloc[:10]\n #print(val_meta[X_COLUMN].values[:5])\n #print(val_meta[Y_COLUMN].values[:5])\n print(train_meta.shape, val_meta.shape)\n img_mask_aug_train, img_mask_aug_val = get_img_mask_augments(pad_mode, depths)\n\n train_set = ImageDataset(True, train_meta,\n augment_with_target=img_mask_aug_train,\n image_augment=transforms.ColorJitter(0.2, 0.2, 0.2, 0.2),\n image_transform=get_image_transform(pad_mode),\n mask_transform=get_mask_transform(pad_mode))\n\n train_loader = data.DataLoader(train_set, batch_size=batch_size, shuffle=train_shuffle, num_workers=4, collate_fn=train_set.collate_fn, drop_last=True)\n train_loader.num = len(train_set)\n\n val_set = ImageDataset(True, val_meta,\n augment_with_target=img_mask_aug_val,\n image_augment=None, \n image_transform=get_image_transform(pad_mode),\n 
mask_transform=get_mask_transform(pad_mode))\n val_loader = data.DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=val_set.collate_fn)\n val_loader.num = len(val_set)\n val_loader.y_true = read_masks(val_meta[ID_COLUMN].values)\n\n return train_loader, val_loader\n\ndef get_test_loader(batch_size=16, index=0, dev_mode=False, pad_mode='edge'):\n test_meta = get_test_meta()\n if dev_mode:\n test_meta = test_meta.iloc[:10]\n test_set = ImageDataset(False, test_meta,\n image_augment=None if pad_mode == 'resize' else transforms.Pad((13,13,14,14), padding_mode=pad_mode), \n image_transform=get_tta_transforms(index, pad_mode))\n test_loader = data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=4, collate_fn=test_set.collate_fn, drop_last=False)\n test_loader.num = len(test_set)\n test_loader.meta = test_set.meta\n\n return test_loader\n\ndepth_channel_tensor = None\n\ndef get_depth_tensor(pad_mode):\n global depth_channel_tensor\n\n if depth_channel_tensor is not None:\n return depth_channel_tensor\n \n depth_tensor = None\n\n if pad_mode == 'resize':\n depth_tensor = np.zeros((H, W))\n for row, const in enumerate(np.linspace(0, 1, H)):\n depth_tensor[row, :] = const \n else:\n depth_tensor = np.zeros((ORIG_H, ORIG_W))\n for row, const in enumerate(np.linspace(0, 1, ORIG_H)):\n depth_tensor[row, :] = const\n depth_tensor = np.pad(depth_tensor, (14,14), mode=pad_mode) # edge or reflect\n depth_tensor = depth_tensor[:H, :W]\n\n depth_channel_tensor = torch.Tensor(depth_tensor)\n return depth_channel_tensor\n\ndef add_depth_channel(img_tensor, pad_mode):\n '''\n img_tensor: N, C, H, W\n '''\n img_tensor[:, 1] = get_depth_tensor(pad_mode)\n img_tensor[:, 2] = img_tensor[:, 0] * get_depth_tensor(pad_mode)\n\n\ndef test_train_loader():\n train_loader, val_loader = get_train_loaders(1, batch_size=4, dev_mode=False, pad_mode='edge', meta_version=2, pseudo_label=True)\n print(train_loader.num, val_loader.num)\n for i, data in enumerate(train_loader):\n imgs, masks, salt_exists = data\n #pdb.set_trace()\n print(imgs.size(), masks.size(), salt_exists.size())\n print(salt_exists)\n add_depth_channel(imgs, 'resize')\n print(masks)\n break\n #print(imgs)\n #print(masks)\n\ndef test_test_loader():\n test_loader = get_test_loader(4, pad_mode='resize')\n print(test_loader.num)\n for i, data in enumerate(test_loader):\n print(data.size())\n if i > 5:\n break\n\nif __name__ == '__main__':\n test_test_loader()\n #test_train_loader()\n #small_dict, img_ids = load_small_train_ids()\n #print(img_ids[:10])\n #print(get_tta_transforms(3, 'edge'))\n"
] |
[
[
"numpy.array",
"numpy.float32"
],
[
"numpy.expand_dims",
"numpy.pad",
"torch.Tensor",
"numpy.linspace",
"torch.utils.data.DataLoader",
"torch.from_numpy",
"torch.utils.data.size",
"torch.FloatTensor",
"torch.stack",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bgoli/stochpy
|
[
"ba06e5eaf1204dbc8ea39996ff8a08e9b0b5997d"
] |
[
"stochpy/modules/Analysis.py"
] |
[
"#! /usr/bin/env python\n\"\"\"\nAnalysis\n========\n\nThis module provides functions for Stochastic Simulation Algorithms Analysis (SSA). Implemented SSAs import this module to perform their analysis. Plotting of time series species, propensities), distributions (species, propensities, distributions), autocorrelations, and autocovariances (species, propensities) is possible.\n\nWritten by TR Maarleveld, Amsterdam, The Netherlands\nE-mail: [email protected]\n\n\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nfrom stochpy import _IsPlotting\nif _IsPlotting:\n from stochpy import plt\n from stochpy import matplotlib\n from matplotlib import gridspec,colors as clr\n\nfrom stochpy import _IsNumPy\nif _IsNumPy:\n import numpy as np\nelse:\n sys.exit()\n\nimport copy,sys\n\ndef getDataForTimeSimPlot(Arr_data,npoints = 100000,quiet=False):\n \"\"\"\n Input:\n - *Arr_data* (numpy array)\n - *npoints* [default = 10000] (integer)\n \"\"\"\n len_data = len(Arr_data)\n if (len_data > npoints): # use npoints only if datasets become too large\n L_data2plot = [Arr_data[0]]\n step_size = len_data//int(abs(npoints))\n for i in range(step_size,len_data,step_size):\n t = Arr_data[i][0]\n data_point = copy.deepcopy(L_data2plot[-1][1:].tolist())\n data_point.insert(0,t)\n L_data2plot.append(data_point)\n L_data2plot.append(Arr_data[i])\n if not quiet:\n print(\"Info: Plotting {0:d} out of {1:d} points. Use the argument 'npoints' to alter the number of plotted events.\".format(npoints,len_data) )\n else:\n L_data2plot = copy.deepcopy(Arr_data.tolist())\n j=1\n for i in range(1,len_data):\n t = Arr_data[i][0]\n data_prev = copy.deepcopy(Arr_data[i-1]) # data of previous ...\n data_prev[0] = t\n L_data2plot.insert(j,data_prev)\n j+=2\n return np.array(L_data2plot)\n\n\ndef Count(data,edges):\n \"\"\"\n Input:\n - *data* (list)\n - *edges* (list)\n \"\"\"\n n_edges = len(edges)\n L_output = np.zeros(n_edges)\n for value in data:\n for i in range(n_edges-1):\n if (value >= edges[i]) and (value < edges[i+1]):\n L_output[i]+=1\n return np.array(L_output)\n\n\ndef GetSpeciesDistributions(sim_output,species):\n \"\"\"\n Get distributions, means, standard deviations, and the (raw) moments\n\n Input:\n - *sim_output* (list)\n - *species* (list)\n\n Mean = mu = sum(x*P(x))\n Variance = sum(x^2 * p(x)) - mu**2\n\n Output:\n - *L_probability_mass*\n - *D_means*\n - *D_stds*\n - *D_moments*\n \"\"\"\n n_species = len(species)\n L_distributions = [{} for i in range(n_species)]\n starttime = sim_output[0][0]\n endtime = sim_output[-1][0]\n n_datapoints = len(sim_output)\n D_means = {}\n D_stds = {}\n D_moments = {}\n L_probability_mass = []\n if n_datapoints > 1:\n for t in range(n_datapoints-1):\n for i in range(n_species):\n try:\n L_distributions[i][int(sim_output[t][i+1])] += sim_output[t+1][0] - sim_output[t][0]\n except KeyError:\n L_distributions[i][int(sim_output[t][i+1])] = sim_output[t+1][0] - sim_output[t][0]\n for i,s_id in enumerate(species):\n x = np.array(sorted(L_distributions[i]),dtype=int)\n p_x = np.array([L_distributions[i][x_i] for x_i in x])/float(endtime-starttime) # probability = dt/T\n\n mu = (x*p_x).sum()\n mu_sq = (x**2*p_x).sum()\n var = mu_sq - mu**2\n std = var**0.5\n L_probability_mass.append([x,p_x])\n\n D_means[s_id] = mu\n D_stds[s_id] = std\n\n D_moments[s_id] = {}\n D_moments[s_id]['1'] = mu\n D_moments[s_id]['2'] = mu_sq\n D_moments[s_id]['3'] = (x**3*p_x).sum()\n D_moments[s_id]['4'] = (x**4*p_x).sum()\n\n return 
(L_probability_mass,D_means,D_stds,D_moments)\n\n\ndef GetDataDistributions(sim_output,identifiers):\n \"\"\"\n Get distributions, means, standard deviations, and the (raw) moments\n\n This function is different, because it does not assume integers, like GetSpeciesDistributions()\n\n Input:\n - *sim_output* (list)\n - *identifiers* (list)\n\n Mean = mu = sum(x*P(x))\n Variance = sum(x^2 * p(x)) - mu**2\n\n Output:\n - *L_probability_mass*\n - *D_means*\n - *D_stds*\n - *D_moments*\n \"\"\"\n n_identifiers = len(identifiers)\n L_distributions = [{} for i in range(n_identifiers)]\n starttime = sim_output[0][0]\n endtime = sim_output[-1][0]\n n_datapoints = len(sim_output)\n D_means = {}\n D_stds = {}\n D_moments = {}\n L_probability_mass = []\n if n_datapoints > 1:\n for t in range(n_datapoints-1):\n for i in range(n_identifiers):\n try:\n L_distributions[i][sim_output[t][i+1]] += sim_output[t+1][0] - sim_output[t][0]\n except KeyError:\n L_distributions[i][sim_output[t][i+1]] = sim_output[t+1][0] - sim_output[t][0]\n for i,id in enumerate(identifiers):\n x = np.array(sorted(L_distributions[i]))\n p_x = np.array([L_distributions[i][x_i] for x_i in x])/float(endtime-starttime) # probability = dt/T\n\n mu = (x*p_x).sum()\n mu_sq = (x**2*p_x).sum()\n var = mu_sq - mu**2\n std = var**0.5\n L_probability_mass.append([x,p_x])\n\n D_means[id] = mu\n D_stds[id] = std\n\n D_moments[id] = {}\n D_moments[id]['1'] = mu\n D_moments[id]['2'] = mu_sq\n D_moments[id]['3'] = (x**3*p_x).sum()\n D_moments[id]['4'] = (x**4*p_x).sum()\n\n return (L_probability_mass,D_means,D_stds,D_moments)\n\n\ndef LogBin(data,factor):\n \"\"\"\n Function that creates log bins\n\n Input:\n - *data* (list)\n - *factor* (float) determines the width of the bins\n Output:\n - *L_x* (list)\n - *L_y* (list)\n - *nbins* (integer)\n \"\"\"\n xmin = float(min(data))\n nbins = int(np.ceil(np.log(max(data)/xmin)/np.log(factor)))\n L_x = None\n L_y = None\n if nbins:\n L_edges = np.zeros(nbins)\n L_edges[0] = xmin\n for i in range(1,nbins): # 1,nbins\n L_edges[i] = L_edges[i-1]*factor\n\n L_x = L_edges[0:(nbins-1)]+np.diff(L_edges)/2\n L_dp = Count(data,L_edges)\n L_ry = np.array(L_dp[0:(nbins-1)])\n L_dedges = np.array(np.diff(L_edges))\n L_y = L_ry/(sum(L_ry)*L_dedges)\n return(L_x,L_y,nbins)\n\n\ndef ObtainWaitingtimes(data_stochsim,reactions):\n \"\"\"\n This function extracts the waiting times for each reaction of the model from the used SSA output.\n\n Input:\n - *data_stochsim* (python data object) that stores all simulation data\n - *reactions* (list)\n output:\n - *D_waiting_times* (dict)\n\n Note: It is impossible to use this function in combination with the Tau-leaping method, because the Tau-Leaping results are not exact!\n \"\"\"\n L_time = data_stochsim.time.flatten()\n L_fired_reactions = data_stochsim.fired_reactions # Reactions that fired at some time point\n D_waiting_times = {}\n D_last_time_fired = {}\n nreactions = len(reactions)\n for r_id in reactions:\n D_waiting_times[r_id] = [] # create a list that will contain event waiting times for reaction r\n\n for (current_time,r_index) in zip(L_time[1:],L_fired_reactions[1:]): # Updated Oktober 1st\n for i in range(1,nreactions+1): # fired reactions are (1,2,3, .... 
nreactions)\n if r_index == i:\n if r_index in D_last_time_fired:\n r_name = reactions[int(r_index-1)]\n D_waiting_times[r_name].append(current_time - D_last_time_fired[r_index]) # Add inter-arrival time\n D_last_time_fired[r_index] = current_time # Update last firing time\n else:\n D_last_time_fired[r_index] = current_time # Initial firing time\n\n elif r_index == -i: # Handle delayed completions 01-10-2014\n r_name_compl = reactions[ int(abs(r_index)-1) ] + '_Completion'\n if r_index in D_last_time_fired:\n D_waiting_times[r_name_compl].append(current_time - D_last_time_fired[r_index]) # Add inter-arrival time\n D_last_time_fired[r_index] = current_time # Update last firing time\n else:\n D_last_time_fired[r_index] = current_time # Initial firing time\n D_waiting_times.setdefault(r_name_compl, []) # Set keyname if not present\\\n #print current_time,D_last_time_fired\n return D_waiting_times\n\n\ndef GetAverageResults(regular_grid):\n \"\"\"\n Gets the averaged output of multiple trajectories\n\n Input:\n - *regular_grid* (nested list)\n \"\"\"\n means = []\n stds = []\n for data in regular_grid:\n means.append(np.mean(data,0))\n stds.append(np.std(data,0))\n return (np.array(means).transpose(),np.array(stds).transpose()) # test: 27 july 15\n\n\ndef RemoveBias(x, axis):\n \"Subtracts an estimate of the mean from signal x at axis\"\n padded_slice = [slice(d) for d in x.shape]\n padded_slice[axis] = np.newaxis\n mn = np.mean(x, axis=axis)\n return x - mn[tuple(padded_slice)]\n\n\ndef AutoCov(s, **kwargs):\n \"\"\"\n Returns the autocovariance of signal s at all lags.\n\n Notes:\n Adheres to the definition\n sxx[k] = E{S[n]S[n+k]} = cov{S[n],S[n+k]}\n where E{} is the expectation operator, and S is a zero mean process\n \"\"\"\n # only remove the mean once, if needed\n debias = kwargs.pop('debias', True)\n axis = kwargs.get('axis', -1)\n if debias:\n s = RemoveBias(s, axis)\n kwargs['debias'] = False\n return CrossCov(s, s, **kwargs)\n\n\ndef FFTconvolve(in1, in2, mode=\"full\", axis=None):\n \"\"\" Convolve two N-dimensional arrays using FFT. See convolve. 
\"\"\"\n s1 = np.array(in1.shape)\n s2 = np.array(in2.shape)\n complex_result = (np.issubdtype(in1.dtype, np.complex) or\n np.issubdtype(in2.dtype, np.complex))\n if axis is None:\n size = s1+s2-1\n fslice = tuple([slice(0, int(sz)) for sz in size])\n else:\n equal_shapes = s1==s2\n # allow equal_shapes[axis] to be False\n equal_shapes[axis] = True\n assert equal_shapes.all(), 'Shape mismatch on non-convolving axes'\n size = s1[axis]+s2[axis]-1\n fslice = [slice(l) for l in s1]\n fslice[axis] = slice(0, int(size))\n fslice = tuple(fslice)\n\n # Always use 2**n-sized FFT\n fsize = int(2**np.ceil(np.log2(size)))\n if axis is None:\n IN1 = np.fft.fftpack.fftn(in1,fsize)\n IN1 *= np.fft.fftpack.fftn(in2,fsize)\n ret = np.fft.fftpack.ifftn(IN1)[fslice].copy()\n else:\n IN1 = np.fft.fftpack.fft(in1,fsize,axis=axis)\n IN1 *= np.fft.fftpack.fft(in2,fsize,axis=axis)\n ret = np.fft.fftpack.ifft(IN1,axis=axis)[fslice].copy()\n del IN1\n if not complex_result:\n ret = ret.real\n if mode == \"full\":\n return ret\n elif mode == \"same\":\n if np.product(s1,axis=0) > np.product(s2,axis=0):\n osize = s1\n else:\n osize = s2\n return _centered(ret,osize)\n elif mode == \"valid\":\n return _centered(ret,abs(s2-s1)+1)\n\n\ndef CrossCov(x, y, axis=-1, all_lags=False, debias=True):\n \"\"\"\n Returns the crosscovariance sequence between two ndarrays.\n This is performed by calling fftconvolve on x, y[::-1]\n\n Input:\n - *x*: ndarray\n - *y*: ndarray\n - *axis*: time axis\n - *all_lags*: {True/False}\n whether to return all nonzero lags, or to clip the length of s_xy\n to be the length of x and y. If False, then the zero lag covariance\n is at index 0. Otherwise, it is found at (len(x) + len(y) - 1)/2\n - *debias*: {True/False}\n Always removes an estimate of the mean along the axis, unless\n told not to.\n\n Notes:\n cross covariance is defined as\n sxy[k] := E{X[t]*Y[t+k]}, where X,Y are zero mean random processes\n \"\"\"\n if x.shape[axis] != y.shape[axis]:\n raise ValueError('CrossCov() only works on same-length sequences for now')\n if debias:\n x = RemoveBias(x, axis)\n y = RemoveBias(y, axis)\n slicing = [slice(d) for d in x.shape]\n slicing[axis] = slice(None,None,-1)\n sxy = FFTconvolve(x, y[tuple(slicing)], axis=axis, mode='full')\n N = x.shape[axis]\n sxy /= N\n if all_lags:\n return sxy\n slicing[axis] = slice(N-1,2*N-1)\n return sxy[tuple(slicing)]\n\n\ndef Autocorrelation(s, **kwargs):\n \"\"\"\n Returns the autocorrelation of signal s at all lags.\n\n Notes:\n Adheres to the definition\n rxx[k] = E{S[n]S[n+k]}/E{S*S} = cov{S[n],S[n+k]}/sigma**2\n where E{} is the expectation operator, and S is a zero mean process\n \"\"\"\n # only remove the mean once, if needed\n debias = kwargs.pop('debias', True)\n axis = kwargs.get('axis', -1)\n if debias:\n s = RemoveBias(s, axis)\n kwargs['debias'] = False\n sxx = AutoCov(s, **kwargs)\n all_lags = kwargs.get('all_lags', False)\n if all_lags:\n i = (2*s.shape[axis]-1)/2\n sxx_0 = sxx[i]\n else:\n sxx_0 = sxx[0]\n if not sxx_0:\n sxx = [np.nan for i in range(len(sxx))] # Modification\n else:\n sxx /= sxx_0\n return sxx\n\n\nclass DoPlotting():\n \"\"\"\n This class initiates the plotting options.\n\n Input:\n - *species_labels* (list) [S1,S2, ..., Sn]\n - *rate_labels* (list) [R1, R2, ..., Rm]\n \"\"\"\n def __init__(self,species_labels,rate_labels,plotnum=1,quiet = False):\n self.species_labels = species_labels\n self.rate_labels = rate_labels\n self.number_of_rates = len(rate_labels)\n self.plotnum = plotnum\n # 
https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/colors.py\n self.colors = ['#0000FF','#00CC00','#FF0033','#FF00CC','#6600FF','#FFFF00','#000000','#CCCCCC','#00CCFF','#99CC33','#FF6666', '#FF99CC','#CC6600','#003300','#CCFFFF','#9900FF','#CC6633','#FFD700','#C0C0C0']\n self.quiet = quiet\n\n\n def ResetPlotnum(self):\n \"\"\" Reset figure numbers if trajectories > 1 \"\"\"\n self.plotnum = 1\n\n\n def TimeSeries(self,data,npoints,datatype,labels,trajectory_index,linestyle,linewidth,marker,colors,title,xlabel,ylabel,is_legend,legend_location):\n \"\"\"\n Tracks the propensities and/or species over time.\n\n Input:\n - *data* (array)\n - *npoints* (integer)\n - *datatype* (list)\n - *labels* (list)\n - *trajectory_index* (integer)\n - *linestyle* (string)\n - *linewidth* (float)\n - *title* (string)\n - *xlabel* (string)\n - *ylabel* (string)\n - *is_legend* (boolean)\n \"\"\"\n plt.figure(self.plotnum)\n datatype_indices = [labels.index(Id) for Id in datatype]\n\n data = getDataForTimeSimPlot(data,npoints,self.quiet)\n\n Arr_time = data[:,0]\n if len(datatype) == 1:\n j = trajectory_index\n else:\n j=0\n\n for i in datatype_indices:\n y = data[:,i+1]\n if colors == None:\n if j >= len(self.colors):\n j=0\n elif isinstance(colors,list):\n if j >= len(colors):\n j=0\n elif isinstance(colors,str):\n colors = [colors]\n j=0\n\n if colors == None:\n if marker == '' and linestyle == 'solid':\n plt.plot(Arr_time,y, ls = linestyle,lw = linewidth,color = self.colors[j])\n else:\n plt.plot(Arr_time,y,marker,ls = linestyle,lw = linewidth,color = self.colors[j])\n else:\n if clr.is_color_like(colors[j]):\n plt.plot(Arr_time,y,marker,ls = linestyle,lw = linewidth,color = colors[j])\n else:\n print(\"*** WARNING ***: '{0}' is not recognized as a valid color code\".format(colors[j]) )\n plt.plot(Arr_time,y,marker,ls = linestyle,lw = linewidth,color = self.colors[j])\n colors = None\n j+=1\n if is_legend:\n plt.legend(datatype,numpoints=1,frameon=True,loc=legend_location)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n\n def Autocorrelations(self,lags,data,datatype,labels,trajectory_index,linestyle,linewidth,marker,colors,title,xlabel,ylabel,is_legend,legend_location):\n \"\"\"\n Input:\n - *lags*\n - *data* (array)\n - *datatype* (list)\n - *labels* (list)\n - *trajectory_index* (integer)\n - *linestyle* (string)\n - *linewidth* (float)\n - *marker* string)\n - *colors* (list)\n - *title* (string)\n - *xlabel* (string)\n - *ylabel* (string)\n - *is_legend* (boolean)\n \"\"\"\n plt.figure(self.plotnum)\n datatype_indices = [labels.index(Id) for Id in datatype]\n if len(datatype) == 1:\n j = trajectory_index\n else:\n j=0\n\n for i in datatype_indices:\n if colors == None:\n if j >= len(self.colors):\n j=0\n elif isinstance(colors,list):\n if j >= len(colors):\n j=0\n elif isinstance(colors,str):\n colors = [colors]\n j=0\n\n y = data[i][0:len(lags)]\n if colors == None:\n plt.plot(lags,y,marker,ls = linestyle,lw = linewidth, color = self.colors[j])\n else:\n if clr.is_color_like(colors[j]):\n plt.plot(lags,y,marker,ls = linestyle,lw = linewidth, color = colors[j])\n else:\n print(\"*** WARNING ***: '{0}' is not recognized as a valid color code\".format(colors[j]) )\n plt.plot(lags,y,marker,ls = linestyle,lw = linewidth, color = self.colors[j])\n colors = None\n j+=1\n if is_legend:\n plt.legend(datatype,numpoints=1,frameon=True,loc=legend_location)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n\n def 
Distributions(self,distributions,datatype,labels,trajectory_index,linestyle,linewidth,colors,title,xlabel,ylabel,is_legend=True,legend_location='upper right',bin_size=1,histtype = 'step',orientation='vertical',multiplotting=False):\n \"\"\"\n Plots the distributions of species and/or propensities\n\n density=False because the total probability is determined by summation not by integration.\n\n Input:\n - *distributions* (nested list)\n - *datatype* (list)\n - *labels* (list)\n - *trajectory_index* (integer)\n - *linestyle* (string)\n - *linewidth* (float)\n - *colors* (list)\n - *title* (string)\n - *xlabel* (string)\n - *ylabel* (string)\n - *is_legend* (boolean)\n - *legend_location* [default = 'upper right'] (string/integer)\n - *bin_size* (string) [default = 1]\n - *histtype* (string)) [default = 'step']\n - *orientation* (string) [default = 'vertical']\n - *multiplotting* (boolean) [default = False]\n \"\"\"\n plt.figure(self.plotnum)\n datatype_indices = [labels.index(Id) for Id in datatype]\n if len(datatype) == 1:\n j = trajectory_index\n else:\n j=0\n\n for i in datatype_indices:\n dat_min = distributions[i][0].min()\n dat_max = distributions[i][0].max()\n n_bins = 1 + (dat_max-dat_min) / bin_size # Just take one trajectory as reference\n L_bin_edges = np.linspace(dat_min-bin_size/2.0,dat_max+bin_size/2.0,int(n_bins+1))\n\n if colors == None:\n if j >= len(self.colors):\n j=0\n elif isinstance(colors,list):\n if j >= len(colors):\n j=0\n elif isinstance(colors,str):\n colors = [colors]\n j=0\n\n if colors == None:\n print('#'*20)\n output = plt.hist(distributions[i][0], L_bin_edges, weights = distributions[i][1], ls = linestyle, lw = linewidth, color = self.colors[j], histtype = histtype, orientation=orientation, )\n print('just ran this line')\n output = plt.hist(distributions[i][0], L_bin_edges, weights = distributions[i][1], ls = linestyle, lw = linewidth, color = self.colors[j], histtype = histtype, orientation=orientation, density=False)\n print('just ran this line')\n print('#'*20)\n\n else:\n if clr.is_color_like(colors[j]):\n output = plt.hist(distributions[i][0],L_bin_edges,weights = distributions[i][1],ls = linestyle,lw = linewidth,color = colors[j],histtype = histtype,orientation=orientation,density=False)\n else:\n print(\"*** WARNING ***: '{0}' is not recognized as a valid color code\".format(colors[j]) )\n output = plt.hist(distributions[i][0],L_bin_edges,weights = distributions[i][1],ls = linestyle,lw = linewidth,color = self.colors[j],histtype = histtype,orientation=orientation,density=False)\n colors = None\n j+=1\n if is_legend:\n plt.legend(datatype,numpoints=1,frameon=True,loc=legend_location)\n plt.title(title)\n if orientation.lower() == 'horizontal':\n plt.xlabel(ylabel)\n plt.ylabel(xlabel)\n if multiplotting:\n plt.xticks([0,max(output[0])*1.1])\n else:\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n if multiplotting:\n plt.yticks([0,max(output[0])*1.1])\n\n\n def WaitingtimesDistributions(self,waiting_times,rates,trajectory_index,linestyle,linewidth, marker,colors,title,xlabel,ylabel,is_legend,legend_location):\n \"\"\"\n Plots the waiting times for each reaction in the model.\n Makes use of ObtainWaitingtimes to derive the waiting times out of the SSA output.\n\n Input:\n - *waiting_times* (dict)\n - *rates* (list)\n - *trajectory_index* (integer)\n - *linestyle* (string)\n - *linewith* (float)\n - *marker* (string)\n - *colors* (list)\n - *title* (string)\n - *xlabel* (string)\n - *ylabel* (string)\n - *is_legend* (boolean)\n - *legend_location* [default = 
'upper right'] (string/integer)\n \"\"\"\n plt.figure(self.plotnum)\n if len(rates) == 1:\n j = trajectory_index\n else:\n j=0\n\n L_legend_names = []\n for r_id in rates:\n L_waiting_times = waiting_times[r_id] # get list of waiting times for a given reaction\n if len(L_waiting_times) > 1:\t\t\t # At least 2 waiting times are necessary per reaction\n (x,y,nbins) = LogBin(L_waiting_times,1.5) # Create logarithmic bins (HARDCODED 1.5)\n\n if x is not None:\n if colors == None:\n if j >= len(self.colors):\n j=0\n elif isinstance(colors,list):\n if j >= len(colors):\n j=0\n elif isinstance(colors,str):\n colors = [colors]\n j=0\n if colors == None:\n plt.loglog(x,y,marker,ls = linestyle,lw=linewidth,color = self.colors[j])\n else:\n if clr.is_color_like(colors[j]):\n plt.loglog(x,y,marker,ls = linestyle,lw=linewidth,color = colors[j])\n else:\n print(\"*** WARNING ***: '{0}' is not recognized as a valid color code\".format(colors[j]) )\n plt.loglog(x,y,marker,ls = linestyle,lw=linewidth,color = self.colors[j])\n colors = None\n L_legend_names.append(r_id)\n j+=1\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n if is_legend:\n plt.legend(L_legend_names,numpoints=1,frameon=True,loc=legend_location)\n\n\n def AverageTimeSeries(self,means,stds,time,nstd,datatype,labels,linestyle,linewidth,marker,ms,colors,title,xlabel,ylabel,is_legend,legend_location):\n \"\"\"\n Plots the average and standard deviation of datatype on a regular grid.\n\n Input:\n - *means* (array)\n - *stds* (array)\n - *time* (array)\n - *nstd* (float)\n - *datatype* (list)\n - *labels* (list)\n - *linestyle* (string)\n - *linewidth* (float)\n - *marker* (string)\n - *ms* (float)\n - *colors* (list)\n - *title* (string)\n - *xlabel* (string)\n - *ylabel* (string)\n - *is_legend* (boolean)\n - *legend_location* [default = 'upper right'] (string/integer)\n \"\"\"\n assert nstd > 0, \"Error: The number of STDs must be a value larger than zero\"\n plt.figure(self.plotnum)\n datatype_indices = [labels.index(Id) for Id in datatype]\n j=0\n for i in datatype_indices:\n if colors == None:\n if j >= len(self.colors):\n j=0\n elif isinstance(colors,list):\n if j >= len(colors):\n j=0\n elif isinstance(colors,str):\n colors = [colors]\n j=0\n\n # plot with y-axis error bars\n if colors == None:\n plt.errorbar(time,means[:,i],yerr = nstd*np.array(stds[:,i]),color = self.colors[j],ls = linestyle,lw=linewidth,marker = marker,ms=ms,label = labels[i])\n else:\n if clr.is_color_like(colors[j]):\n plt.errorbar(time,means[:,i],yerr = nstd*np.array(stds[:,i]),color = colors[j],ls = linestyle,lw=linewidth,marker = marker,ms=ms,label = labels[i])\n else:\n print(\"*** WARNING ***: '{0}' is not recognized as a valid color code\".format(colors[j]) )\n plt.errorbar(time,means[:,i],yerr = nstd*np.array(stds[:,i]),color = self.colors[j],ls = linestyle,lw=linewidth,marker = marker,ms=ms,label = labels[i])\n colors = None\n j+=1\n if is_legend:\n plt.legend(numpoints=1,frameon=True,loc=legend_location)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n\n def AverageDistributions(self,means,stds,nstd,datatype,labels,linestyle,linewidth,marker,colors,title,xlabel,ylabel,is_legend,legend_location):\n \"\"\"\n Plots the average and standard deviation.\n\n Input:\n - *means* (nested list)\n - *stds* (nested list)\n - *nstd* (float)\n - *labels* (list)\n - *linestyle* (string)\n - *linewidth* (float)\n - *marker* (string)\n - *colors* (list)\n - *title* (string)\n - *xlabel* (string)\n - *ylabel* (string)\n - *is_legend* (boolean)\n - 
*legend_location* [default = 'upper right'] (string/integer)\n \"\"\"\n assert nstd > 0, \"Error: The number of STDs must be a value larger than zero\"\n plt.figure(self.plotnum)\n datatype_indices = [labels.index(Id) for Id in datatype]\n j=0\n for i in datatype_indices:\n if colors == None:\n if j >= len(self.colors):\n j=0\n elif isinstance(colors,list):\n if j >= len(colors):\n j=0\n elif isinstance(colors,str):\n colors = [colors]\n j=0\n if colors == None:\n plt.errorbar(means[i][0],means[i][1],yerr = nstd * np.array(stds[i][1]),color = self.colors[j],ls = linestyle,lw = linewidth,marker = marker,label = labels[i]) # plot with y-axis error bars\n else:\n if clr.is_color_like(colors[j]):\n plt.errorbar(means[i][0],means[i][1],yerr = nstd*np.array(stds[i][1]),color = colors[j],ls = linestyle,lw = linewidth,marker = marker,label = labels[i])\n else:\n print(\"*** WARNING ***: '{0}' is not recognized as a valid color code\".format(colors[j]) )\n plt.errorbar(means[i][0],means[i][1],yerr = nstd * np.array(stds[i][1]),color = self.colors[j],ls = linestyle,lw = linewidth,marker = marker,label = labels[i])\n colors = None\n j+=1\n if is_legend:\n plt.legend(numpoints=1,frameon=True,loc=legend_location)\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n\n\n def AverageDistributionsCI(self,means,stds,nstd,datatype,labels,colors,title,xlabel,ylabel,is_legend,legend_location):\n \"\"\"\n Plots the average and standard deviation.\n\n Input:\n - *means* (nested list)\n - *stds* (nested list)\n - *nstd* (float)\n - *labels* (list)\n - *linestyle* (string)\n - *linewidth* (float)\n - *marker* (string)\n - *colors* (list)\n - *title* (string)\n - *xlabel* (string)\n - *ylabel* (string)\n - *is_legend* (boolean)\n - *legend_location* [default = 'upper right'] (string/integer)\n \"\"\"\n assert nstd > 0, \"Error: The number of STDs must be a value larger than zero\"\n plt.figure(self.plotnum)\n datatype_indices = [labels.index(Id) for Id in datatype]\n for i in datatype_indices:\n L_s_amount = copy.copy(means[i][0])\n L_mu = copy.copy(means[i][1])\n L_sigma = copy.copy(stds[i][1])\n\n # Add an additional value\n L_s_amount.append(L_s_amount[-1]+1)\n L_mu.append(L_mu[-1])\n L_sigma.append(L_sigma[-1])\n\n X_i = []\n Y_i = []\n L_errors = []\n for j in range(len(L_s_amount)):\n if (not L_s_amount[j] == L_s_amount[0]) and (not L_s_amount[j] == L_s_amount[-1]):\n X_i.append(L_s_amount[j])\n Y_i.append(L_mu[j-1])\n L_errors.append(L_sigma[j-1])\n X_i.append(L_s_amount[j])\n Y_i.append(L_mu[j])\n L_errors.append(L_sigma[j])\n X_e = np.concatenate([X_i, X_i[::-1]])\n Y_e = np.concatenate([np.array(Y_i) - nstd*np.array(L_errors) ,(np.array(Y_i) + nstd*np.array(L_errors))[::-1]])\n\n if colors == None:\n if j >= len(self.colors):\n j=0\n elif isinstance(colors,list):\n if j >= len(colors):\n j=0\n elif isinstance(colors,str):\n colors = [colors]\n j=0\n if colors == None:\n plt.fill(X_e-0.5,Y_e, alpha=.25, ec='None', label='{0} STD confidence interval'.format(nstd),color = self.colors[j])\n plt.plot(np.array(X_i)-0.5,np.array(Y_i),color = self.colors[j])\n else:\n if clr.is_color_like(colors[j]):\n plt.fill(X_e-0.5,Y_e, alpha=.25, ec='None', label='{0} STD confidence interval'.format(nstd),color = colors[j])\n plt.plot(np.array(X_i)-0.5,np.array(Y_i),color = colors[j])\n else:\n print(\"*** WARNING ***: '{0}' is not recognized as a valid color code\".format(colors[j]) )\n plt.fill(X_e-0.5,Y_e, alpha=.25, ec='None', label='{0} STD confidence interval'.format(nstd),color = self.colors[j])\n 
plt.plot(np.array(X_i)-0.5,np.array(Y_i),color = self.colors[j])\n colors = None\n if is_legend:\n plt.legend(numpoints=1,frameon=True,loc=legend_location)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n"
] |
[
[
"numpy.fft.fftpack.fftn",
"numpy.log",
"numpy.log2",
"numpy.product",
"matplotlib.colors.is_color_like",
"numpy.issubdtype",
"numpy.fft.fftpack.fft",
"numpy.fft.fftpack.ifft",
"numpy.concatenate",
"numpy.std",
"numpy.mean",
"numpy.diff",
"numpy.fft.fftpack.ifftn",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mahdiqezlou/vw_spectra
|
[
"975b125a03b3f8505e01db7fd4b4c3c609271499"
] |
[
"vw_plotspectra.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"Contains the plotting-specific functions specific to the velocity width analysis.\"\"\"\n\nfrom __future__ import print_function\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom fake_spectra import plot_spectra as ps\nfrom fake_spectra import haloassigned_spectra as hs\nimport kstest as ks\nimport vw_spectra as vw\ntry:\n xrange(1)\nexcept NameError:\n xrange = range\n\ndef _bootstrap_sample(vel_data, v_table, samples, error):\n \"\"\"Generate a Monte Carlo error sample of the differential distribution.\"\"\"\n # Generate some Monte Carlo samples where each element is perturbed by\n # a Gaussian, sigma given by error.\n index = np.random.random_integers(0, np.size(vel_data)-1, samples)\n bootstrap = vel_data[index]\n if error > 0.:\n bootstrap += np.random.normal(0,error,size=samples)\n nn = np.histogram(bootstrap,v_table)[0]\n return nn\n\nclass VWPlotSpectra(hs.HaloAssignedSpectra, ps.PlottingSpectra, vw.VWSpectra):\n \"\"\"Extends PlottingSpectra with velocity width specific code.\"\"\"\n def plot_vel_width(self, elem, ion, dv=0.17, color=\"red\", ls=\"-\"):\n \"\"\"Plot the velocity widths of this snapshot\n Parameters:\n elem - element to use\n ion - ionisation state: 1 is neutral.\n dv - bin spacing\n \"\"\"\n (vbin, vels) = self.vel_width_hist(elem, ion, dv)\n plt.semilogx(vbin, vels, color=color, lw=3, ls=ls,label=self.label)\n\n def plot_cum_vel_width(self, elem, ion, norm, dv=0.1, color=\"red\", ls=\"-\"):\n \"\"\"Plot the velocity widths of this snapshot\n Parameters:\n elem - element to use\n ion - ionisation state: 1 is neutral.\n dv - bin spacing\n \"\"\"\n (vbin, vels) = self.vel_width_hist(elem, ion, dv)\n cvels = np.cumsum(vels)\n cvels = cvels*norm/cvels[-1]\n plt.semilogx(vbin, cvels, color=color, lw=3, ls=ls,label=self.label)\n\n def plot_cum_f_peak(self, elem, ion, norm, dv=0.01, color=\"red\", ls=\"-\"):\n \"\"\"Plot the velocity widths of this snapshot\n Parameters:\n elem - element to use\n ion - ionisation state: 1 is neutral.\n dv - bin spacing\n \"\"\"\n (vbin, vels) = self.f_peak_hist(elem, ion, dv)\n cvels = np.cumsum(vels)\n cvels = cvels*norm/cvels[-1]\n plt.plot(vbin, cvels, color=color, lw=3, ls=ls,label=self.label)\n plt.xlabel(r\"$f_\\mathrm{edg}$\")\n\n def plot_f_meanmedian_errors(self, elem, ion, samples, cumulative=False, nv_table = 11, color=\"red\"):\n \"\"\"Plot 68% contour for error on the fmm distribution\"\"\"\n f_peak = self.vel_mean_median(elem, ion)\n ind = self.get_filt(elem, ion)\n f_peak = f_peak[ind]\n v_table=np.linspace(0,1,nv_table)\n self._plot_errors(f_peak, v_table, samples, 0., cumulative, False, color)\n\n def plot_f_peak_errors(self, elem, ion, samples, cumulative=False, nv_table=11, color=\"red\"):\n \"\"\"Plot 68% contour for error on the fpeak distribution\"\"\"\n f_peak = self.vel_peak(elem, ion)\n ind = self.get_filt(elem, ion)\n f_peak = f_peak[ind]\n v_table=np.linspace(0,1,nv_table)\n self._plot_errors(f_peak, v_table, samples, 0., cumulative, False, color)\n\n def plot_eq_width_errors(self, elem, ion, line, samples, cumulative=False, min_width = -1.6, nv_table=11, color=\"red\"):\n \"\"\"Plot 68% contour for error on the fpeak distribution\"\"\"\n eq_width = self.equivalent_width(elem, ion, line)\n ind = self.get_filt(elem, ion)\n eq_width = eq_width[ind]\n v_table = np.logspace(min_width, np.log10(np.max(eq_width)), nv_table)\n self._plot_errors(np.log10(eq_width), np.log10(v_table), samples, 0.05, cumulative, False, color)\n\n def plot_vw_errors(self, elem, ion, samples, 
cumulative=False, nv_table=11, color=\"red\"):\n \"\"\"Plot 68% contour for error on the velocity width distribution\"\"\"\n vel_width = self.vel_width(elem, ion)\n ind = self.get_filt(elem, ion)\n vel_width = vel_width[ind]\n v_table=np.logspace(1,np.log10(np.max(vel_width)+10),nv_table)\n self._plot_errors(vel_width, v_table, samples, 5, cumulative, True, color)\n\n def _plot_errors(self, vel_data, v_table, samples, error, cumulative=False, lognorm=True, color=\"red\"):\n \"\"\"Find and plot a 68% contour for a subsample of size samples, by Monte Carlo.\"\"\"\n vbin = np.array([(v_table[i]+v_table[i+1])/2. for i in range(0,np.size(v_table)-1)])\n #Get a subsample\n cdfs = np.array([_bootstrap_sample(vel_data, v_table, samples, error) for _ in xrange(10000)])\n if cumulative:\n cdfs = np.cumsum(cdfs, axis=1)\n norm = 1\n else:\n if lognorm:\n v_table = np.log10(v_table)\n norm = samples * np.array([(-v_table[i]+v_table[i+1]) for i in xrange(np.size(v_table)-1)])\n\n lower = np.percentile(cdfs, 16, axis=0)/norm\n upper = np.percentile(cdfs, 84, axis=0)/norm\n plt.fill_between(vbin, lower, upper, color=color, alpha=0.3)\n\n def plot_f_meanmedian(self, elem, ion, dv=0.06, color=\"red\", ls=\"-\"):\n \"\"\"\n Plot an f_mean_median histogram\n For args see plot_vel_width\n \"\"\"\n (vbin, vels) = self.f_meanmedian_hist(elem, ion, dv)\n plt.plot(vbin, vels, color=color, lw=3, ls=ls,label=self.label)\n plt.xlabel(r\"$f_\\mathrm{mm}$\")\n\n def plot_f_peak(self, elem, ion, dv=0.06, color=\"red\", ls=\"-\"):\n \"\"\"\n Plot an f_peak histogram\n For args see plot_vel_width\n \"\"\"\n (vbin, vels) = self.f_peak_hist(elem, ion, dv)\n plt.plot(vbin, vels, color=color, lw=3, ls=ls,label=self.label)\n plt.xlabel(r\"$f_\\mathrm{edg}$\")\n\n def plot_sep_frac(self,elem = \"Si\", ion = 2, thresh = 1e-1, mindist = 15, dv = 0.2, color=\"blue\", ls=\"-\"):\n \"\"\"\n Plots the fraction of spectra in each velocity width bin which are separated.\n Threshold is as a percentage of the maximum value.\n mindist is in km/s\n \"\"\"\n sep = self.get_separated(elem, ion, thresh,mindist)\n vels = self.vel_width(elem, ion)\n ind = self.get_filt(elem, ion)\n v_table = 10**np.arange(1, 3, dv)\n vbin = np.array([(v_table[i]+v_table[i+1])/2. 
for i in range(0,np.size(v_table)-1)])\n hist1 = np.histogram(vels[ind], v_table)\n hist2 = np.histogram(vels[ind][sep],v_table)\n hist1[0][np.where(hist1[0] == 0)] = 1\n plt.semilogx(vbin, hist2[0]/(1.*hist1[0]), color=color, ls=ls, label=self.label)\n\n def plot_vel_width_breakdown(self, elem = \"Si\", ion = 2, dv = 0.1):\n \"\"\"\n Plots the fraction of the total velocity width histogram in a series of virial velocity bins\n \"\"\"\n #Find velocity width\n vels = self.vel_width(elem, ion)\n ii = self.get_filt(elem, ion)\n self._plot_breakdown(vels,ii,(0, 60, 120), (60, 120, 900), (\"< 60\", \"60-120\", \"> 120\"),dv)\n plt.xlabel(r\"$v_\\mathrm{90}$ (km s$^{-1}$)\")\n plt.ylim(0,1)\n\n\n def plot_f_peak_breakdown(self, elem = \"Si\", ion = 2, dv = 0.05):\n \"\"\"\n Plots the fraction of the total fedge histogram in a series of virial velocity bins\n \"\"\"\n #Find velocity width\n vels = self.vel_peak(elem, ion)\n ii = self.get_filt(elem, ion)\n self._plot_breakdown(vels,ii,(0, 50), (50, 900), (\"< 50\", \"> 50\"),dv, False)\n plt.xlabel(r\"$f_\\mathrm{edg}$\")\n plt.ylim(0,1)\n plt.xlim(0,1)\n plt.legend(loc=1,ncol=2)\n\n def plot_mult_halo_frac(self,elem = \"Si\", ion = 2, dv = 0.2, color=\"blue\", ls=\"-\"):\n \"\"\"\n Plots the fraction of spectra in each velocity width bin which are separated.\n Threshold is as a percentage of the maximum value.\n mindist is in km/s\n \"\"\"\n #Find velocity width\n (halos, subhalos) = self.find_nearby_halos()\n vels = self.vel_width(elem, ion)\n ii = self.get_filt(elem, ion)\n #Find virial velocity\n (halo, _) = self.find_nearest_halo()\n ind = np.where(halo[ii] > 0)\n# virial = np.ones_like(halo, dtype=np.double)\n# virial[ind] = self.virial_vel(halo[ind])\n vwvir = vels[ii][ind] #/virial[ind]\n #Make bins\n v_table = 10**np.arange(np.min(np.log10(vwvir)),np.max(np.log10(vwvir)) , dv)\n vbin = np.array([(v_table[i]+v_table[i+1])/2. 
for i in range(0,np.size(v_table)-1)])\n #Histogram of vel width / virial vel\n hist1 = np.histogram(vwvir, v_table)\n hist1[0][np.where(hist1[0] == 0)] = 1\n #Find places with multiple halos\n subhalo_parent = [list(self.sub_sub_index[ss]) for ss in subhalos]\n allh = np.array([list(set(subhalo_parent[ii] + halos[ii])) for ii in xrange(self.NumLos)])\n indmult = np.where([len(aa) > 1 for aa in allh[ind]])\n histmult = np.histogram(vwvir[indmult],v_table)\n plt.semilogx(vbin, histmult[0]/(1.*hist1[0]), color=color, ls=ls, label=self.label)\n\n def plot_Z_vs_vel_width(self,elem=\"Si\", ion=2, color=\"blue\",color2=\"darkblue\"):\n \"\"\"Plot the correlation between metallicity and velocity width\"\"\"\n vel = self.vel_width(elem, ion)\n met = self.get_metallicity()\n #Ignore objects too faint to be seen\n ind2 = np.where(met > 1e-4)\n met = met[ind2]\n vel = vel[ind2]\n self._plot_2d_contour(vel, met, 10, \"Z vel sim\", color, color2, fit=True)\n plt.plot(vel, met, 'o', color=color)\n plt.xlim(10,2e3)\n plt.ylabel(r\"$\\mathrm{Z} / \\mathrm{Z}_\\odot$\")\n plt.xlabel(r\"$v_\\mathrm{90}$ (km s$^{-1}$)\")\n\n def plot_vel_vs_mass(self,elem, ion, color=\"blue\",color2=\"darkblue\"):\n \"\"\"Plot the correlation between mass and metallicity, with a fit\"\"\"\n vel = self.vel_width(elem, ion)\n self._plot_xx_vs_mass(vel, \"vel\",color,color2)\n\n def kstest(self, Zdata, veldata, elem=\"Si\", ion=2):\n \"\"\"Find the 2D KS test value of the vel width and log metallicity\n with respect to an external dataset, veldata and Z data\"\"\"\n met = self.get_metallicity()\n ind = self.get_filt(elem, ion)\n met = np.log10(met[ind])\n vel = np.log10(self.vel_width(elem, ion)[ind])\n data2 = np.array([met,vel]).T\n data = np.array([np.log10(Zdata), np.log10(veldata)]).T\n return ks.ks_2d_2samp(data,data2)\n\n def plot_virial_vel_vs_vel_width(self,elem, ion,color=\"red\", ls=\"-\", label=\"\", dm=0.1):\n \"\"\"Plot a histogram of the velocity widths vs the halo virial velocity\"\"\"\n (halos, _) = self.find_nearest_halo()\n ind = self.get_filt(elem,ion)\n f_ind = np.where(halos[ind] != -1)\n vel = self.vel_width(elem, ion)[ind][f_ind]\n virial = self.virial_vel(halos[ind][f_ind])+0.1\n vvvir = vel/virial\n m_table = 10**np.arange(np.log10(np.min(vvvir)), np.log10(np.max(vvvir)), dm)\n mbin = np.array([(m_table[i]+m_table[i+1])/2. 
for i in range(0,np.size(m_table)-1)])\n pdf = np.histogram(np.log10(vvvir),np.log10(m_table), density=True)[0]\n print(\"median v/vir: \",np.median(vvvir))\n plt.semilogx(mbin, pdf, color=color, ls=ls, label=label)\n return (mbin, pdf)\n\n def plot_vbars(self, tau):\n \"\"\"Plot the vertical bars marking the velocity widths\"\"\"\n (low, high) = self._vel_width_bound(tau)\n xaxis = np.arange(0,np.size(tau))*self.dvbin - (high+low)/2\n if high - low > 0:\n plt.plot([xaxis[0]+low,xaxis[0]+low],[-1,20], color=\"green\")\n plt.plot([xaxis[0]+high,xaxis[0]+high],[-1,20],color=\"red\")\n if high - low > 30:\n tpos = xaxis[0]+low+5\n else:\n tpos = xaxis[0]+high+5\n if high - low > 60:\n tpos = xaxis[0]+low+25\n if high - low > 150:\n tpos = xaxis[0]+low+35\n ypos = np.max(tau) -0.2\n if np.max(tau) < 0.8:\n ypos = 0.7\n elif np.max(tau) > 4.:\n ypos = 3.5\n plt.text(tpos,ypos,r\"$\\Delta v_{90} = \"+str(np.round(high-low,1))+r\"$\", size=14)\n xlims = (np.max((xaxis[0],xaxis[0]+low-20)),np.min((xaxis[-1],xaxis[0]+high+20)))\n return (xaxis, xlims)\n\n def plot_spectrum(self, elem, ion, line, spec_num, flux=True, xlims=None, color=\"blue\", ls=\"-\", offset=None):\n \"\"\"Plot an spectrum, centered on the maximum tau,\n and marking the 90% velocity width.\n offset: offset in km/s for the x-axis labels\"\"\"\n if line == -1:\n tau_no = self.get_observer_tau(elem, ion, spec_num, noise=False)\n tau = self.get_observer_tau(elem, ion, spec_num, noise=True)\n else:\n tau_no = self.get_tau(elem, ion, line, spec_num, noise=False)\n tau = self.get_tau(elem, ion, line, spec_num, noise=True)\n (low, high, offset_def) = self.find_absorber_width(elem, ion)\n if offset is None:\n offset = offset_def\n tau_l = np.roll(tau_no, offset[spec_num])[low[spec_num]:high[spec_num]]\n (xaxis, xlims_def) = self.plot_vbars(tau_l)\n if xlims is None:\n xlims = xlims_def\n tau_l = np.roll(tau, offset[spec_num])[low[spec_num]:high[spec_num]]\n return self.plot_spectrum_raw(tau_l,xaxis, xlims, flux=flux, color=color, ls=ls)\n\n def get_filt(self, elem, ion, thresh = 100):\n \"\"\"\n Get an index list to exclude spectra where the ion is too small, or velocity width < 20\n thresh - observable density threshold\n \"\"\"\n return vw.VWSpectra.get_filt(self, elem, ion, thresh)\n"
] |
[
[
"matplotlib.pyplot.legend",
"numpy.linspace",
"numpy.cumsum",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.round",
"numpy.histogram",
"numpy.where",
"numpy.roll",
"numpy.arange",
"numpy.size",
"matplotlib.pyplot.semilogx",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.median",
"numpy.log10",
"matplotlib.pyplot.fill_between",
"numpy.array",
"matplotlib.pyplot.ylabel",
"numpy.percentile",
"numpy.random.normal",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.xlabel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pw0908/RMG-Py
|
[
"3846fcce701f2a5fd12dbfa429687e9fcd647298"
] |
[
"rmgpy/tools/data.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n###############################################################################\n# #\n# RMG - Reaction Mechanism Generator #\n# #\n# Copyright (c) 2002-2019 Prof. William H. Green ([email protected]), #\n# Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) #\n# #\n# Permission is hereby granted, free of charge, to any person obtaining a #\n# copy of this software and associated documentation files (the 'Software'), #\n# to deal in the Software without restriction, including without limitation #\n# the rights to use, copy, modify, merge, publish, distribute, sublicense, #\n# and/or sell copies of the Software, and to permit persons to whom the #\n# Software is furnished to do so, subject to the following conditions: #\n# #\n# The above copyright notice and this permission notice shall be included in #\n# all copies or substantial portions of the Software. #\n# #\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #\n# DEALINGS IN THE SOFTWARE. #\n# #\n###############################################################################\n\nimport numpy\n\nclass GenericData(object):\n \"\"\"\n A generic data class for the purpose of plotting.\n ======================= ==============================================================================================\n Attribute Description\n ======================= ==============================================================================================\n `label` A string label describing the data, can be used in a plot legend or in an axis label\n `data` A numpy array of the data\n `uncertainty` An uncertainty value associated with the data. Either a scalar or a numpy array with same \n length as `data`\n `species` Contains species associated with the data, often used with a Species object\n `reaction` Contains reaction associated with the data, often used with a Reaction object\n `units` Contains a string describing the units associated with the data\n `index` An integer containing the index associated with the data\n ======================= ==============================================================================================\n \"\"\"\n def __init__(self, label='', data=None, uncertainty=None, species=None, reaction=None, units=None, index=None):\n \n self.label = str(label) if label else None\n \n if isinstance(data, list):\n self.data = numpy.array(data)\n elif isinstance(data, numpy.ndarray):\n self.data = data\n else:\n raise Exception('Data for GenericData object must be initialized as a list or numpy.array of values.')\n \n self.uncertainty = uncertainty\n self.species = species\n self.reaction = reaction\n self.units = str(units) if units else None\n self.index = int(index) if index else None\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
michael-weinstein/Elevation
|
[
"cd783ed7ea09d6d7c8c13dbba0c5f7daf5fa1719"
] |
[
"tests/predict_test.py"
] |
[
"import os\nimport sys\nimport shutil\n\nimport unittest\n# from mock import patch, Mock, PropertyMock, MagicMock\n\nimport pandas as pd\nimport numpy as np\nfrom warnings import warn\n\n\nclass PredictTest(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(PredictTest, self).__init__(*args, **kwargs)\n\n # @unittest.skip(\"ignore\")\n def test_predict_hmg(self):\n sys.stdout = sys.__stdout__\n\n import elevation.load_data\n from elevation import settings, options\n from elevation.cmds import predict\n\n hmg = predict.Predict(init_models=False).get_hmg_data()\n wildtype = list(hmg['30mer'])[:settings.pred_test_num]\n offtarget = list(hmg['30mer_mut'])[:settings.pred_test_num]\n\n predictions = predict.Predict().execute(wildtype, offtarget)\n pred_df_data = {key: val.reshape(val.shape[0]) for key, val in predictions.iteritems()}\n pred_df = pd.DataFrame(data=pred_df_data)\n\n truth_df = pd.read_excel(settings.pred_default_fixture_file)[:settings.pred_test_num]\n for column in pred_df:\n if np.any(np.abs(pred_df[column] - truth_df[column]) > 0):\n warn(\"predictions don't exactly match expected for %s\" % column)\n idx = np.abs(pred_df[column] - truth_df[column]) > 0\n x = pred_df[column][idx] - truth_df[column][idx]\n # for i, value in enumerate(x):\n # warn(\"Inequality %s %s: %s\" % (column, i, value))\n assert np.allclose(pred_df[column], truth_df[column], atol=1e-08, rtol=0.0), \"%s doesn't match\" % column\n\n # @unittest.skip(\"ignore\")\n def test_agg_nicolo(self):\n import pickle\n from elevation import settings\n from elevation import aggregation\n\n with open(settings.agg_nicolo_fixture_file, \"r\") as fh:\n nicolo_results = pickle.load(fh)\n\n model = aggregation.get_aggregation_model()\n y_pred = model.predict(nicolo_results[0])\n assert np.allclose(y_pred, nicolo_results[1])\n\n\n @unittest.skip(\"ignore\")\n def test_predict_nicolo(self):\n import pickle\n from elevation import settings\n from elevation.cmds.predict import Predict\n preds_file = settings.pj(settings.repo_root, 'tests', 'fixtures', 'preds.lrs.hmg_v1v2.gsgr1.boxcox1.pkl')\n with open(preds_file, 'r') as f:\n preds = pickle.load(f)\n p = Predict() # updated (new) Hauessler & GUIDE-seq\n p.hmg_data = p.get_hmg_data(force_compute=True)\n guides, offtargets = p.hmg_data['30mer'].values, p.hmg_data['30mer_mut'].values\n hmg_preds = p.execute(guides, offtargets)['linear-raw-stacker']\n assert np.allclose(preds, hmg_preds)\n\n\n # @unittest.skip(\"ignore\")\n def test_agg_hauessler(self):\n sys.stdout = sys.__stdout__\n\n import pickle\n from elevation import settings\n from elevation import aggregation\n\n with open(settings.agg_model_file) as fh:\n final_model, other = pickle.load(fh)\n\n inputs = pd.read_excel(settings.pred_default_fixture_file)\n results = []\n rs = np.random.RandomState(settings.default_random_seed)\n perm = rs.permutation(inputs.shape[0])\n stacker = inputs[\"linear-raw-stacker\"].values[perm]\n cfd = inputs[\"CFD\"].values[perm]\n isgenic = rs.random_sample(inputs.shape[0]) > 0.5\n pos = 0\n while pos < perm.shape[0]:\n end = pos + rs.randint(1, 2000)\n if end > perm.shape[0]:\n end = perm.shape[0]\n result = aggregation.get_aggregated_score(\n stacker[pos:end],\n cfd[pos:end],\n isgenic[pos:end],\n final_model)\n results += list(result)\n pos = end\n\n pred_df = pd.DataFrame(data={\"agg_score\": results})\n truth_df = pd.read_excel(settings.agg_default_fixture_file)\n for column in pred_df:\n if np.any(np.abs(pred_df[column] - truth_df[column]) > 0):\n warn(\"aggregate 
predictions don't exactly match expected for %s\" % column)\n idx = np.abs(pred_df[column] - truth_df[column]) > 0\n x = pred_df[column][idx] - truth_df[column][idx]\n for i, value in enumerate(x):\n warn(\"Inequality %s %s: %s\" % (column, i, value))\n assert np.allclose(pred_df[column], truth_df[column], atol=1e-10, rtol=0.0), \"%s doesn't match\" % column\n\n\n# class FitTest(unittest.TestCase):\n#\n# def __init__(self, *args, **kwargs):\n# super(FitTest, self).__init__(*args, **kwargs)\n#\n# def setUp(self):\n# from elevation import settings\n#\n# self.cachedir = settings.pj(settings.repo_root, \"tests\", \"cache\")\n# self.cachedir_patch = patch('elevation.settings.cachedir', self.cachedir)\n# self.cachedir_patch.start()\n#\n# self.tmpdir = settings.pj(settings.repo_root, \"tests\", \"tmp\")\n# self.tmpdir_patch = patch('elevation.settings.tmpdir', self.tmpdir)\n# self.tmpdir_patch.start()\n#\n# print self.tmpdir\n# if os.path.exists(self.cachedir):\n# shutil.rmtree(self.cachedir)\n# os.mkdir(self.cachedir)\n#\n# if os.path.exists(self.tmpdir):\n# shutil.rmtree(self.tmpdir)\n# os.mkdir(self.tmpdir)\n#\n# def tearDown(self):\n# self.cachedir_patch.stop()\n# self.tmpdir_patch.stop()\n#\n# @unittest.skip(\"ignore\")\n# def test_settings_mock(self):\n# sys.stdout = sys.__stdout__\n#\n# from elevation import settings, prediction_pipeline, load_data\n# from elevation.cmds import fit, predict\n# import elevation\n#\n# assert self.cachedir == settings.cachedir\n# assert self.cachedir == prediction_pipeline.settings.cachedir\n# assert self.cachedir == load_data.settings.cachedir\n# assert self.cachedir == fit.settings.cachedir\n# assert self.cachedir == predict.settings.cachedir\n# assert self.cachedir == elevation.settings.cachedir\n# assert self.cachedir == elevation.prediction_pipeline.settings.cachedir\n# assert self.cachedir == elevation.load_data.settings.cachedir\n# assert self.cachedir == elevation.cmds.fit.settings.cachedir\n# assert self.cachedir == elevation.cmds.predict.settings.cachedir\n#\n# assert self.tmpdir == settings.tmpdir\n# assert self.tmpdir == prediction_pipeline.settings.tmpdir\n# assert self.tmpdir == load_data.settings.tmpdir\n# assert self.tmpdir == fit.settings.tmpdir\n# assert self.tmpdir == predict.settings.tmpdir\n# assert self.tmpdir == elevation.settings.tmpdir\n# assert self.tmpdir == elevation.prediction_pipeline.settings.tmpdir\n# assert self.tmpdir == elevation.load_data.settings.tmpdir\n# assert self.tmpdir == elevation.cmds.fit.settings.tmpdir\n# assert self.tmpdir == elevation.cmds.predict.settings.tmpdir\n\n# @unittest.skip(\"ignore\")\n# def test_retrain_predict_hauessler(self):\n# from elevation.cmds import predict, fit\n#\n# learn_options_override = {\n# \"seed\": 12345\n# }\n#\n# fit.Fit().execute(learn_options_override=learn_options_override, force_rebuild=True)\n#\n# @unittest.skip(\"ignore\")\n# def test_retrain_new_seed_predict_hauessler(self):\n# pass\n"
] |
[
[
"pandas.read_excel",
"numpy.abs",
"numpy.allclose",
"pandas.DataFrame",
"numpy.random.RandomState"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
talumbau/blaze
|
[
"66c9e61476f11d53f7b734664214537182397739"
] |
[
"blaze/objects/array.py"
] |
[
"\"\"\"This file defines the Concrete Array --- a leaf node in the expression graph\n\nA concrete array is constructed from a Data Descriptor Object which handles the\n indexing and basic interpretation of bytes\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport datashape\n\nfrom ..compute.ops import ufuncs\nfrom .. import compute\n\nfrom ..datadescriptor import (DDesc, DeferredDescriptor, ddesc_as_py)\nfrom ..io import _printing\n\n\nclass Array(object):\n \"\"\"An Array contains:\n\n DDesc\n Sequence of Bytes (where are the bytes)\n Index Object (how do I get to them)\n Data Shape Object (what are the bytes? how do I interpret them)\n axis and dimension labels\n user-defined meta-data (whatever are needed --- provenance propagation)\n \"\"\"\n def __init__(self, data, axes=None, labels=None, user={}):\n if not isinstance(data, DDesc):\n raise TypeError(('Constructing a blaze array directly '\n 'requires a data descriptor, not type '\n '%r') % (type(data)))\n self.ddesc = data\n self.axes = axes or [''] * (len(self.ddesc.dshape) - 1)\n self.labels = labels or [None] * (len(self.ddesc.dshape) - 1)\n self.user = user\n self.expr = None\n\n if isinstance(data, DeferredDescriptor):\n # NOTE: we need 'expr' on the Array to perform dynamic programming:\n # Two concrete arrays should have a single Op! We cannot\n # store this in the data descriptor, since there are many\n self.expr = data.expr # hurgh\n\n # Inject the record attributes.\n injected_props = {}\n # This is a hack to help get the blaze-web server onto blaze arrays.\n ds = data.dshape\n ms = ds[-1] if isinstance(ds, datashape.DataShape) else ds\n if isinstance(ms, datashape.Record):\n for name in ms.names:\n injected_props[name] = _named_property(name)\n\n # Need to inject attributes on the Array depending on dshape\n # attributes, in cases other than Record\n if data.dshape in [datashape.dshape('int32'), datashape.dshape('int64')]:\n def __int__(self):\n # Evaluate to memory\n e = compute.eval.eval(self)\n return int(e.ddesc.dynd_arr())\n injected_props['__int__'] = __int__\n elif data.dshape in [datashape.dshape('float32'), datashape.dshape('float64')]:\n def __float__(self):\n # Evaluate to memory\n e = compute.eval.eval(self)\n return float(e.ddesc.dynd_arr())\n injected_props['__float__'] = __float__\n elif ms in [datashape.complex_float32, datashape.complex_float64]:\n if len(data.dshape) == 1:\n def __complex__(self):\n # Evaluate to memory\n e = compute.eval.eval(self)\n return complex(e.ddesc.dynd_arr())\n injected_props['__complex__'] = __complex__\n injected_props['real'] = _ufunc_to_property(ufuncs.real)\n injected_props['imag'] = _ufunc_to_property(ufuncs.imag)\n\n if injected_props:\n self.__class__ = type('Array', (Array,), injected_props)\n\n\n @property\n def dshape(self):\n return self.ddesc.dshape\n\n @property\n def deferred(self):\n return self.ddesc.capabilities.deferred\n\n\n def __array__(self):\n import numpy as np\n\n # TODO: Expose PEP-3118 buffer interface\n\n if hasattr(self.ddesc, \"__array__\"):\n return np.array(self.ddesc)\n\n return np.array(self.ddesc.dynd_arr())\n\n def __iter__(self):\n if len(self.dshape.shape) == 1:\n return iter(ddesc_as_py(self.ddesc))\n return (Array(dd) for dd in self.ddesc.__iter__())\n\n def __getitem__(self, key):\n return Array(self.ddesc.__getitem__(key))\n\n def __setitem__(self, key, val):\n self.ddesc.__setitem__(key, val)\n\n def __len__(self):\n shape = self.dshape.shape\n if shape:\n return shape[0]\n raise IndexError('Scalar 
blaze arrays have no length')\n\n def __nonzero__(self):\n # For Python 2\n if len(self.dshape.shape) == 0:\n # Evaluate to memory\n e = compute.eval.eval(self)\n return bool(e.ddesc.dynd_arr())\n else:\n raise ValueError(\"The truth value of an array with more than one \"\n \"element is ambiguous. Use a.any() or a.all()\")\n\n def __bool__(self):\n # For Python 3\n if len(self.dshape.shape) == 0:\n # Evaluate to memory\n e = compute.eval.eval(self)\n return bool(e.ddesc.dynd_arr())\n else:\n raise ValueError(\"The truth value of an array with more than one \"\n \"element is ambiguous. Use a.any() or a.all()\")\n\n def __str__(self):\n if hasattr(self.ddesc, '_printer'):\n return self.ddesc._printer()\n return _printing.array_str(self)\n\n def __repr__(self):\n if hasattr(self.ddesc, \"_printer_repr\"):\n return self.ddesc._printer_repr()\n return _printing.array_repr(self)\n\n\ndef _named_property(name):\n @property\n def getprop(self):\n return Array(self.ddesc.getattr(name))\n return getprop\n\n\ndef _ufunc_to_property(uf):\n @property\n def getprop(self):\n return uf(self)\n return getprop\n\n\ndef binding(f):\n def binder(self, *args):\n return f(self, *args)\n return binder\n\n\ndef __rufunc__(f):\n def __rop__(self, other):\n return f(other, self)\n return __rop__\n\n\ndef _inject_special_binary(names):\n for ufunc_name, special_name in names:\n ufunc = getattr(ufuncs, ufunc_name)\n setattr(Array, '__%s__' % special_name, binding(ufunc))\n setattr(Array, '__r%s__' % special_name, binding(__rufunc__(ufunc)))\n\n\ndef _inject_special(names):\n for ufunc_name, special_name in names:\n ufunc = getattr(ufuncs, ufunc_name)\n setattr(Array, '__%s__' % special_name, binding(ufunc))\n\n\n_inject_special_binary([\n ('add', 'add'),\n ('subtract', 'sub'),\n ('multiply', 'mul'),\n ('true_divide', 'truediv'),\n ('mod', 'mod'),\n ('floor_divide', 'floordiv'),\n ('equal', 'eq'),\n ('not_equal', 'ne'),\n ('greater', 'gt'),\n ('greater_equal', 'ge'),\n ('less_equal', 'le'),\n ('less', 'lt'),\n ('divide', 'div'),\n ('bitwise_and', 'and'),\n ('bitwise_or', 'or'),\n ('bitwise_xor', 'xor'),\n ('power', 'pow'),\n ])\n_inject_special([\n ('bitwise_not', 'invert'),\n ('negative', 'neg'),\n ])\n\n\n\"\"\"\nThese should be functions\n\n @staticmethod\n def fromfiles(list_of_files, converters):\n raise NotImplementedError\n\n @staticmethod\n def fromfile(file, converter):\n raise NotImplementedError\n\n @staticmethod\n def frombuffers(list_of_buffers, converters):\n raise NotImplementedError\n\n @staticmethod\n def frombuffer(buffer, converter):\n raise NotImplementedError\n\n @staticmethod\n def fromobjects():\n raise NotImplementedError\n\n @staticmethod\n def fromiterator(buffer):\n raise NotImplementedError\n\n\"\"\"\n\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LaiaTarres/TransferLearningMusic
|
[
"d662327d320031ea3492720b5134ccc01d17983a"
] |
[
"src/vggish_input.py"
] |
[
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Compute input examples for VGGish from audio waveform.\"\"\"\n\nimport numpy as np\nimport resampy\nfrom scipy.io import wavfile\n\nfrom src.utils import wavefile_to_waveform\nimport src.mel_features as mel_features\nimport src.vggish_params as vggish_params\n\n\n\ndef waveform_to_examples(data, sample_rate):\n \"\"\"Converts audio waveform into an array of examples for VGGish.\n\n Args:\n data: np.array of either one dimension (mono) or two dimensions\n (multi-channel, with the outer dimension representing channels).\n Each sample is generally expected to lie in the range [-1.0, +1.0],\n although this is not required.\n sample_rate: Sample rate of data.\n\n Returns:\n 3-D np.array of shape [num_examples, num_frames, num_bands] which represents\n a sequence of examples, each of which contains a patch of log mel\n spectrogram, covering num_frames frames of audio and num_bands mel frequency\n bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.\n \"\"\"\n # Convert to mono.\n if len(data.shape) > 1:\n data = np.mean(data, axis=1)\n # Resample to the rate assumed by VGGish.\n if sample_rate != vggish_params.SAMPLE_RATE:\n data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)\n\n # Compute log mel spectrogram features.\n log_mel = mel_features.log_mel_spectrogram(\n data,\n audio_sample_rate=vggish_params.SAMPLE_RATE,\n log_offset=vggish_params.LOG_OFFSET,\n window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS,\n hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS,\n num_mel_bins=vggish_params.NUM_MEL_BINS,\n lower_edge_hertz=vggish_params.MEL_MIN_HZ,\n upper_edge_hertz=vggish_params.MEL_MAX_HZ)\n\n # Frame features into examples.\n features_sample_rate = 1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS\n example_window_length = int(round(\n vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate))\n example_hop_length = int(round(\n vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate))\n log_mel_examples = mel_features.frame(\n log_mel,\n window_length=example_window_length,\n hop_length=example_hop_length)\n return log_mel_examples\n\n\ndef wavfile_to_examples(wav_file):\n \"\"\"Convenience wrapper around waveform_to_examples() for a common WAV format.\n\n Args:\n wav_file: String path to a file, or a file-like object. The file\n is assumed to contain WAV audio data with signed 16-bit PCM samples.\n\n Returns:\n See waveform_to_examples.\n \"\"\"\n samples, sr = wavefile_to_waveform(wav_file, 'vggish')\n\n return waveform_to_examples(samples, sr)\n"
] |
[
[
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ajschmidt8/cugraph
|
[
"7ad8fc36254bbc7d1a1dc7d321a93b08a66c26ab"
] |
[
"python/cugraph/dask/common/input_utils.py"
] |
[
"# Copyright (c) 2020-2021, NVIDIA CORPORATION.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom collections.abc import Sequence\n\nfrom collections import OrderedDict\nfrom dask_cudf.core import DataFrame as dcDataFrame\nfrom dask_cudf.core import Series as daskSeries\n\nimport cugraph.comms.comms as Comms\nfrom cugraph.raft.dask.common.utils import get_client\nfrom cugraph.dask.common.part_utils import _extract_partitions\nfrom dask.distributed import default_client\nfrom toolz import first\nfrom functools import reduce\n\n\nclass DistributedDataHandler:\n \"\"\"\n Class to centralize distributed data management. Functionalities include:\n - Data colocation\n - Worker information extraction\n - GPU futures extraction,\n\n Additional functionality can be added as needed. This class **does not**\n contain the actual data, just the metadata necessary to handle it,\n including common pieces of code that need to be performed to call\n Dask functions.\n\n The constructor is not meant to be used directly, but through the factory\n method DistributedDataHandler.create\n\n \"\"\"\n\n def __init__(self, gpu_futures=None, workers=None,\n datatype=None, multiple=False, client=None):\n self.client = get_client(client)\n self.gpu_futures = gpu_futures\n self.worker_to_parts = _workers_to_parts(gpu_futures)\n self.workers = workers\n self.datatype = datatype\n self.multiple = multiple\n self.worker_info = None\n self.total_rows = None\n self.max_vertex_id = None\n self.ranks = None\n self.parts_to_sizes = None\n self.local_data = None\n\n @classmethod\n def get_client(cls, client=None):\n return default_client() if client is None else client\n\n \"\"\" Class methods for initalization \"\"\"\n\n @classmethod\n def create(cls, data, client=None):\n \"\"\"\n Creates a distributed data handler instance with the given\n distributed data set(s).\n\n Parameters\n ----------\n\n data : dask.array, dask.dataframe, or unbounded Sequence of\n dask.array or dask.dataframe.\n\n client : dask.distributedClient\n \"\"\"\n\n client = cls.get_client(client)\n\n multiple = isinstance(data, Sequence)\n\n if isinstance(first(data) if multiple else data,\n (dcDataFrame, daskSeries)):\n datatype = 'cudf'\n else:\n raise Exception(\"Graph data must be dask-cudf dataframe\")\n\n gpu_futures = client.sync(_extract_partitions, data, client)\n workers = tuple(OrderedDict.fromkeys(map(lambda x: x[0], gpu_futures)))\n return DistributedDataHandler(gpu_futures=gpu_futures, workers=workers,\n datatype=datatype, multiple=multiple,\n client=client)\n\n \"\"\" Methods to calculate further attributes \"\"\"\n\n def calculate_worker_and_rank_info(self, comms):\n\n self.worker_info = comms.worker_info(comms.worker_addresses)\n self.ranks = dict()\n\n for w, futures in self.worker_to_parts.items():\n self.ranks[w] = self.worker_info[w][\"rank\"]\n\n def calculate_parts_to_sizes(self, comms=None, ranks=None):\n\n if self.worker_info is None and comms is not None:\n self.calculate_worker_and_rank_info(comms)\n\n self.total_rows = 
0\n\n self.parts_to_sizes = dict()\n\n parts = [(wf[0], self.client.submit(\n _get_rows,\n wf[1],\n self.multiple,\n workers=[wf[0]],\n pure=False))\n for idx, wf in enumerate(self.worker_to_parts.items())]\n\n sizes = self.client.compute(parts, sync=True)\n\n for w, sizes_parts in sizes:\n sizes, total = sizes_parts\n self.parts_to_sizes[self.worker_info[w][\"rank\"]] = \\\n sizes\n\n self.total_rows += total\n\n def calculate_local_data(self, comms, by):\n\n if self.worker_info is None and comms is not None:\n self.calculate_worker_and_rank_info(comms)\n\n local_data = dict([(self.worker_info[wf[0]][\"rank\"],\n self.client.submit(\n _get_local_data,\n wf[1],\n by,\n workers=[wf[0]]))\n for idx, wf in enumerate(self.worker_to_parts.items()\n )])\n\n _local_data_dict = self.client.compute(local_data, sync=True)\n local_data_dict = {'edges': [], 'offsets': [], 'verts': []}\n max_vid = 0\n for rank in range(len(_local_data_dict)):\n data = _local_data_dict[rank]\n local_data_dict['edges'].append(data[0])\n if rank == 0:\n local_offset = 0\n else:\n prev_data = _local_data_dict[rank-1]\n local_offset = prev_data[1] + 1\n local_data_dict['offsets'].append(local_offset)\n local_data_dict['verts'].append(data[1] - local_offset + 1)\n if data[2] > max_vid:\n max_vid = data[2]\n\n import numpy as np\n local_data_dict['edges'] = np.array(local_data_dict['edges'],\n dtype=np.int32)\n local_data_dict['offsets'] = np.array(local_data_dict['offsets'],\n dtype=np.int32)\n local_data_dict['verts'] = np.array(local_data_dict['verts'],\n dtype=np.int32)\n self.local_data = local_data_dict\n self.max_vertex_id = max_vid\n\n\ndef _get_local_data(df, by):\n df = df[0]\n num_local_edges = len(df)\n local_by_max = df[by].iloc[-1]\n local_max = df[['src', 'dst']].max().max()\n return num_local_edges, local_by_max, local_max\n\n\n\"\"\" Internal methods, API subject to change \"\"\"\n\n\ndef _workers_to_parts(futures):\n \"\"\"\n Builds an ordered dict mapping each worker to their list\n of parts\n :param futures: list of (worker, part) tuples\n :return:\n \"\"\"\n w_to_p_map = OrderedDict.fromkeys(Comms.get_workers())\n for w, p in futures:\n if w_to_p_map[w] is None:\n w_to_p_map[w] = []\n w_to_p_map[w].append(p)\n return w_to_p_map\n\n\ndef _get_rows(objs, multiple):\n def get_obj(x): return x[0] if multiple else x\n total = list(map(lambda x: get_obj(x).shape[0], objs))\n return total, reduce(lambda a, b: a + b, total)\n\n\ndef get_mg_batch_data(dask_cudf_data):\n data = DistributedDataHandler.create(data=dask_cudf_data)\n return data\n\n\ndef get_distributed_data(input_ddf):\n ddf = input_ddf\n comms = Comms.get_comms()\n data = DistributedDataHandler.create(data=ddf)\n if data.worker_info is None and comms is not None:\n data.calculate_worker_and_rank_info(comms)\n return data\n\n\ndef get_vertex_partition_offsets(input_graph):\n import cudf\n renumber_vertex_count = input_graph.renumber_map.implementation.ddf.\\\n map_partitions(len).compute()\n renumber_vertex_cumsum = renumber_vertex_count.cumsum()\n vertex_dtype = input_graph.edgelist.edgelist_df['src'].dtype\n vertex_partition_offsets = cudf.Series([0], dtype=vertex_dtype)\n vertex_partition_offsets = vertex_partition_offsets.append(cudf.Series(\n renumber_vertex_cumsum, dtype=vertex_dtype))\n return vertex_partition_offsets\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Suke-H/LearningToPaint
|
[
"444e01477ff1b632df931da67af67464c0f9cb98"
] |
[
"copy_logo.py"
] |
[
"from glob import glob\nimport numpy as np\nimport shutil\nimport os\n\nSOURCE_PATH = \"data/LLD-logo-files/\"\nMV_PATH = \"image/\"\n\nimgs = np.array(glob(SOURCE_PATH + \"**\"))\nN = len(imgs)\nprint(N)\n\nchoice = np.random.choice(N, 10)\nprint(choice)\n\nfor img in imgs[choice]:\n # img_name = os.path.basename(img)\n # os.remove(MV_PATH + img_name)\n shutil.copy(img, MV_PATH)"
] |
[
[
"numpy.random.choice"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
OrtnerMichael/magPyLib
|
[
"4c7e7f56f6e0b915ec0e024c172c460fa80126e5",
"4c7e7f56f6e0b915ec0e024c172c460fa80126e5",
"4c7e7f56f6e0b915ec0e024c172c460fa80126e5"
] |
[
"magpylib/_src/display/plotly/plotly_sensor_mesh.py",
"tests/test_exceptions.py",
"tests/test_obj_Dipole.py"
] |
[
"import numpy as np\n\n\ndef get_sensor_mesh(\n x_color=\"red\",\n y_color=\"green\",\n z_color=\"blue\",\n center_color=\"grey\",\n x_show=True,\n y_show=True,\n z_show=True,\n center_show=True,\n colorize_tails=True,\n):\n \"\"\"\n returns a plotly mesh3d dictionary of a x,y,z arrows oriented in space accordingly\n and colored respectively in red,green,blue with a central cube of size 1\n \"\"\"\n # fmt: off\n trace = {\n \"type\": \"mesh3d\",\n \"i\": np.array([75, 64, 2, 75, 76, 65, 65, 64, 2, 0, 1, 0, 84, 86, 86, 90, 90, 92,\n 92, 91, 91, 87, 87, 85, 85, 83, 83, 82, 82, 84, 94, 86, 86, 84, 84, 82,\n 82, 83, 83, 85, 85, 87, 87, 87, 91, 91, 92, 92, 90, 90, 94, 95, 88, 78,\n 79, 81, 80, 93, 96, 89, 77, 70, 72, 72, 74, 74, 73, 73, 50, 52, 52, 44,\n 44, 32, 32, 22, 22, 14, 14, 20, 20, 30, 30, 41, 41, 50, 57, 52, 52, 50,\n 50, 41, 41, 30, 30, 20, 20, 14, 14, 14, 22, 22, 32, 32, 44, 44, 57, 11,\n 4, 12, 58, 62, 8, 7, 39, 61, 42, 51, 43, 43, 31, 31, 21, 21, 48, 54,\n 54, 47, 47, 37, 37, 25, 25, 17, 17, 18, 18, 26, 26, 38, 38, 48, 59, 54,\n 54, 48, 48, 38, 38, 26, 26, 18, 18, 17, 17, 17, 25, 25, 37, 37, 47, 47,\n 59, 27, 5, 10, 56, 60, 6, 9, 55, 63, 28, 53, 45, 45, 35, 35, 23, 23],\n dtype=\"int64\"),\n \"j\": np.array([76, 76, 3, 3, 3, 3, 1, 1, 75, 75, 3, 3, 70, 70, 72, 72, 74, 74,\n 73, 73, 71, 71, 69, 69, 67, 67, 66, 66, 68, 68, 89, 89, 81, 81, 79, 79,\n 77, 77, 78, 78, 80, 80, 88, 93, 93, 95, 95, 96, 96, 94, 97, 97, 97, 97,\n 97, 97, 97, 97, 97, 97, 97, 68, 68, 66, 66, 67, 67, 69, 51, 51, 43, 43,\n 31, 31, 21, 21, 13, 13, 19, 19, 29, 29, 40, 40, 49, 49, 61, 61, 62, 62,\n 58, 58, 42, 42, 12, 12, 8, 8, 4, 7, 7, 11, 11, 39, 39, 57, 34, 34,\n 34, 34, 34, 34, 34, 34, 34, 34, 34, 49, 49, 40, 40, 29, 29, 19, 53, 53,\n 45, 45, 35, 35, 23, 23, 15, 15, 16, 16, 24, 24, 36, 36, 46, 46, 63, 63,\n 60, 60, 56, 56, 28, 28, 10, 10, 6, 6, 5, 9, 9, 27, 27, 55, 55, 59,\n 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 33, 46, 46, 36, 36, 24, 24, 16],\n dtype=\"int64\"),\n \"k\": np.array([64, 65, 75, 76, 65, 1, 64, 0, 0, 64, 0, 2, 86, 72, 90, 74, 92, 73,\n 91, 71, 87, 69, 85, 67, 83, 66, 82, 68, 84, 70, 86, 81, 84, 79, 82, 77,\n 83, 78, 85, 80, 87, 88, 93, 91, 95, 92, 96, 90, 94, 86, 89, 96, 93, 80,\n 77, 79, 88, 95, 94, 81, 78, 72, 66, 74, 67, 73, 69, 71, 52, 43, 44, 31,\n 32, 21, 22, 13, 14, 19, 20, 29, 30, 40, 41, 49, 50, 51, 52, 62, 50, 58,\n 41, 42, 30, 12, 20, 8, 14, 4, 7, 22, 11, 32, 39, 44, 57, 52, 61, 39,\n 7, 8, 42, 58, 4, 11, 57, 62, 12, 43, 40, 31, 29, 21, 19, 13, 54, 45,\n 47, 35, 37, 23, 25, 15, 17, 16, 18, 24, 26, 36, 38, 46, 48, 53, 54, 60,\n 48, 56, 38, 28, 26, 10, 18, 6, 17, 5, 9, 25, 27, 37, 55, 47, 59, 54,\n 63, 55, 9, 6, 28, 56, 5, 27, 59, 60, 10, 45, 36, 35, 24, 23, 16, 15],\n dtype=\"int64\"),\n \"x\": np.array([-5.00000000e-01, -5.00000000e-01, -5.00000000e-01, -5.00000000e-01,\n -2.99849272e-01, -2.87847906e-01, -2.87847906e-01, -2.57389992e-01,\n -2.47108519e-01, -1.96458220e-01, -1.96458220e-01, -1.33211225e-01,\n -1.15912557e-01, -9.99495536e-02, -9.99495536e-02, -9.39692631e-02,\n -9.39692631e-02, -9.39692631e-02, -9.39692631e-02, -7.86073282e-02,\n -7.86073282e-02, -7.45242685e-02, -7.45242685e-02, -5.00000007e-02,\n -5.00000007e-02, -5.00000007e-02, -5.00000007e-02, -4.26944532e-02,\n -4.26944532e-02, -2.04838570e-02, -2.04838570e-02, -1.42282564e-02,\n -1.42282564e-02, -2.08166817e-16, -1.91079873e-16, 1.73648186e-02,\n 1.73648186e-02, 1.73648186e-02, 1.73648186e-02, 3.32611799e-02,\n 4.72242348e-02, 4.72242348e-02, 5.20848148e-02, 5.27253151e-02,\n 5.27253151e-02, 7.66044408e-02, 
7.66044408e-02, 7.66044408e-02,\n 7.66044408e-02, 9.28355828e-02, 9.28355828e-02, 9.50081274e-02,\n 9.50081274e-02, 9.99999940e-02, 9.99999940e-02, 1.24624498e-01,\n 1.24624498e-01, 1.89173400e-01, 2.03545630e-01, 2.52376080e-01,\n 2.52376080e-01, 2.85024375e-01, 2.90382177e-01, 2.99999982e-01,\n 5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01,\n 5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01,\n 5.00000000e-01, 5.00000000e-01, 5.00000000e-01, 5.00000000e-01,\n 5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,\n 1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,\n 1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,\n 1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,\n 1.48038471e+00, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,\n 1.48038471e+00, 2.00000000e+00]),\n \"y\": np.array([-5.00000000e-01, -5.00000000e-01, 5.00000000e-01, 5.00000000e-01,\n -9.50860139e-03, 1.48038471e+00, 1.48038471e+00, 1.54111609e-01,\n -1.70109898e-01, 1.48038471e+00, 1.48038471e+00, 2.68802464e-01,\n -2.76702493e-01, 3.17605096e-03, 3.17605096e-03, 5.00000000e-01,\n 5.00000000e-01, 1.48038471e+00, 1.48038471e+00, -6.18133359e-02,\n -6.18133359e-02, 6.66793287e-02, 6.66793287e-02, 5.00000000e-01,\n 5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 1.48038471e+00,\n 1.48038471e+00, -9.78795737e-02, -9.78795737e-02, 9.89826098e-02,\n 9.89826098e-02, 2.00000000e+00, -6.27497823e-17, 5.00000000e-01,\n 5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 2.98150450e-01,\n -8.81468803e-02, -8.81468803e-02, -2.95444012e-01, 8.49708244e-02,\n 8.49708244e-02, 5.00000000e-01, 5.00000000e-01, 1.48038471e+00,\n 1.48038471e+00, -3.71692739e-02, -3.71692739e-02, 3.12002450e-02,\n 3.12002450e-02, 5.00000000e-01, 1.48038471e+00, 1.48038471e+00,\n 1.48038471e+00, 2.32837781e-01, -2.20384151e-01, 1.48038471e+00,\n 1.48038471e+00, 9.36007351e-02, -7.53538683e-02, 1.48038471e+00,\n -5.00000000e-01, -5.00000000e-01, -9.44050848e-02, -9.35176238e-02,\n -5.11193462e-02, -4.88722362e-02, 1.60856955e-02, 1.86410155e-02,\n 7.57640675e-02, 7.74319321e-02, 9.99915898e-02, 5.00000000e-01,\n 5.00000000e-01, -2.99947202e-01, -2.55374074e-01, -2.49289244e-01,\n -1.29721463e-01, -1.19483687e-01, -9.44050848e-02, -9.35176238e-02,\n -5.11193462e-02, -4.88722362e-02, 1.60856955e-02, 1.86410155e-02,\n 3.71167921e-02, 4.82570902e-02, 7.57640675e-02, 7.74319321e-02,\n 9.99915898e-02, 1.92170724e-01, 2.00676590e-01, 2.86211818e-01,\n 2.89382666e-01, -3.23514738e-17]),\n \"z\": np.array([-5.00000000e-01, 5.00000000e-01, -5.00000000e-01, 5.00000000e-01,\n 1.48038471e+00, -8.45197663e-02, 8.45197663e-02, 1.48038471e+00,\n 1.48038471e+00, -2.26724878e-01, 2.26724878e-01, 1.48038471e+00,\n 1.48038471e+00, 5.00000000e-01, 1.48038471e+00, -3.42020132e-02,\n 3.42020132e-02, -3.42020132e-02, 3.42020132e-02, 5.00000000e-01,\n 1.48038471e+00, 5.00000000e-01, 1.48038471e+00, -8.66025388e-02,\n 8.66025388e-02, -8.66025388e-02, 8.66025388e-02, -2.96946436e-01,\n 2.96946436e-01, 5.00000000e-01, 1.48038471e+00, 5.00000000e-01,\n 1.48038471e+00, 0.00000000e+00, 2.00000000e+00, -9.84807760e-02,\n 9.84807760e-02, -9.84807760e-02, 9.84807760e-02, 1.48038471e+00,\n 5.00000000e-01, 1.48038471e+00, 1.48038471e+00, 5.00000000e-01,\n 1.48038471e+00, -6.42787591e-02, 6.42787591e-02, -6.42787591e-02,\n 6.42787591e-02, 5.00000000e-01, 1.48038471e+00, 5.00000000e-01,\n 1.48038471e+00, 0.00000000e+00, 0.00000000e+00, -2.72889614e-01,\n 2.72889614e-01, 1.48038471e+00, 1.48038471e+00, -1.62192255e-01,\n 
1.62192255e-01, 1.48038471e+00, 1.48038471e+00, 0.00000000e+00,\n -5.00000000e-01, 5.00000000e-01, -3.29800062e-02, 3.54182646e-02,\n -8.59465674e-02, 8.72439370e-02, -9.86977741e-02, 9.82472003e-02,\n -6.52671903e-02, 6.32795095e-02, -1.29736937e-03, -5.00000000e-01,\n 5.00000000e-01, -5.62742725e-03, 1.57429606e-01, -1.66897804e-01,\n 2.70503879e-01, -2.75179297e-01, -3.29800062e-02, 3.54182646e-02,\n -8.59465674e-02, 8.72439370e-02, -9.86977741e-02, 9.82472003e-02,\n 2.97695041e-01, -2.96093315e-01, -6.52671903e-02, 6.32795095e-02,\n -1.29736937e-03, 2.30370149e-01, -2.22999811e-01, 8.99043754e-02,\n -7.91054145e-02, 1.98500464e-16]),\n }\n # fmt: on\n x_color_tail = x_color\n y_color_tail = y_color\n z_color_tail = z_color\n if colorize_tails:\n x_color_tail = center_color\n y_color_tail = center_color\n z_color_tail = center_color\n N, N2 = 56, 18\n trace[\"facecolor\"] = np.concatenate(\n [\n [center_color] * 12,\n [x_color_tail] * (N2),\n [x_color] * (N - N2),\n [y_color_tail] * (N2),\n [y_color] * (N - N2),\n [z_color_tail] * (N2),\n [z_color] * (N - N2),\n ]\n )\n indices = ((0, 12), (12, 68), (68, 124), (124, 180))\n show = (center_show, x_show, z_show, y_show)\n for k in (\"i\", \"j\", \"k\", \"facecolor\"):\n t = []\n for i, s in zip(indices, show):\n if s:\n t.extend(trace[k][i[0] : i[1]])\n trace[k] = np.array(t)\n return trace\n",
"import unittest\n\nimport numpy as np\nfrom scipy.spatial.transform import Rotation as R\n\nimport magpylib as magpy\nfrom magpylib._src.exceptions import MagpylibBadUserInput\nfrom magpylib._src.exceptions import MagpylibInternalError\nfrom magpylib._src.fields.field_wrap_BH_level1 import getBH_level1\nfrom magpylib._src.fields.field_wrap_BH_level2 import getBH_level2\nfrom magpylib._src.fields.field_wrap_BH_level2_dict import getBH_dict_level2\nfrom magpylib._src.input_checks import check_format_input_observers\nfrom magpylib._src.utility import format_obj_input\nfrom magpylib._src.utility import format_src_inputs\nfrom magpylib._src.utility import test_path_format as tpf\n\n\ndef getBHv_unknown_source_type():\n \"\"\"unknown source type\"\"\"\n getBH_dict_level2(\n source_type=\"badName\",\n magnetization=(1, 0, 0),\n dimension=(0, 2, 1, 0, 360),\n position=(0, 0, -0.5),\n observers=(1.5, 0, -0.1),\n field=\"B\",\n )\n\n\ndef getBH_level1_internal_error():\n \"\"\"bad source_type input should not happen\"\"\"\n x = np.array([(1, 2, 3)])\n rot = R.from_quat((0, 0, 0, 1))\n getBH_level1(\n field=\"B\",\n source_type=\"woot\",\n magnetization=x,\n dimension=x,\n observers=x,\n position=x,\n orientation=rot,\n )\n\n\ndef getBH_level2_bad_input1():\n \"\"\"test BadUserInput error at getBH_level2\"\"\"\n src = magpy.magnet.Cuboid((1, 1, 2), (1, 1, 1))\n sens = magpy.Sensor()\n getBH_level2([src, sens], (0, 0, 0), sumup=False, squeeze=True, field=\"B\")\n\n\ndef getBH_level2_bad_input2():\n \"\"\"different pixel shapes\"\"\"\n mag = (1, 2, 3)\n dim_cuboid = (1, 2, 3)\n pm1 = magpy.magnet.Cuboid(mag, dim_cuboid)\n sens1 = magpy.Sensor()\n sens2 = magpy.Sensor(pixel=[(0, 0, 0), (0, 0, 1), (0, 0, 2)])\n magpy.getB(pm1, [sens1, sens2])\n\n\ndef getBH_level2_internal_error1():\n \"\"\"somehow an unrecognized objects end up in get_src_dict\"\"\"\n # pylint: disable=protected-access\n sens = magpy.Sensor()\n x = np.zeros((10, 3))\n magpy._src.fields.field_wrap_BH_level2.get_src_dict([sens], 10, 10, x)\n\n\n# getBHv missing inputs ------------------------------------------------------\ndef getBHv_missing_input1():\n \"\"\"missing bh\"\"\"\n x = np.array([(1, 2, 3)])\n getBH_dict_level2(source_type=\"Cuboid\", observers=x, magnetization=x, dimension=x)\n\n\ndef getBHv_missing_input2():\n \"\"\"missing source_type\"\"\"\n x = np.array([(1, 2, 3)])\n getBH_dict_level2(bh=True, observers=x, magnetization=x, dimension=x)\n\n\ndef getBHv_missing_input3():\n \"\"\"missing observers\"\"\"\n x = np.array([(1, 2, 3)])\n getBH_dict_level2(bh=True, source_type=\"Cuboid\", magnetization=x, dimension=x)\n\n\ndef getBHv_missing_input4_cuboid():\n \"\"\"missing Cuboid mag\"\"\"\n x = np.array([(1, 2, 3)])\n getBH_dict_level2(bh=True, source_type=\"Cuboid\", observers=x, dimension=x)\n\n\ndef getBHv_missing_input5_cuboid():\n \"\"\"missing Cuboid dim\"\"\"\n x = np.array([(1, 2, 3)])\n getBH_dict_level2(bh=True, source_type=\"Cuboid\", observers=x, magnetization=x)\n\n\ndef getBHv_missing_input4_cyl():\n \"\"\"missing Cylinder mag\"\"\"\n x = np.array([(1, 2, 3)])\n y = np.array([(1, 2)])\n getBH_dict_level2(bh=True, source_type=\"Cylinder\", observers=x, dimension=y)\n\n\ndef getBHv_missing_input5_cyl():\n \"\"\"missing Cylinder dim\"\"\"\n x = np.array([(1, 2, 3)])\n getBH_dict_level2(bh=True, source_type=\"Cylinder\", observers=x, magnetization=x)\n\n\ndef getBHv_missing_input4_sphere():\n \"\"\"missing Sphere mag\"\"\"\n x = np.array([(1, 2, 3)])\n getBH_dict_level2(bh=True, source_type=\"Sphere\", observers=x, 
dimension=1)\n\n\ndef getBHv_missing_input5_sphere():\n \"\"\"missing Sphere dim\"\"\"\n x = np.array([(1, 2, 3)])\n getBH_dict_level2(bh=True, source_type=\"Sphere\", observers=x, magnetization=x)\n\n\n# bad inputs -------------------------------------------------------------------\ndef getBHv_bad_input1():\n \"\"\"different input lengths\"\"\"\n x = np.array([(1, 2, 3)])\n x2 = np.array([(1, 2, 3)] * 2)\n getBH_dict_level2(\n bh=True, source_type=\"Cuboid\", observers=x, magnetization=x2, dimension=x\n )\n\n\ndef getBHv_bad_input2():\n \"\"\"bad source_type string\"\"\"\n x = np.array([(1, 2, 3)])\n getBH_dict_level2(\n bh=True, source_type=\"Cubooid\", observers=x, magnetization=x, dimension=x\n )\n\n\ndef getBHv_bad_input3():\n \"\"\"mixed input\"\"\"\n x = np.array([(1, 2, 3)])\n s = magpy.Sensor()\n getBH_dict_level2(\n bh=True, source_type=\"Cuboid\", observers=s, magnetization=x, dimension=x\n )\n\n\ndef utility_format_obj_input():\n \"\"\"bad input object\"\"\"\n pm1 = magpy.magnet.Cuboid((1, 2, 3), (1, 2, 3))\n pm2 = magpy.magnet.Cuboid((1, 2, 3), (1, 2, 3))\n format_obj_input([pm1, pm2, 333])\n\n\ndef utility_format_src_inputs():\n \"\"\"bad src input\"\"\"\n pm1 = magpy.magnet.Cuboid((1, 2, 3), (1, 2, 3))\n pm2 = magpy.magnet.Cuboid((1, 2, 3), (1, 2, 3))\n format_src_inputs([pm1, pm2, 1])\n\n\ndef utility_format_obs_inputs():\n \"\"\"bad src input\"\"\"\n sens1 = magpy.Sensor()\n sens2 = magpy.Sensor()\n possis = [1, 2, 3]\n check_format_input_observers([sens1, sens2, possis, \"whatever\"])\n\n\ndef utility_test_path_format():\n \"\"\"bad path format input\"\"\"\n # pylint: disable=protected-access\n pm1 = magpy.magnet.Cuboid((1, 2, 3), (1, 2, 3))\n pm1._position = [(1, 2, 3), (1, 2, 3)]\n tpf(pm1)\n\n\n###############################################################################\n# BAD INPUT SHAPE EXCEPTIONS\ndef bad_input_shape_basegeo_pos():\n \"\"\"bad position input shape\"\"\"\n vec3 = (1, 2, 3)\n vec4 = (1, 2, 3, 4)\n magpy.magnet.Cuboid(vec3, vec3, vec4)\n\n\ndef bad_input_shape_cuboid_dim():\n \"\"\"bad cuboid dimension shape\"\"\"\n vec3 = (1, 2, 3)\n vec4 = (1, 2, 3, 4)\n magpy.magnet.Cuboid(vec3, vec4)\n\n\ndef bad_input_shape_cuboid_mag():\n \"\"\"bad cuboid magnetization shape\"\"\"\n vec3 = (1, 2, 3)\n vec4 = (1, 2, 3, 4)\n magpy.magnet.Cuboid(vec4, vec3)\n\n\ndef bad_input_shape_cyl_dim():\n \"\"\"bad cylinder dimension shape\"\"\"\n vec3 = (1, 2, 3)\n vec4 = (1, 2, 3, 4)\n magpy.magnet.Cylinder(vec3, vec4)\n\n\ndef bad_input_shape_cyl_mag():\n \"\"\"bad cylinder magnetization shape\"\"\"\n vec3 = (1, 2, 3)\n vec4 = (1, 2, 3, 4)\n magpy.magnet.Cylinder(vec4, vec3)\n\n\ndef bad_input_shape_sphere_mag():\n \"\"\"bad sphere magnetization shape\"\"\"\n vec4 = (1, 2, 3, 4)\n magpy.magnet.Sphere(vec4, 1)\n\n\ndef bad_input_shape_sensor_pix_pos():\n \"\"\"bad sensor pix_pos input shape\"\"\"\n vec4 = (1, 2, 3, 4)\n vec3 = (1, 2, 3)\n magpy.Sensor(vec3, vec4)\n\n\ndef bad_input_shape_dipole_mom():\n \"\"\"bad sphere magnetization shape\"\"\"\n vec4 = (1, 2, 3, 4)\n magpy.misc.Dipole(moment=vec4)\n\n\n#####################################################################\nclass TestExceptions(unittest.TestCase):\n \"\"\"test class for exception testing\"\"\"\n\n def test_except_utility(self):\n \"\"\"utility\"\"\"\n self.assertRaises(MagpylibBadUserInput, utility_test_path_format)\n self.assertRaises(MagpylibBadUserInput, utility_format_obj_input)\n self.assertRaises(MagpylibBadUserInput, utility_format_src_inputs)\n self.assertRaises(MagpylibBadUserInput, 
utility_format_obs_inputs)\n\n def test_except_getBHv(self):\n \"\"\"getBHv\"\"\"\n self.assertRaises(KeyError, getBHv_missing_input1)\n self.assertRaises(MagpylibBadUserInput, getBHv_missing_input2)\n self.assertRaises(MagpylibBadUserInput, getBHv_missing_input3)\n self.assertRaises(MagpylibBadUserInput, getBHv_missing_input4_cuboid)\n self.assertRaises(MagpylibBadUserInput, getBHv_missing_input4_cyl)\n self.assertRaises(MagpylibBadUserInput, getBHv_missing_input4_sphere)\n self.assertRaises(MagpylibBadUserInput, getBHv_missing_input5_cuboid)\n self.assertRaises(MagpylibBadUserInput, getBHv_missing_input5_cyl)\n self.assertRaises(MagpylibBadUserInput, getBHv_missing_input5_sphere)\n self.assertRaises(MagpylibBadUserInput, getBHv_bad_input1)\n self.assertRaises(MagpylibBadUserInput, getBHv_bad_input2)\n self.assertRaises(MagpylibBadUserInput, getBHv_bad_input3)\n self.assertRaises(MagpylibBadUserInput, getBHv_unknown_source_type)\n\n def test_except_getBH_lev1(self):\n \"\"\"getBH_level1 exception testing\"\"\"\n self.assertRaises(MagpylibInternalError, getBH_level1_internal_error)\n\n def test_except_getBH_lev2(self):\n \"\"\"getBH_level2 exception testing\"\"\"\n self.assertRaises(MagpylibBadUserInput, getBH_level2_bad_input1)\n self.assertRaises(MagpylibBadUserInput, getBH_level2_bad_input2)\n self.assertRaises(MagpylibInternalError, getBH_level2_internal_error1)\n\n def test_except_bad_input_shape_basegeo(self):\n \"\"\"BaseGeo bad input shapes\"\"\"\n self.assertRaises(MagpylibBadUserInput, bad_input_shape_basegeo_pos)\n self.assertRaises(MagpylibBadUserInput, bad_input_shape_cuboid_dim)\n self.assertRaises(MagpylibBadUserInput, bad_input_shape_cuboid_mag)\n self.assertRaises(MagpylibBadUserInput, bad_input_shape_cyl_dim)\n self.assertRaises(MagpylibBadUserInput, bad_input_shape_cyl_mag)\n self.assertRaises(MagpylibBadUserInput, bad_input_shape_sphere_mag)\n self.assertRaises(MagpylibBadUserInput, bad_input_shape_sensor_pix_pos)\n self.assertRaises(MagpylibBadUserInput, bad_input_shape_dipole_mom)\n",
"import numpy as np\n\nimport magpylib as magpy\n\n\ndef test_Dipole_basicB():\n \"\"\"Basic dipole class test\"\"\"\n src = magpy.misc.Dipole(moment=(1, 2, 3), position=(1, 2, 3))\n sens = magpy.Sensor()\n\n B = src.getB(sens)\n Btest = np.array([0.00303828, 0.00607656, 0.00911485])\n assert np.allclose(B, Btest)\n\n\ndef test_Dipole_basicH():\n \"\"\"Basic dipole class test\"\"\"\n src = magpy.misc.Dipole(moment=(1, 2, 3), position=(1, 2, 3))\n sens = magpy.Sensor()\n H = src.getH(sens)\n Htest = np.array([0.00241779, 0.00483558, 0.00725336])\n assert np.allclose(H, Htest)\n\n\ndef test_Dipole_zero_position():\n \"\"\"Basic dipole class test\"\"\"\n src = magpy.misc.Dipole(moment=(1, 2, 3))\n sens = magpy.Sensor()\n np.seterr(all=\"ignore\")\n B = magpy.getB(src, sens)\n np.seterr(all=\"print\")\n assert all(np.isnan(B))\n\n\ndef test_repr():\n \"\"\"test __repr__\"\"\"\n dip = magpy.misc.Dipole(moment=(1, 2, 3))\n assert dip.__repr__()[:6] == \"Dipole\", \"Dipole repr failed\"\n"
] |
[
[
"numpy.concatenate",
"numpy.array"
],
[
"numpy.array",
"numpy.zeros",
"scipy.spatial.transform.Rotation.from_quat"
],
[
"numpy.isnan",
"numpy.seterr",
"numpy.array",
"numpy.allclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.5",
"1.2",
"1.3",
"1.4"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hadrianl/rqalpha_kairui
|
[
"eb0e0de1d69f1a4d6f349d4dc011e1c9eccfe2d8"
] |
[
"rqalpha/examples/extend_api/HKMod/realtime_data_source.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/10/30 0030 9:43\n# @Author : Hadrianl \n# @File : realtime_data_source\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport six\nimport numpy as np\n\nfrom rqalpha.interface import AbstractDataSource\nfrom rqalpha.const import MARGIN_TYPE\nfrom rqalpha.utils.py2 import lru_cache\nfrom rqalpha.utils.datetime_func import convert_date_to_int, convert_int_to_date\nfrom rqalpha.utils.i18n import gettext as _\n\nfrom rqalpha.data.future_info_cn import CN_FUTURE_INFO\nfrom rqalpha.data.adjust import adjust_bars, FIELDS_REQUIRE_ADJUSTMENT\nfrom rqalpha.data.public_fund_commission import PUBLIC_FUND_COMMISSION\nfrom rqalpha.const import COMMISSION_TYPE\nfrom spapi.spAPI import *\nfrom spapi.sp_struct import *\nimport datetime as dt\nfrom rqalpha.api import logger\nfrom queue import Queue, Empty\nimport pymongo as pmg\nfrom threading import Thread\nfrom collections import deque\nimport pandas as pd\nfrom rqalpha.events import EVENT\nimport time\nfrom rqalpha.environment import Environment\nfrom rqalpha.model.instrument import Instrument\nfrom .util import _convert_from_ctype\n\nclass RealtimeDataSource(AbstractDataSource):\n def __init__(self, db_info, server_info):\n mongo_cli = pmg.MongoClient(db_info.host)\n if db_info.user and db_info.pwd:\n admin_db = mongo_cli.get_database('admin')\n admin_db.authenticate(db_info.user, db_info.pwd)\n self._db = mongo_cli.get_database(db_info.dbname)\n self._col = self._db.get_collection('realtime_future_1min_')\n self._col.create_index([('datetime', pmg.DESCENDING), ('code', pmg.ASCENDING)], unique=True)\n self._col.create_index([('code', pmg.ASCENDING)])\n self.bar_trigger_thread = Thread(target=self.trigger_bar_from_server, args=(server_info.host, server_info.port))\n self.bar_trigger_thread.setDaemon(True)\n\n def trigger_bar_from_server(self, host, port):\n import zmq\n ctx = zmq.Context()\n self.trigger_socket = ctx.socket(zmq.SUB)\n self.trigger_socket.set_string(zmq.SUBSCRIBE, '')\n self.trigger_socket.setsockopt(zmq.RCVTIMEO, 5000)\n addr = f'tcp://{host}:{port}'\n self.trigger_socket.connect(addr)\n env = Environment.get_instance()\n event_queue = env.event_source.event_queue\n while True:\n try:\n d = self.trigger_socket.recv_pyobj()\n event_queue.put((d, EVENT.BAR))\n except zmq.ZMQError:\n ...\n\n def get_trading_minutes_for(self, order_book_id, trading_dt):\n raise NotImplementedError\n\n def get_trading_calendar(self):\n Collection = self._db.future_contract_info\n trading_calendar = [pd.Timestamp(td) for td in Collection.distinct('DATE')]\n trading_calendar.sort(key=lambda x: x.timestamp())\n return np.array(trading_calendar)\n\n def get_all_instruments(self):\n con_col = self._db.realtime_future_contract_info\n prod_col = self._db.realtime_future_product_info\n code_list = con_col.distinct('CODE')\n inst_list = []\n\n for c in code_list:\n con_info = con_col.find_one({'CODE': c}, sort=[('DATE', pmg.DESCENDING)])\n prod_info = prod_col.find_one({'CLASS_CODE': 
con_info['CLASS_CODE']}, sort=[('DATE', pmg.DESCENDING)])\n inst = {\n # 'abbrev_symbol': 'null',\n 'contract_multiplier': con_info['CON_SIZE'],\n 'de_listed_date': con_info['DATE_TO'].strftime('%Y-%m-%d'),\n 'exchange': 'HKEX',\n 'listed_date': con_info['DATE_FROM'].strftime('%Y-%m-%d'),\n 'margin_rate': 0.05,\n 'maturity_date': con_info['EXPIRY_DATE'].strftime('%Y-%m-%d'),\n 'order_book_id': con_info['CODE'],\n 'product': 'Index',\n 'round_lot': 1.0,\n 'settlement_method': 'CashSettlementRequired',\n 'symbol': prod_info['PROD_NAME'],\n # 'trading_unit': '5',\n 'type': 'Future',\n 'underlying_order_book_id': con_info['Filler'],\n 'underlying_symbol': con_info['CLASS_CODE']}\n inst_list.append(Instrument(inst))\n return inst_list\n\n\n # INSTRUMENT_TYPE_MAP = {\n # 'CS': 0,\n # 'INDX': 1,\n # 'Future': 2,\n # 'ETF': 3,\n # 'LOF': 3,\n # 'FenjiA': 3,\n # 'FenjiB': 3,\n # 'FenjiMu': 3,\n # 'PublicFund': 4\n # }\n\n def get_bar(self, instrument, dt, frequency):\n\n if frequency in ['1m', '1min']:\n frequency = '1min'\n order_book_id = instrument.order_book_id\n Collection = self._db.get_collection(f'realtime_future_{frequency}_')\n if frequency in ['1m', '1min']:\n data = Collection.find_one(\n {'code': order_book_id, \"datetime\": dt})\n else:\n data = None\n\n if data is None:\n return {'code': order_book_id, 'datetime': dt.strftime('%Y-%m-%d %H:%M:%S'), 'open': np.nan, 'high': np.nan,\n 'low': np.nan, 'close': np.nan, 'volume': np.nan}\n else:\n data['datetime'] = data['datetime'].strftime('%Y-%m-%d %H:%M:%S')\n return data\n\n def get_settle_price(self, instrument, date):\n order_book_id = instrument.order_book_id\n Collection = self._db.realtime_future_1min_\n _d = dt.datetime(date.year, date.month, date.day, 16, 29)\n data = Collection.find_one({'code': order_book_id, 'datetime': {'$lte': _d}}, ['close'])\n _close = data['close']\n return _close\n\n def history_bars(self, instrument, bar_count, frequency, fields, dt,\n skip_suspended=True, include_now=False,\n adjust_type='pre', adjust_orig=None):\n order_book_id = instrument.order_book_id\n Collection = self._db.get_collection(f'realtime_future_{frequency}_')\n query_type = '$lte' if include_now else '$lt'\n cur = Collection.find({'code': order_book_id, 'datetime':{query_type: dt}}, limit=bar_count, sort=[('datetime', pmg.DESCENDING)])\n data = deque()\n for c in cur:\n c['datetime'] = c['datetime'].timestamp()\n data.appendleft(c)\n\n _d = pd.DataFrame(list(data))\n # _d['datetime'] = _d['datetime'].apply(lambda x: x.timestamp())\n fields = [field for field in fields if field in _d.columns]\n return _d.loc[:, fields].T.as_matrix()\n\n def get_yield_curve(self, start_date, end_date, tenor=None):\n ...\n\n def get_risk_free_rate(self, start_date, end_date):\n return 0.028\n\n def current_snapshot(self, instrument, frequency, dt):\n raise NotImplementedError\n\n def available_data_range(self, frequency):\n if frequency == '1m':\n return (dt.date(2011, 1, 1), dt.date.today() + dt.timedelta(days=1))\n\n def get_margin_info(self, instrument):\n return {\n 'margin_type': MARGIN_TYPE.BY_MONEY,\n 'long_margin_ratio': instrument.margin_rate,\n 'short_margin_ratio': instrument.margin_rate,\n }\n\n def get_commission_info(self, instrument):\n order_book_id = instrument.order_book_id\n if 'HSI' in order_book_id:\n commission_info = {'commission_type': COMMISSION_TYPE.BY_VOLUME, 'open_commission_ratio': 33.54, 'close_commission_ratio': 33.54, 'close_commission_today_ratio': 33.54}\n elif 'MHI' in order_book_id:\n commission_info = 
{'commission_type': COMMISSION_TYPE.BY_VOLUME, 'open_commission_ratio': 13.6,\n 'close_commission_ratio': 13.6, 'close_commission_today_ratio': 13.6}\n else:\n commission_info = super(RealtimeDataSource, self).get_commission_info(instrument)\n return commission_info\n\n def get_ticks(self, order_book_id, date):\n raise NotImplementedError\n\n"
] |
[
[
"numpy.array",
"pandas.Timestamp"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
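Note on the `realtime_data_source.py` cell above: `history_bars` ends with `_d.loc[:, fields].T.as_matrix()`, but `DataFrame.as_matrix` was deprecated in pandas 0.23 and removed in 1.0, so the call raises AttributeError on any modern pandas. A minimal sketch of the drop-in replacement, using a stand-in frame `_d` (hypothetical data, not taken from the row):

    import pandas as pd

    # stand-in for the frame built from the MongoDB cursor in history_bars
    _d = pd.DataFrame({"datetime": [1.0, 2.0], "open": [10.0, 11.0], "close": [10.5, 11.2]})
    fields = [f for f in ("open", "close", "volume") if f in _d.columns]
    values = _d.loc[:, fields].T.to_numpy()  # .as_matrix() was removed in pandas 1.0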
ryanmdavis/classifyHistology
|
[
"563687250f6d81a7e2596607587238354e7279e5"
] |
[
"train_net/read_and_reshape_data.py"
] |
[
"from tensorflow.examples.tutorials.mnist import input_data\n\ndef readReshapeData(path):\n # extract data\n data = input_data.read_data_sets(path,one_hot=True)\n \n # print info about the datasets\n # Shapes of training set\n print(\"Training set (images) shape: {shape}\".format(shape=data.train.images.shape))\n print(\"Training set (labels) shape: {shape}\".format(shape=data.train.labels.shape))\n \n # Shapes of test set\n print(\"Test set (images) shape: {shape}\".format(shape=data.test.images.shape))\n print(\"Test set (labels) shape: {shape}\".format(shape=data.test.labels.shape))\n \n # Reshape training and testing image\n train_X = data.train.images.reshape(-1, 28, 28, 1)\n test_X = data.test.images.reshape(-1,28,28,1)\n \n # set the correct classes\n train_y = data.train.labels\n test_y = data.test.labels\n \n return train_X,test_X, train_y,test_y"
] |
[
[
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
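Note on the `read_and_reshape_data.py` cell above: `tensorflow.examples.tutorials.mnist` ships only with TensorFlow 1.x, which matches the 0.12-1.13 version range recorded for this row; the module is gone in 2.x. A minimal TF2 sketch of the same loader, assuming `tf.keras.datasets.mnist` as the replacement source (keras returns uint8 images and integer labels, so the sketch rescales and one-hot encodes to match the TF1 behaviour):

    import numpy as np
    import tensorflow as tf  # assumes TensorFlow 2.x

    def read_reshape_data_tf2():
        (train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
        # input_data.read_data_sets returned floats in [0, 1]; keras returns uint8 in [0, 255]
        train_x = train_x.reshape(-1, 28, 28, 1).astype("float32") / 255.0
        test_x = test_x.reshape(-1, 28, 28, 1).astype("float32") / 255.0
        # equivalent of the one_hot=True flag in the TF1 loader
        train_y = np.eye(10)[train_y]
        test_y = np.eye(10)[test_y]
        return train_x, test_x, train_y, test_y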
ty-97/SDA
|
[
"047d8e6ed238f77d8a7846cf3ac9916c4f0d8fbc"
] |
[
"test_models.py"
] |
[
"# Code for \"TSM: Temporal Shift Module for Efficient Video Understanding\"\r\n# arXiv:1811.08383\r\n# Ji Lin*, Chuang Gan, Song Han\r\n# {jilin, songhan}@mit.edu, [email protected]\r\n\r\nimport os\r\nimport time\r\nimport shutil\r\nimport torch.nn.parallel\r\nimport torch.backends.cudnn as cudnn\r\nimport torch.optim\r\nfrom torch.nn.utils import clip_grad_norm_\r\nfrom torch.nn import functional as F\r\nfrom sklearn.metrics import confusion_matrix\r\n\r\nfrom ops.dataset import TSNDataSet\r\n# from ops.models import VideoNet\r\nfrom ops.models_test import VideoNet \r\nfrom ops.transforms import *\r\nfrom opts_test import parser\r\nfrom ops import dataset_config\r\nfrom ops.utils import AverageMeter, accuracy\r\n# from ops.temporal_shift import make_temporal_pool\r\n\r\nfrom tensorboardX import SummaryWriter\r\n#os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3'\r\n#os.environ['CUDA_VISIBLE_DEVICES'] = '4,5,6,7'\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '4'\r\ndef eval_video(video_data, net):\r\n net.eval()\r\n with torch.no_grad():\r\n i, data, label = video_data\r\n batch_size = label.numel()\r\n # print(data.size())\r\n # print(label.size())\r\n #+++++++++++++++++\r\n if args.dense_sample:\r\n num_crop = 10*args.test_crops\r\n elif args.twice_sample:\r\n num_crop = 2*args.test_crops\r\n else:\r\n num_crop = 1*args.test_crops\r\n #++++++++++++++++\r\n rst, weights = net(data)\r\n rst = rst.reshape(batch_size, num_crop, -1).mean(1)\r\n #\r\n if args.softmax:\r\n # take the softmax to normalize the output to probability\r\n rst = F.softmax(rst, dim=1)\r\n\r\n rst = rst.data.cpu().numpy().copy()\r\n\r\n return i, rst, label, weights\r\n\r\n\r\ndef main():\r\n global args\r\n args = parser.parse_args()\r\n\r\n num_class, args.train_list, args.val_list, args.root_path, prefix = dataset_config.return_dataset(args.dataset,\r\n args.modality)\r\n\r\n assert args.modality == 'RGB'\r\n if args.test_list:\r\n test_list = args.test_list\r\n else:\r\n test_list = args.val_list\r\n\r\n # ==== get test args ====\r\n test_weights_list = args.test_weights.split(',')\r\n test_nets_list = args.test_nets.split(',')\r\n test_segments_list = [int(s) for s in args.test_segments.split(',')]\r\n assert len(test_nets_list) == len(test_segments_list)\r\n # test_cdivs_list = [int(s) for s in args.test_cdivs.split(',')]\r\n # =======================\r\n data_iter_list = []\r\n net_list = []\r\n\r\n scale_size = 256\r\n crop_size = 256 if args.full_res else 224 # 224 or 256 (scale_size)\r\n if args.test_crops == 1:\r\n cropping = torchvision.transforms.Compose([\r\n GroupScale(scale_size),\r\n GroupCenterCrop(crop_size),\r\n ])\r\n elif args.test_crops == 3: # do not flip, so only 5 crops\r\n cropping = torchvision.transforms.Compose([\r\n GroupFullResSample(crop_size, scale_size, flip=False)\r\n ])\r\n elif args.test_crops == 5: # do not flip, so only 5 crops\r\n cropping = torchvision.transforms.Compose([\r\n GroupOverSample(crop_size, scale_size, flip=False)\r\n ])\r\n elif args.test_crops == 10:\r\n cropping = torchvision.transforms.Compose([\r\n GroupOverSample(crop_size, scale_size)\r\n ])\r\n else:\r\n raise ValueError(\"Only 1, 5, 10 crops are supported while we got {}\".format(args.test_crops))\r\n\r\n test_log = 'test_logs_256'\r\n if not os.path.exists(test_log):\r\n os.mkdir(test_log)\r\n\r\n log_path = './{}/log_{}_{}_{}_a{}_b{}_seg{}_{}.txt'.format(test_log, args.arch, args.dataset, \"-\".join(test_nets_list), \\\r\n \"-\".join(str(a) for a in test_segments_list), \\\r\n crop_size)\r\n\r\n for this_net, 
this_segment, this_weight in zip(test_nets_list, test_segments_list, test_weights_list):\r\n \r\n model = VideoNet(num_class, this_segment, args.modality,\r\n backbone=args.arch, net=this_net,\r\n consensus_type=args.consensus_type,\r\n element_filter=args.element_filter,\r\n cdiv=args.cdiv)\r\n\r\n \r\n\r\n # weights_path = \"./checkpoints/%s/%s_%s_c%d_s%d.pth\"%(args.dataset, args.model, this_net, this_cdiv, this_segment)\r\n print(this_weight)\r\n if not os.path.exists(this_weight):\r\n raise ValueError('the checkpoint file doesnot exist: %s'%this_weight)\r\n\r\n checkpoint = torch.load(this_weight)\r\n print(checkpoint['best_prec1'])\r\n checkpoint_sd = checkpoint['state_dict']\r\n #print(checkpoint_sd.keys())\r\n \r\n base_dict = {'.'.join(k.split('.')[1:]): v for k, v in list(checkpoint_sd.items())}\r\n for ks in list(base_dict.keys()):\r\n if ks.split('.')[-1] in ['total_params','total_ops']:\r\n base_dict.pop(ks)\r\n #print(ks)\r\n model.load_state_dict(base_dict)\r\n\r\n # crop_size = model.scale_size if args.full_res else model.input_size # 224 or 256 (scale_size)\r\n # scale_size = model.scale_size # 256\r\n input_mean = model.input_mean\r\n input_std = model.input_std\r\n\r\n # Data loading code\r\n if args.modality != 'RGBDiff':\r\n normalize = GroupNormalize(input_mean, input_std)\r\n else:\r\n normalize = IdentityTransform()\r\n\r\n if args.modality == 'RGB':\r\n data_length = 1\r\n elif args.modality in ['Flow', 'RGBDiff']:\r\n data_length = 5\r\n\r\n # print('----Validation----')\r\n print('batch size', args.batch_size)\r\n\r\n test_loader = torch.utils.data.DataLoader(\r\n TSNDataSet(args.root_path, test_list, num_segments=this_segment,\r\n new_length=data_length,\r\n modality=args.modality,\r\n image_tmpl=prefix,\r\n test_mode=True,\r\n random_shift=False,\r\n transform=torchvision.transforms.Compose([\r\n \t cropping,\r\n GroupScale(224), \r\n # GroupScale(int(scale_size)),\r\n \r\n #GroupScale(256),\r\n #GroupCenterCrop(224),\r\n Stack(roll=(args.arch in ['BNInception', 'InceptionV3'])),\r\n ToTorchFormatTensor(div=(args.arch not in ['BNInception', 'InceptionV3'])),\r\n normalize,\r\n ]), dense_sample=args.dense_sample, twice_sample=args.twice_sample),\r\n batch_size=args.batch_size, shuffle=False,\r\n num_workers=args.workers, pin_memory=True)\r\n\r\n \r\n\r\n #\r\n total_num = len(test_loader.dataset)\r\n print('total test number:', total_num)\r\n #\r\n #model = torch.nn.DataParallel(model).cuda()\r\n model.eval()\r\n\r\n net_list.append(model)\r\n\r\n data_gen = enumerate(test_loader)\r\n data_iter_list.append(data_gen)\r\n #\r\n top1 = AverageMeter()\r\n top5 = AverageMeter()\r\n batch_times = AverageMeter()\r\n\r\n #\r\n proc_start_time = time.time()\r\n\r\n output = []\r\n fw = open(log_path, 'w')\r\n weights_data = np.zeros((num_class, 4, 4))\r\n for i, data_label_pairs in enumerate(zip(*data_iter_list)):\r\n with torch.no_grad():\r\n this_rst_list = []\r\n \r\n this_label = None\r\n # end = time.time()\r\n weight_data = []\r\n for (_, (data, label)), net in zip(data_label_pairs, net_list):\r\n end = time.time()\r\n rst = eval_video((i, data, label), net)\r\n batch_times.update(time.time()-end, label.size(0))\r\n this_rst_list.append(rst[1])\r\n weight_data = rst[3] #bsz, 4, num_blocks, 4\r\n this_label = label\r\n # assert len(this_rst_list) == len(coeff_list)\r\n # for i_coeff in range(len(this_rst_list)):\r\n # this_rst_list[i_coeff] *= coeff_list[i_coeff]\r\n ensembled_predict = sum(this_rst_list) / len(this_rst_list)\r\n\r\n for p, g in 
zip(ensembled_predict, this_label.cpu().numpy()):\r\n output.append([p[None, ...], g])\r\n\r\n for j in range(len(weight_data)):\r\n weight_data[j] = sum(weight_data[j]).cpu().numpy()\r\n \r\n weight_data = np.array(weight_data) # 4 bsz 4 \r\n \r\n \r\n weight_data = weight_data.transpose(1,0,2) # bsz 4 4\r\n #print(weight_data.shape)\r\n\r\n for weight, l in zip(weight_data, this_label.cpu().numpy()): # 4, num_blocks, 4\r\n weights_data[l] = weights_data[l] + weight\r\n \r\n cnt_time = time.time() - proc_start_time\r\n prec1, prec5 = accuracy(torch.from_numpy(ensembled_predict), this_label, topk=(1, 5))\r\n top1.update(prec1.item(), this_label.numel())\r\n top5.update(prec5.item(), this_label.numel())\r\n if i % 20 == 0:\r\n txt = 'video {} done, total {}/{}, average {:.3f} sec/video, moving Prec@1 {:.3f} Prec@5 {:.3f}'.format(i * args.batch_size, i * args.batch_size, total_num,\r\n float(cnt_time) / (i+1) / args.batch_size, top1.avg, top5.avg)\r\n print(txt)\r\n fw.write(txt+'\\n')\r\n fw.flush()\r\n\r\n # fw.close()\r\n\r\n print('avg computing time', batch_times.avg)\r\n video_pred = [np.argmax(x[0]) for x in output]\r\n video_pred_top5 = [np.argsort(np.mean(x[0], axis=0).reshape(-1))[::-1][:5] for x in output]\r\n\r\n video_labels = [x[1] for x in output]\r\n\r\n cf = confusion_matrix(video_labels, video_pred).astype(float)\r\n\r\n # np.save('cm.npy', cf)\r\n cls_cnt = cf.sum(axis=1)\r\n cls_hit = np.diag(cf)\r\n\r\n cls_acc = cls_hit / cls_cnt\r\n print(cls_acc*100)\r\n # upper = np.mean(np.max(cf, axis=1) / cls_cnt)\r\n # print('upper bound: {}'.format(upper))\r\n cls_acc_avg = np.sum(cls_acc*cls_cnt)/cls_cnt.sum()\r\n print(cls_acc_avg)\r\n weights_data = weights_data/np.expand_dims(np.expand_dims(cls_cnt,-1).repeat(4,axis=-1),-1).repeat(4,axis=-1)\r\n\r\n import csv\r\n with open(args.test_nets+'_cls_acc.csv','w') as f:\r\n f_csv = csv.writer(f)\r\n f_csv.writerow(cls_acc)\r\n # with open('cls_count.csv','w') as f:\r\n # f_csv = csv.writer(f)\r\n # f_csv.writerow(cls_cnt.tolist())\r\n # return 0\r\n # with open('cls_weight_layer1.csv','w') as f:\r\n # f_csv = csv.writer(f)\r\n # f_csv.writerows((weights_data[:,0,:]/3).tolist())\r\n # with open('cls_weight_layer2.csv','w') as f:\r\n # f_csv = csv.writer(f)\r\n # f_csv.writerows((weights_data[:,1,:]/4).tolist())\r\n # with open('cls_weight_layer3.csv','w') as f:\r\n # f_csv = csv.writer(f)\r\n # f_csv.writerows((weights_data[:,2,:]/6).tolist())\r\n # with open('cls_weight_layer4.csv','w') as f:\r\n # f_csv = csv.writer(f)\r\n # f_csv.writerows((weights_data[:,3,:]/3).tolist())\r\n \r\n\r\n\r\n print('-----Evaluation is finished------')\r\n print('Class Accuracy {:.02f}%'.format(cls_acc_avg*100))\r\n txt = 'Overall Prec@1 {:.02f}% Prec@5 {:.02f}%'.format(top1.avg, top5.avg)\r\n fw.write(txt)\r\n fw.close()\r\n print(txt)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()"
] |
[
[
"torch.nn.functional.softmax",
"sklearn.metrics.confusion_matrix"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
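Note on the `test_models.py` cell above: the `log_path` template `'./{}/log_{}_{}_{}_a{}_b{}_seg{}_{}.txt'` contains eight `{}` placeholders but `.format()` receives only six arguments, so building `log_path` raises IndexError before evaluation even starts. A minimal corrected sketch with stand-in values (the unmatched `a{}`/`b{}` slots look like leftover hyperparameter fields, so they are simply dropped here):

    # stand-ins for the argparse-derived values used in the cell above
    test_log, arch, dataset = "test_logs_256", "resnet50", "somethingv1"
    test_nets_list, test_segments_list, crop_size = ["tsm"], [8], 256

    log_path = "./{}/log_{}_{}_{}_seg{}_{}.txt".format(
        test_log, arch, dataset,
        "-".join(test_nets_list),
        "-".join(str(s) for s in test_segments_list),
        crop_size,
    )  # six placeholders for six arguments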
1512474508/deep-generative-models
|
[
"67d65d63f9f67050c29ae500bdd6b4518da14f7c",
"67d65d63f9f67050c29ae500bdd6b4518da14f7c"
] |
[
"src/train_DBM.py",
"src/train_CVAE.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport pickle\nimport datetime\nimport argparse\nimport re\nimport glob\nfrom obj.DBM import DBM\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.image as mpimg\nfrom skimage.transform import resize\n\n################################\n# train DBM from input data\n################################\n\ndef trainDBM(data, learning_rate, k1, k2, epochs, batch_size, dims):\n # import data\n print(\"importing training data\")\n if data == \"fashion_mnist\":\n fashion_mnist = tf.keras.datasets.fashion_mnist\n (x_train, _), (_,_) = fashion_mnist.load_data()\n elif data == \"mnist\":\n mnist = tf.keras.datasets.mnist\n (x_train, _), (_,_) = mnist.load_data()\n elif data == \"faces\":\n x_train = [resize(mpimg.imread(file),(28,28)) for file in glob.glob(\"data/faces/*\")]\n x_train = np.asarray(x_train)\n # make images sparse for easier distinctions\n for img in x_train:\n img[img < np.mean(img)+0.5*np.std(img)] = 0\n else:\n raise NameError(\"unknown data type: %s\" % data)\n if data == \"mnist\" or data == \"fashion_mnist\":\n x_train = x_train/255.0\n x_train = [tf.cast(tf.reshape(x,shape=(784,1)),\"float32\") for x in x_train]\n elif data == \"faces\":\n # auto conversion to probabilities in earlier step\n x_train = [tf.cast(tf.reshape(x,shape=(784,1)),\"float32\") for x in x_train]\n # create log directory\n current_time = getCurrentTime()+\"_\"+re.sub(\",\",\"_\",dims)+\"_\"+data+\"_dbm\"\n os.makedirs(\"pickles/\"+current_time)\n # parse string input into integer list\n dims = [int(el) for el in dims.split(\",\")]\n dbm = DBM(dims, learning_rate, k1, k2, epochs, batch_size)\n dbm.train_PCD(x_train)\n # dump dbm pickle\n f = open(\"pickles/\"+current_time+\"/dbm.pickle\", \"wb\")\n pickle.dump(dbm, f, protocol=pickle.HIGHEST_PROTOCOL)\n f.close()\n\ndef getCurrentTime():\n return datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")\n\n####################################\n# main command call\n####################################\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data\", type=str, default=\"mnist\",\n help=\"data source to train DBM, possibilities are 'mnist', 'fashion_mnist' and 'faces' <default: 'mnist'>\")\n parser.add_argument(\"--learning-rate\", type=float, default=0.01,\n help=\"learning rate for stacked RBMs <default: 0.01>\")\n parser.add_argument(\"--k1\", type=int, default=1,\n help=\"number of Gibbs-sampling steps pre-PCD-k algorithm <default: 1>\")\n parser.add_argument(\"--k2\", type=int, default=5,\n help=\"number of Gibbs-sampling steps during PCD-k algorithm <default: 5>\")\n parser.add_argument(\"--epochs\", type=int, default=1,\n help=\"number of overall training data passes for each RBM <default: 1>\")\n parser.add_argument(\"--batch-size\", type=int, default=5,\n help=\"size of training data batches <default: 5>\")\n requiredNamed = parser.add_argument_group('required named arguments')\n requiredNamed.add_argument('-d', '--dimensions', type=str, \n help=\"consecutive enumeration of visible and hidden layers separated by a comma character, eg. 784,500,784,500\", \n required=True)\n args = parser.parse_args()\n # train DBM based on parameters\n trainDBM(args.data,args.learning_rate,args.k1,args.k2,args.epochs,args.batch_size,args.dimensions)\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport datetime\nimport argparse\nimport re\nimport csv\nimport tensorflow as tf\nimport numpy as np\nimport glob\nfrom skimage.transform import resize\nimport matplotlib.image as mpimg\nfrom obj.CVAE import CVAE\n\n################################\n# train CVAE from input data\n################################\n\ndef trainCVAE(data,learning_rate,epochs,batch_size,im_dim,num_filters,latent_dimensions):\n # import data\n print(\"importing training data\")\n if data == \"fashion_mnist\":\n (train_images, _), (_, _) = tf.keras.datasets.fashion_mnist.load_data()\n elif data == \"mnist\":\n (train_images, _), (_, _) = tf.keras.datasets.mnist.load_data()\n elif data == \"faces\":\n train_images = [resize(mpimg.imread(file),(28,28)) for file in glob.glob(\"./data/faces/*\")]\n train_images = np.asarray(train_images,dtype=\"float32\")\n train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')\n train_images[train_images >= np.mean(train_images)+0.5*np.std(train_images)] = 1.\n train_images[train_images != 1] = 0.\n else:\n raise NameError(\"unknown data type: %s\" % data)\n if data == \"mnist\" or data == \"fashion_mnist\":\n train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')\n train_images /= 255.\n train_images[train_images >= .5] = 1.\n train_images[train_images < .5] = 0.\n # create log directory\n current_time = getCurrentTime()+\"_\"+re.sub(\",\",\"_\",str(latent_dimensions))+\"_\"+data+\"_cvae\"\n os.makedirs(\"pickles/\"+current_time)\n # create model\n model = CVAE(latent_dim=latent_dimensions, epochs = epochs, batch_size = batch_size, \n learning_rate = learning_rate, im_dim = im_dim, n_filters = num_filters)\n model.train(train_images)\n # save model\n model.save_weights(\"pickles/\"+current_time+\"/cvae\")\n csvfile = open('pickles/'+ current_time + '/' + 'log.csv', 'w')\n fieldnames = [\"data\", \"learning_rate\", \"epochs\", \"batch_size\", \"im_dim\", \"num_filters\", \"latent_dimensions\"]\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerow({\"data\":data, \"learning_rate\":learning_rate, \"epochs\":epochs, \n \"batch_size\":batch_size, \"im_dim\":im_dim, \"num_filters\":num_filters, \n \"latent_dimensions\":latent_dimensions})\n csvfile.close()\n\ndef getCurrentTime():\n return datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")\n\n####################################\n# main command call\n####################################\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--data\", type=str, default=\"mnist\",\n help=\"data source to train CVAE, possibilities are 'mnist', 'fashion_mnist' and 'faces' <default: 'mnist'>\")\n parser.add_argument(\"--learning-rate\", type=float, default=0.001,\n help=\"learning rate, <default: 0.001>\")\n parser.add_argument(\"--epochs\", type=int, default=5,\n help=\"number of epochs for training <default: 5>\")\n parser.add_argument(\"--batch-size\", type=int, default=50,\n help=\"size of training data batches <default: 50>\")\n parser.add_argument(\"--im-dim\", type=int, default=28,\n help=\"square dimensionality of input images <default: 28>\")\n parser.add_argument(\"--num-filters\", type=int, default=32,\n help=\"number of filters to be used in convolutional layers <default: 32>\")\n requiredNamed = parser.add_argument_group('required named arguments')\n requiredNamed.add_argument('-l', '--latent-dimensions', type=int,\n 
help=\"number of central latent dimensions in CVAE, 2 dimensions are recommended for quick manifold visualization\", \n required=True)\n args = parser.parse_args()\n # train CVAE based on parameters\n trainCVAE(args.data,args.learning_rate,args.epochs,args.batch_size,args.im_dim,args.num_filters,args.latent_dimensions)\n"
] |
[
[
"numpy.asarray",
"tensorflow.reshape",
"matplotlib.image.imread",
"numpy.std",
"numpy.mean"
],
[
"tensorflow.keras.datasets.fashion_mnist.load_data",
"numpy.asarray",
"tensorflow.keras.datasets.mnist.load_data",
"matplotlib.image.imread",
"numpy.std",
"numpy.mean"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
deecamp2019-group20/CNN_PokerNet
|
[
"751576cb941be57c8a37656feaff14b414c3dcb2"
] |
[
"game/engine.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\n自定义相关类\n\"\"\"\nimport numpy as np\nfrom typing import List, Tuple, Dict\nimport pandas as pd\nfrom collections import defaultdict\nfrom os.path import join, abspath, dirname\nfrom .card_util import All as backup, cache\nfrom .gameutil import card_show\nfrom copy import copy\nfrom .r import get_moves\n\n############################################\n# 游戏类 #\n############################################\nclass GameState():\n def __init__(self):\n self.hand = None\n self.out = None\n self.up_out = None\n self.down_out = None\n self.self_out = None\n self.other_hand = None\n self.last_move = [0]*15 # 上一个有效出牌,全零表示主动权\n self.last_pid = -1 # 上一个有效出牌的玩家编号,-1表示主动权\n self.last_move_ = np.zeros(15, dtype=int) # 上一个出牌,不管有效与否\n self.last_last_move_ = np.zeros(15, dtype=int) # 上上个出牌,不管有效与否\n\nclass Game(object):\n def __init__(self, agents: List['Agent']):\n # 初始化players\n self.players = agents\n for p in agents:\n p.game = self\n self.game_reset()\n\n def get_state(self)->GameState:\n state = GameState()\n state.hand = self.players[self.index].get_hand_card().copy()\n tmp, state.out = Card.vectorized_card_out(self.cards_out, len(self.players))\n state.up_out = tmp[self.get_up_index()]\n state.down_out = tmp[self.get_down_index()]\n state.self_out = tmp[self.index]\n state.other_hand = (np.array([4]*13+[1,1]) - state.hand - state.out).tolist()\n state.last_move = self.last_move.copy()\n state.last_pid = self.last_pid\n if len(self.cards_out)>=1:\n self.last_move_ = self.cards_out[-1][-1].copy()\n if len(self.cards_out)>=2:\n self.last_last_move_ = self.cards_out[-2][-1].copy()\n return state\n\n def get_up_index(self):\n return len(self.players)-1 if self.index==0 else self.index-1\n \n def get_down_index(self):\n return 0 if self.index==len(self.players)-1 else self.index+1\n\n # 游戏环境重置\n def game_reset(self):\n #初始化一副扑克牌类\n cards = Card.init_card_suit()\n #洗牌\n np.random.shuffle(cards)\n #发牌并排序\n self.mingpai = cards[:3]\n p1_cards = cards[:20]\n p1_cards.sort(key=lambda x: x.rank)\n p2_cards = cards[20:37]\n p2_cards.sort(key=lambda x: x.rank)\n p3_cards = cards[37:]\n p3_cards.sort(key=lambda x: x.rank)\n self.players[0].set_hand_card( p1_cards )\n self.players[1].set_hand_card( p2_cards )\n self.players[2].set_hand_card( p3_cards )\n self.cards_out = []\n\n #play相关参数\n self.end = False # 游戏是否结束\n self.last_move = [0]*15\n self.last_pid = -1\n self.playround = 1 # 回合数\n self.index = 0 # 当前玩家的id,0代表地主,1代表地主下家,2代表地主上家\n self.yaobuqis = []\n return self.players[0].get_hand_card(),\\\n self.players[1].get_hand_card(),\\\n self.players[2].get_hand_card(),\\\n Card.vectorized_card_list(self.mingpai)\n\n \n #游戏进行 \n def step(self):\n player = self.players[self.index]\n state = self.get_state()\n state, cur_moves, cur_move, self.end, info = player.step(state) #返回:在状态state下,当前玩家的出牌列表、游戏是否结束、choose自定义返回值\n if sum(cur_move)==0:\n self.yaobuqis.append(self.index)\n #都要不起\n if len(self.yaobuqis) == len(self.players)-1:\n self.yaobuqis = []\n self.last_move = [0]*15\n self.last_pid = -1\n else:\n self.yaobuqis = []\n self.last_move = cur_move\n self.last_pid = self.index\n\n winner = -1\n if self.end:\n winner = self.index\n\n self.index = self.index + 1\n #一轮结束\n if self.index >= len(self.players):\n self.playround = self.playround + 1\n self.index = 0\n \n return player.player_id, state, cur_moves, cur_move, winner, info\n\n def show(self):\n for i in range(len(self.players)):\n card_show(self.players[i].get_hand_card(), \"Player {}\".format(i), 
1)\n\n############################################\n# 扑克牌相关类 #\n############################################\n\nclass Card(object):\n \"\"\"\n 扑克牌类\n \"\"\"\n color_show = {}\n #color_show = {'a': '♠', 'b':'♥', 'c':'♣', 'd':'♦'}\n name_show = {'11':'J', '12':'Q', '13':'K', '14':'B', '15':'R'}\n name_to_rank = {'3':1, '4':2, '5':3, \\\n '6':4, '7':5, '8':6, '9':7, '10':8, '11':9, '12':10, '13':11, \\\n '1':12, '2':13, '14':14, '15':15}\n all_card_type = ['1-a', '1-b','1-c','1-d',\n '2-a', '2-b','2-c','2-d',\n '3-a', '3-b','3-c','3-d',\n '4-a', '4-b','4-c','4-d',\n '5-a', '5-b','5-c','5-d',\n '6-a', '6-b','6-c','6-d',\n '7-a', '7-b','7-c','7-d',\n '8-a', '8-b','8-c','8-d',\n '9-a', '9-b','9-c','9-d',\n '10-a', '10-b','10-c','10-d',\n '11-a', '11-b','11-c','11-d',\n '12-a', '12-b','12-c','12-d',\n '13-a', '13-b','13-c','13-d',\n '14-a', '15-a']\n\n all_card_name = [str(i) for i in range(3, 14)] + ['1', '2', '14', '15']\n\n @staticmethod\n def visual_card(cards):\n c = []\n for i, n in enumerate(Card.all_card_name):\n c.extend([n]*cards[i])\n return c\n\n @staticmethod\n def vectorized_card_list(cards: List):\n v = [0] * len(Card.all_card_name)\n for c in cards:\n if isinstance(c, int):\n i = Card.name_to_rank[str(c)]-1\n elif isinstance(c, str):\n i = Card.name_to_rank[c]-1\n elif isinstance(c, Card):\n i = c.rank-1\n else:\n print(\"Warn: Unkown card.\")\n v[ i ]+=1\n return v\n\n @staticmethod\n def vectorized_card_out(cards_out: List[Tuple[int, np.array]], total_player=3):\n cnt = {}\n for rec in cards_out:\n a = cnt.get(rec[0], np.zeros( 15, dtype=int )) # 15\n b = np.array( rec[-1], dtype=int )\n cnt[rec[0]] = a+b\n a = np.zeros( 15, dtype=int )\n for v in cnt.values():\n a+=v\n res = []\n for i in range(total_player):\n res.append(cnt.get(i, np.zeros( 15, dtype=int )).tolist())\n return res, a.tolist()\n\n @staticmethod\n def init_card_suit():\n cards = []\n for card_type in Card.all_card_type:\n cards.append(Card(card_type))\n return cards\n\n\n def __init__(self, card_type):\n self.card_type = card_type # '牌面数字-花色' 举例来说,红桃A的card_type为'1-a'\n self.name = self.card_type.split('-')[0] # 名称,即牌面数字\n self.color = self.card_type.split('-')[1] # 花色\n # 大小\n self.rank = Card.name_to_rank[self.name]\n\n\n def __str__(self):\n return Card.name_show.get(self.name, self.name)\n #return Card.name_show.get(self.name, self.name) + Card.color_show.get(self.color, self.color)\n \n __repr__ = __str__\n \ndef get_move_desc(move: List[int]):\n \"\"\"\n 输入出牌, 返回牌型描述:总张数,主牌rank,类型\n move: 长度为15的数组,元素表示3/4/5/...15出多少张。全零表示不要。\n \"\"\"\n lst = []\n for i, n in enumerate(Card.all_card_name):\n lst.extend([int(n)]*move[i])\n key = str(sorted(lst))\n return cache[key]\n\ndef group_by_type(moves: List[Dict]):\n \"\"\"\n 输入moves, 返回按牌型分组的描述。\n 返回值:\n { 'type1': [(move1, desc1), ...], ... 
}\n move1 是一个15的列表,desc1是namedtuple,可用属性:sum/type/main/kicker\n \"\"\"\n res = defaultdict(list)\n for m in moves:\n desc = get_move_desc(m)\n res[desc.type].append( (m, desc) )\n return res\n\n############################################\n# 玩家相关类 #\n############################################\nclass Agent(object):\n \"\"\"\n 玩家类,所有模型都应继承此类并重写choose方法\n \"\"\"\n def __init__(self, player_id):\n self.player_id = player_id # 0代表地主,1代表地主下家,2代表地主上家\n self.__cards_left = np.zeros(15, dtype=int) # 表示3/4/5.../15有多少张\n self.game = None\n self.state = None # 当前游戏状态\n\n def set_hand_card(self, cards):\n self.__cards_left = np.zeros(15, dtype=int) # 表示3/4/5.../15有多少张\n for c in cards:\n self.__cards_left[c.rank-1]+=1\n\n def get_hand_card(self):\n return self.__cards_left\n\n def get_public_card(self):\n public_cards = self.game.mingpai\n v = np.zeros(15, dtype=int)\n for c in public_cards:\n if isinstance(c, int):\n i = Card.name_to_rank[str(c)]-1\n elif isinstance(c, str):\n i = Card.name_to_rank[c]-1\n elif isinstance(c, Card):\n # Card.rank starts from 1\n i = c.rank-1\n else:\n print(\"Warn: Unkown card.\")\n v[ i ]+=1\n return v\n\n def get_moves(self):\n '''\n 根据前面玩家的出牌来选牌,返回下一步所有合法出牌。\n '''\n moves = get_moves(self.__cards_left, self.game.last_move)\n return moves\n \n # 模型选择如何出牌\n def choose(self, state: GameState) -> Tuple[List[int], object]:\n return [], None\n\n # 进行一步之后的公共操作\n def __common_step(self, move):\n #移除出掉的牌; 记录\n try:\n assert( np.all(self.__cards_left>=move) )\n assert( np.all(self.__cards_left[:-2]<=4) and np.all(self.__cards_left[-2:])<=1 )\n except AssertionError:\n print(\"手牌:\", self.__cards_left)\n print(\"出牌:\", move)\n raise AssertionError()\n self.__cards_left -= move\n self.game.cards_out.append( (self.player_id, move) )\n\n #是否牌局结束\n end = False\n if self.__cards_left.sum() == 0:\n end = True\n return end\n\n # 出牌\n def step(self, state):\n self.move_list = self.get_moves() # 可在self.choose里使用\n move, info = self.choose(state)\n end = self.__common_step(move)\n return state, self.move_list, move, end, info\n\n def observation(self):\n return self.game.get_state(), self.get_moves()\n\n\n"
] |
[
[
"numpy.all",
"numpy.array",
"numpy.zeros",
"numpy.random.shuffle"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
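Note on the `engine.py` cell above: in `Agent.__common_step` the hand-sanity assertion reads `np.all(self.__cards_left[:-2]<=4) and np.all(self.__cards_left[-2:])<=1`; the second comparison is parenthesized so that `np.all` collapses the joker counts to a bool before `<= 1`, which is always true, so the joker check can never fire. A minimal corrected sketch with a stand-in hand vector:

    import numpy as np

    cards_left = np.zeros(15, dtype=int)  # stand-in for the agent's private hand vector
    assert np.all(cards_left[:-2] <= 4), "at most four of each ordinary rank"
    assert np.all(cards_left[-2:] <= 1), "at most one of each joker"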
marcelooyaneder/Arboretum_Antumapu
|
[
"bc1d850ea0c6d45368b3bdb8b834b05dd49f9a57"
] |
[
"main.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\r\n\"\"\" # CRÉDITOS\r\nSoftware desarrllado en el laboratorio de biología de plantas ubicado en el campus Antumapu perteneciente a la Universidad de Chile.\r\n - Autores: \r\n - Paulette Naulin Gysling. \r\n - Marcelo Oyaneder Labarca.\r\n - Contacto:\r\n - [email protected]\r\n - [email protected] \"\"\"\r\n\r\n#package imports\r\n\r\nimport pandas as pd\r\nimport os\r\nimport errno\r\nimport pyqrcode\r\nfrom pathlib import Path\r\nimport filecmp\r\nimport shutil\r\nfrom python_firebase_url_shortener.url_shortener import UrlShortener\r\nimport time\r\nimport sys\r\nimport easygui as eg\r\nimport numpy \r\nfrom PIL import Image\r\n\r\n#autoidenficar el separator en csv ; o ,\r\nclass file_manager:\r\n def file_opener(self):\r\n #search if a csv file has been created previusly \r\n try:\r\n data=pd.read_csv('dataframe.csv',header=0,sep=';') #ver como variar de ; o ,\r\n except:\r\n file_path=eg.fileopenbox(msg='pick the file wish contain your data',title='directory',default='*',filetypes=None,multiple=False)\r\n if file_path.endswith('.xlsx') or file_path.endswith('.xls'):\r\n data=pd.read_excel(file_path,sheet_name='Hoja1',header=0)\r\n elif file_path.endswith('.csv'):\r\n data=pd.read_csv(file_path,header=0,sep=';') #ver como variar de ; o ,\r\n columns_df=data.columns.tolist()\r\n msg='select a column to be the index of the dataframe'\r\n title='select index' \r\n indexo=eg.choicebox(msg,title,columns_df)\r\n data=data.set_index(indexo, drop = True)\r\n og_data=data.copy()\r\n og_columns_df=og_data.columns.tolist()\r\n columns_dwc=pd.read_csv('documents\\dwc_terms\\simple_dwc_horizontal.csv',header=0,sep=';').columns.tolist() #ver como variar de ; o , \r\n columns_difference=list(set(columns_df)-set(columns_dwc))\r\n if not columns_difference:\r\n pass\r\n else:\r\n msg='the followings columns do not belong to DwC, select the ones you wish to delete'\r\n title='select to delete' \r\n choicebox=eg.multchoicebox(msg,title,columns_difference)\r\n try:\r\n for label in choicebox:\r\n data.drop(label,axis=1,inplace=True)\r\n except:\r\n pass\r\n empty_columns_drop_answer=eg.ynbox(msg='Do you wish to delete the empty columns?...',title='Drop empty columns') #a way to drop fully empty columns\r\n if empty_columns_drop_answer==True:\r\n data.dropna(axis=1, how='all',inplace=True)\r\n og_data.dropna(axis=1, how='all',inplace=True)\r\n og_data.to_csv('online_dataframe.csv',sep=',')\r\n else:\r\n pass\r\n return og_data,data,indexo,og_columns_df\r\n \r\n def file_creation(self):\r\n Record_level=pd.read_csv('documents\\dwc_terms\\Record_level.csv',header=0,sep=';',encoding = 'unicode_escape')\r\n Ocurrence=pd.read_csv('documents\\dwc_terms\\Ocurrence.csv',header=0,sep=';',encoding = 'unicode_escape')\r\n Organism=pd.read_csv('documents\\dwc_terms\\organism.csv',header=0,sep=';',encoding = 'unicode_escape')\r\n Material_sample=pd.read_csv('documents\\dwc_terms\\MaterialSample.csv',header=0,sep=';',encoding = 'unicode_escape')\r\n Event=pd.read_csv('documents\\dwc_terms\\event.csv',header=0,sep=';',encoding = 'unicode_escape')\r\n Location=pd.read_csv('documents\\dwc_terms\\location.csv',header=0,sep=';',encoding = 'unicode_escape')\r\n Geological_Context=pd.read_csv('documents\\dwc_terms\\GeologicalContext.csv',header=0,sep=';',encoding = 'unicode_escape')\r\n Identification=pd.read_csv('documents\\dwc_terms\\identification.csv',header=0,sep=';',encoding = 'unicode_escape')\r\n Taxon=pd.read_csv('documents\\dwc_terms\\Taxon.csv',header=0,sep=';',encoding = 
'unicode_escape')\r\n columns_dwc=[Record_level,Ocurrence,Organism,Material_sample,Event,Location,Geological_Context,Identification,Taxon]\r\n dwc_columns=[]\r\n for dataframe in columns_dwc:\r\n level_list=[]\r\n for rows in dataframe.itertuples():\r\n # Create list for the current row \r\n my_list =f'{rows.standardFieldName}-{rows.verbatimFieldName}-{rows.uri}' \r\n # append the list to the final list \r\n level_list.append(my_list)\r\n msg='select the terms for your custom dwc dataframe'\r\n title='select terms' \r\n choicebox=eg.multchoicebox(msg,title,level_list)\r\n try:\r\n for elements in choicebox:\r\n try:\r\n indice=level_list.index(elements)\r\n value=dataframe['standardFieldName'][indice]\r\n dwc_columns.append(value)\r\n except:\r\n pass\r\n except:\r\n pass\r\n dataframe=pd.DataFrame(columns=dwc_columns)\r\n return dataframe\r\n\r\nclass subject:\r\n def __init__(self,data):\r\n self.data=data\r\n\r\n def datafiltering(self,data):\r\n columns_df=data.columns.tolist()\r\n msg='select a value to query'\r\n title='select'\r\n choicebox=eg.choicebox(msg,title,columns_df)\r\n querys=data[choicebox].unique()\r\n query_choicebox=eg.choicebox(msg,title,querys)\r\n data.query(f\"{choicebox}=='{query_choicebox}'\",inplace=True) \r\n return data\r\n \r\n def datafiltering_predef(self,data,column):\r\n msg='select a value to query'\r\n title='select'\r\n querys=data[column].unique()\r\n query_choicebox=eg.choicebox(msg,title,querys)\r\n data.query(f\"{column}=='{query_choicebox}'\",inplace=True) \r\n return data\r\n\r\n def change_values(self,data,og_data,subjects): \r\n IDs_for_change=eg.multchoicebox(msg='Select the subject(s) for a change: ',title='Select...',choices=subjects) \r\n columns=data.columns.tolist()\r\n new_value_change=True\r\n while new_value_change==True:\r\n values_to_change=eg.choicebox(msg='The following values are available for change: ',title='Select...',choices=columns)\r\n set_value=eg.enterbox(msg=f'Enter a new value for {values_to_change}: ',title='New value...')\r\n for values in IDs_for_change:\r\n try:\r\n data.at[values,values_to_change]=set_value\r\n data.at[values,'acceptedNameUsage']= '{0} {1} {2}'.format(data.at[values,'genus'],data.at[values,'specificEpithet'],data.at[values,'nameAcordingTo'])\r\n og_data.at[values,values_to_change]=set_value\r\n og_data.at[values,'acceptedNameUsage']= '{0} {1} {2}'.format(data.at[values,'genus'],data.at[values,'specificEpithet'],data.at[values,'nameAcordingTo'])\r\n except:\r\n print('The changes can not be made')\r\n pass\r\n new_value_change=eg.ynbox(msg='Do you want to change another values in this subjects?',title='Value change')\r\n return data\r\n \r\n def add_values(self,data):\r\n msg = \"Enter information about the new subject\"\r\n title = \"New subject entry \"\r\n last_indexo =data.index[-1]\r\n new = int(last_indexo, 36) + 1\r\n new_id=numpy.base_repr(new, 36)\r\n fieldNames = data.columns.tolist()[1:]\r\n fieldValues = []\r\n fieldValues = eg.multenterbox(msg,title, fieldNames)\r\n fieldValues.insert(0,new_id)\r\n data.loc[fieldValues[0]]=fieldValues\r\n return data\r\n\r\n def save_values(self,data): #programar para que tire a csv\r\n path_choice=eg.diropenbox(msg='choose a folder to save a file',title='select a path')\r\n folder_name=eg.enterbox(msg='Enter the filename', title='Filename', default='DataFrame', strip=True, image=None, root=None)\r\n with pd.ExcelWriter(f\"{path_choice}\\{folder_name}.xlsx\") as writer:\r\n data.to_excel(writer, sheet_name='DataFrame')\r\n \r\n\r\ndef 
comparefiles(ID,info,option): #option 1 for showroom, 0 files \r\n filename1 = f\"temp/{ID}.txt\"\r\n if option==1:\r\n filename2= f\"showroom_files/{ID}.txt\"\r\n elif option==0:\r\n filename2= f\"files/{ID}.txt\"\r\n os.makedirs(os.path.dirname(filename1), exist_ok=True)\r\n with open(filename1,'w') as fil:\r\n fil.write(str(info))\r\n if os.path.isfile(filename2)==True:\r\n if filecmp.cmp(filename1,filename2)==False:\r\n print(f'ive found some changes since the last time, on file... {ID}.txt')\r\n print('changes has been saved')\r\n shutil.move(filename1,filename2)\r\n else:\r\n pass\r\n else:\r\n print(f'a new entry has been found, file... {ID}.txt has been created.')\r\n os.makedirs(os.path.dirname(filename2), exist_ok=True)\r\n with open(filename2,'w') as fil:\r\n fil.write(str(info))\r\n shutil.rmtree('temp/', ignore_errors=False, onerror=None)\r\n return \r\n\r\ndef infowriting(ID,info,option): #option 1 for showroom, 0 files\r\n try: \r\n if option ==0:\r\n filename = f\"files/{ID}.txt\" \r\n elif option==1:\r\n filename = f\"showroom_files/{ID}.txt\" \r\n os.makedirs(os.path.dirname(filename), exist_ok=True)\r\n with open(filename,'w') as fil:\r\n fil.write(str(info))\r\n print(f'a new entry has been found, file...{ID}.txt has been created.')\r\n except:\r\n print(f'permission to write in {filename} has been denied...')\r\n return \r\n\r\ndef dynamiclinks(longurl):\r\n user_info=pd.read_csv(\"documents\\dynamiclinks_user_info.csv\",header=0,sep=';')\r\n api_key=user_info['api_key'][0] #this need to be created on the firebase webpage\r\n sub_domain=user_info['sub_domain'][0] #this need to be created on firebase webpage\r\n try:\r\n url_shortener = UrlShortener(api_key,sub_domain)\r\n shorturl=url_shortener.get_short_link(longurl)\r\n except:\r\n print('Oops! 
you have reached the limit of urls')\r\n time.sleep(0.2) #to not break the limits of firebase\r\n return shorturl\r\n\r\n#crear un Qr para showroom \r\n#Crear un Qr para manejo del lab\r\ndef qr_manager(ID,short_url,option): #option 1 for showroom, 0 files\r\n try:\r\n if option ==0:\r\n filename = f\"qrs/{ID}.png\"\r\n elif option==1:\r\n filename = f\"qrs_showroom/{ID}.png\"\r\n os.makedirs(os.path.dirname(filename), exist_ok=True)\r\n quick_response_code= pyqrcode.create(short_url)\r\n with open(filename, 'wb') as f:\r\n quick_response_code.png(f, scale=8,module_color=(0,102,0,255),background=(255, 255, 255, 255))\r\n try:\r\n img = Image.open(filename)\r\n width, height = img.size\r\n logo_size =50\r\n logo = Image.open('documents\\logo.png')\r\n xmin = ymin = int((width / 2) - (logo_size / 2))\r\n xmax = ymax = int((width / 2) + (logo_size / 2))\r\n logo = logo.resize((xmax - xmin, ymax - ymin))\r\n img.paste(logo, (xmin, ymin, xmax, ymax))\r\n img.save(filename)\r\n except:\r\n pass\r\n except:\r\n print(f'permission to write in {filename} has been denied...')\r\n\r\n####################################################################\r\n##############################MAIN##################################\r\n####################################################################\r\n\r\n#######################################\r\n########FILE MANAGEMENT SECTION########\r\n#######################################\r\n\r\ndataframe=file_manager()\r\nfile_mng_button=eg.buttonbox(msg='select an option',title='select an option',choices=['Open a file','Create a custom dwc file'])\r\nif file_mng_button=='Open a file':\r\n og_data,data,indexo,og_columns_df=dataframe.file_opener() #no considerar para file_creation\r\n IDs=data.index.tolist() #no considerar para file_creation \r\n showroom_option_button=eg.buttonbox(msg='do you wish to create files for a showroom',title='select a option',choices=['Yes','No'])\r\n if showroom_option_button=='Yes':\r\n data_showroom=og_data.copy()\r\n msg='select the columns to keep on your showroom dataframe'\r\n title='select'\r\n choicebox=eg.multchoicebox(msg,title,og_columns_df)\r\n try:\r\n data_showroom=data_showroom[choicebox]\r\n except:\r\n pass\r\n elif showroom_option_button=='No':\r\n pass\r\nelif file_mng_button=='Create a custom dwc file':\r\n data=dataframe.file_creation() #no considerar para file_opener\r\n data.to_csv('custom_dwc_frame.csv',sep=';', encoding='utf-8') #considerar para file opener\r\n print ('your file is ready....')\r\n print(data)\r\n exit()\r\n\r\nprint(data)\r\n\r\n##################################\r\n########QUERY DATA SECTION########\r\n##################################\r\n\r\nquery_choicebox_options=['Yes...Custom query','Query by: order-family-genus-specificEpithet','Query by: Class-order-family-genus-specificEpithet','No']\r\nquery_choicebox=eg.choicebox(msg='Do you wish to query your data...',title='Query options',choices=query_choicebox_options)\r\nif query_choicebox==query_choicebox_options[0]:\r\n data_for_query=data.copy()\r\n r1=subject(data_for_query)\r\n answer_query_choicebox=True\r\n while answer_query_choicebox==True:\r\n r1.datafiltering(data_for_query)\r\n print(data_for_query)\r\n answer_query_choicebox=eg.ynbox(msg='Do you wish to make a new query?',title='Select an option')\r\n print('Your query has been finished....')\r\n print(data_for_query)\r\nelif query_choicebox==query_choicebox_options[1]:\r\n data_for_query=data.copy()\r\n r1=subject(data_for_query)\r\n 
column_query_predef=['order','family','genus','specificEpithet']\r\n for columns_predef in column_query_predef:\r\n r1.datafiltering_predef(data_for_query,columns_predef)\r\n print('Your query has been finished....')\r\n print(data_for_query)\r\nelif query_choicebox==query_choicebox_options[2]:\r\n data_for_query=data.copy()\r\n r1=subject(data_for_query)\r\n column_query_predef=['Class','order','family','genus','specificEpithet']\r\n for columns_predef in column_query_predef:\r\n r1.datafiltering_predef(data_for_query,columns_predef)\r\n print('Your query has been finished....')\r\n print(data_for_query)\r\nelse:\r\n pass\r\n\r\n\"\"\" DEBO DECIDIR SI HARE LA FUNCION PARA OBTENER UN CSV O EXCEL \r\nADEMAS DE REOBTENER EL VALOR SUBJECTS DE DATA_FOR_QUERY PARA CAMBIAR LOS VALORES SI ES QUE SE QUIERE \r\nDAR OPCION DE GUARDAR, CAMBIAR VALORES O MOSTRAR SUJETOS QUE CUMPLEN LOS REQUISITOS\r\nSECCION EN DESARROLLO\r\n\"\"\"\r\nif not query_choicebox==query_choicebox_options[3] or query_choicebox==None:\r\n choicebox_for_after_query_options=['export your query to a xlsx file (readable for excel)','make changes on your query and export them to a xlsx file (this changes will be saved on your original file)','show the subjects wich match your query']\r\n choicebox_for_after_query=eg.choicebox(msg='Choose an option for your query...',title='Query options',choices=choicebox_for_after_query_options)\r\n if choicebox_for_after_query==choicebox_for_after_query_options[0]:\r\n #export your query to a xlsx file (readable for excel)\r\n r1.save_values(data_for_query)\r\n elif choicebox_for_after_query==choicebox_for_after_query_options[1]:\r\n #make changes on your query and export them to a xlsx file (this changes will be saved on your original file)\r\n query_subjects=data_for_query[indexo].tolist()\r\n r1.change_values(data,og_data,query_subjects) \r\n r1.save_values(og_data) #for saving original data\r\n elif choicebox_for_after_query==choicebox_for_after_query_options[2]:\r\n #show the subjects wich match your query\r\n query_subjects=data_for_query[indexo].tolist()\r\n for values in query_subjects:\r\n print(values)\r\n else:\r\n pass\r\n\r\n#Add values \r\n#r1.add_values(data)\r\n\r\n#compare files or create them\r\nprint('compare/create files...')\r\nif os.path.isdir('files')==True:\r\n for id in IDs:\r\n comparefiles(id,data.loc[id],0)\r\nelse:\r\n for id in IDs:\r\n infowriting(id,data.loc[id],0)\r\n\r\nif showroom_option_button=='Yes':\r\n if os.path.isdir('showroom_files')==True:\r\n for id in IDs:\r\n comparefiles(id,data_showroom.loc[id],1)\r\n else:\r\n for id in IDs:\r\n infowriting(id,data_showroom.loc[id],1)\r\nprint ('there is nothing more to do here...')\r\n\r\n#compare qr files or create them\r\nuser_info=pd.read_csv(\"documents\\dynamiclinks_user_info.csv\",header=0,sep=';')\r\nGitHub_username=user_info['GitHub_username'][0] #this need to be created on the GitHub webpage\r\nRepository_name=user_info['Repository_name'][0] #this need to be created on the firebase webpage\r\nprint('create non existing qrs files...')\r\nif os.path.isdir('qrs')==True:\r\n for id in IDs:\r\n print(f'file {id} of file {IDs[-1]}',end='\\r', flush=True)\r\n path=f\"qrs/{id}.png\"\r\n if os.path.isfile(path)==False:\r\n longurl=f'https://raw.githubusercontent.com/{GitHub_username}/{Repository_name}/master/files/{id}.txt' \r\n shorturl=dynamiclinks(longurl)\r\n qr_manager(id,shorturl,0)\r\n else:\r\n pass\r\nelse:\r\n for id in IDs:\r\n print(f'file {id} of file {IDs[-1]}',end='\\r', flush=True)\r\n 
longurl=f'https://raw.githubusercontent.com/{GitHub_username}/{Repository_name}/master/files/{id}.txt'\r\n shorturl=dynamiclinks(longurl)\r\n qr_manager(id,shorturl,0)\r\n\r\nif showroom_option_button=='Yes':\r\n print('create non existing qrs shorwoom files...')\r\n if os.path.isdir('qrs_showroom')==True:\r\n for id in IDs:\r\n print(f'file {id} of file {IDs[-1]}',end='\\r', flush=True)\r\n path=f\"qrs_showroom/{id}.png\"\r\n if os.path.isfile(path)==False:\r\n longurl=f'https://raw.githubusercontent.com/{GitHub_username}/{Repository_name}/master/showroom_files/{id}.txt'\r\n shorturl=dynamiclinks(longurl)\r\n qr_manager(id,shorturl,1)\r\n else:\r\n pass\r\n else:\r\n for id in IDs:\r\n print(f'file {id} of file {IDs[-1]}',end='\\r', flush=True)\r\n longurl=f'https://raw.githubusercontent.com/{GitHub_username}/{Repository_name}/master/showroom_files/{id}.txt'\r\n shorturl=dynamiclinks(longurl)\r\n qr_manager(id,shorturl,1)\r\nelse:\r\n pass\r\nprint ('there is nothing more to do here...')"
] |
[
[
"pandas.read_excel",
"pandas.read_csv",
"pandas.DataFrame",
"numpy.base_repr",
"pandas.ExcelWriter"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
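Note on the `main.py` cell above: the hard-coded Windows paths such as `'documents\dwc_terms\Taxon.csv'` rely on `\d` and `\T` being unrecognized escape sequences; they happen to survive here, but any segment starting with `t`, `n`, or another recognized escape would silently corrupt the path, and the fixed `sep=';'` conflicts with the file's own `#autoidenficar el separator en csv ; o ,` TODO. A minimal sketch using `pathlib` plus pandas' separator sniffing (the path names are assumptions taken from the row):

    from pathlib import Path
    import pandas as pd

    terms_dir = Path("documents") / "dwc_terms"  # forward construction, no escape pitfalls
    # sep=None with engine="python" lets pandas sniff ';' vs ',' via csv.Sniffer
    taxon = pd.read_csv(terms_dir / "Taxon.csv", header=0, sep=None,
                        engine="python", encoding="unicode_escape")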
nchlis/rsom_vasculature
|
[
"320b6f0ff0a9968f18c6500aaa93d4c1d86ad25f"
] |
[
"step1_UNET_train.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 6 16:04:13 2018\n\n@author: N.Chlis\n\"\"\"\n#if used on a non-GUI server ######\n#import matplotlib\n#matplotlib.use('Agg')\n###################################\n\nimport tensorflow as tf\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n try:\n for gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n except RuntimeError as e:\n print(e)\n\nimport numpy as np\n#import pandas as pd\nimport matplotlib.pyplot as plt\n#import h5py\nimport pandas as pd\n\nfrom keras.models import Model\nfrom keras.layers import Input\nfrom keras.layers import Activation\nfrom keras.layers import BatchNormalization\nfrom keras.layers import Dense\n#from keras.layers import Flatten\nfrom keras.layers import AveragePooling2D\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D, UpSampling2D, AveragePooling2D, Conv2DTranspose\nfrom keras.layers.merge import concatenate #Concatenate (capital C) not working \n#from keras.utils.vis_utils import plot_model\nfrom keras.layers import Dropout\n\nfrom keras.callbacks import EarlyStopping\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.callbacks import CSVLogger\nimport time\nimport skimage.transform\nfrom sklearn.model_selection import train_test_split\nfrom keras import backend as K \nimport gc\nimport os\n\ndef rotateT(X,angle):\n #rotate image tensor, TF order, single channel\n X_rot = np.zeros_like(X)\n #repeat for every channel\n for ch in np.arange(X.shape[-1]):\n #print('channel',ch)\n #repeat for every image\n for i in np.arange(X.shape[0]):\n #print('image',i)\n X_rot[i,:,:,ch] = skimage.transform.rotate(X[i,:,:,ch],angle=angle,resize=False,preserve_range=True,mode='edge')\n return(X_rot)\n\ndef shiftT(X,dx,dy):\n #shift image tensor, TF order, single channel\n X_shift = np.zeros_like(X)\n #repeat for every image\n tform = skimage.transform.SimilarityTransform(translation=(dx, dy))\n for i in np.arange(X.shape[0]):\n #print('image',i)\n X_shift[i,:,:,:] = skimage.transform.warp(X[i,:,:,:],tform,mode='edge')\n return(X_shift)\n\n#%% define the generator for training\n# randomly flip, rotate and translate each input image\n\ndef aug_generator(X_raw=None,Y_raw=None,\n batch_size=4,\n flip_axes=['x','y'],\n rotation_angles=[5,15],\n translate_axes=['x','y'],\n translate_percentages=[0,0.1]):\n \n batch_size=batch_size#recommended batch size \n Ndatapoints = len(X_raw)\n #Naugmentations=4 #original + flip, rotation, noise_gaussian, noise_snp\n \n while(True):\n #print('start!')\n ix_randomized = np.random.choice(Ndatapoints,size=Ndatapoints,replace=False)\n ix_batches = np.array_split(ix_randomized,int(Ndatapoints/batch_size))\n for b in range(len(ix_batches)):\n #print('step',b,'of',len(ix_batches))\n ix_batch = ix_batches[b]\n current_batch_size=len(ix_batch)\n #print('size of current batch',current_batch_size)\n #print(ix_batch)\n X_batch = X_raw[ix_batch,:,:,:].copy()#.copy() to leave original unchanged\n Y_batch = Y_raw[ix_batch,:,:,:].copy()#.copy() to leave original unchanged\n \n #now do augmentation on images and masks\n #iterate over each image in the batch\n for img in range(current_batch_size):\n #print('current_image',img,': ',ix_batch[img])\n do_aug=np.random.choice([True, False],size=1)[0]#50-50 chance\n if do_aug == True:\n #print('flipping',img)\n flip_axis_selected = np.random.choice(flip_axes,1,replace=False)[0]\n if flip_axis_selected == 'x':\n flip_axis_selected = 1\n else: # 'y'\n flip_axis_selected = 0\n #flip an axis\n X_batch[img,:,:,:] 
= np.flip(X_batch[img,:,:,:],axis=flip_axis_selected)\n Y_batch[img,:,:,:] = np.flip(Y_batch[img,:,:,:],axis=flip_axis_selected)\n #print('Flip on axis',flip_axis_selected)\n \n do_aug=np.random.choice([True, False],size=1)[0]#50-50 chance\n if do_aug == True:\n #print('rotating',img)\n rotation_angle_selected = np.random.uniform(low=rotation_angles[0],high=rotation_angles[1],size=1)[0]\n #rotate the image\n X_batch[img,:,:,:] = rotateT(np.expand_dims(X_batch[img,:,:,:],axis=0),angle=rotation_angle_selected)\n Y_batch[img,:,:,:] = rotateT(np.expand_dims(Y_batch[img,:,:,:],axis=0),angle=rotation_angle_selected)\n #print('Rotate angle',rotation_angle_selected)\n \n do_aug=np.random.choice([True, False],size=1)[0]#50-50 chance\n if do_aug == True:\n #print('shifting',img)\n #print(X_batch.shape)\n dx=0\n if 'x' in translate_axes: \n dx=np.random.uniform(low=translate_percentages[0],high=translate_percentages[1],size=1)[0]\n dx=dx*X_batch.shape[1]\n dy=0\n if 'y' in translate_axes: \n dy=np.random.uniform(low=translate_percentages[0],high=translate_percentages[1],size=1)[0]\n dy=dy*X_batch.shape[2]\n #translate the image\n #print('dx',dx)\n #print('dy',dy)\n X_batch[img,:,:,:] = shiftT(np.expand_dims(X_batch[img,:,:,:],axis=0),dx=dx,dy=dy)\n Y_batch[img,:,:,:] = shiftT(np.expand_dims(Y_batch[img,:,:,:],axis=0),dx=dx,dy=dy)\n \n Y_batch_skin=(Y_batch==1).astype('float')\n Y_batch_vasc=(Y_batch==2).astype('float')\n yield(X_batch,[Y_batch_skin,Y_batch_vasc])\n #print('step end after',b,'of',len(ix_batches))\n\n#%%\n#load the data\nIMHEIGHT = 768\nIMWIDTH = 256\n\nsavepath = './data_'+str(IMHEIGHT)+'_'+str(IMWIDTH)+'_annotated/'\n\n#df=pd.read_csv(savepath+'/metadata_qc.csv')\ndf=pd.read_csv(savepath+'/metadata_qc_extra.csv')\nX=np.load(savepath+'X.npy')#data\nY=np.load(savepath+'Y.npy')#masks\n\nX=X[:,:,:,0:2]#drop the last black channel\n\ndf=pd.read_csv(savepath+'/metadata_qc_extra.csv')\nstudy=df['case'].values\n#do quality control, only keep 115 out of 122 unique studies\n#the 115 unique studies correspond to unique 205 scans (multiple scans for some patients)\nqc_pass = df.quality_control.values=='pass'\nstudy=study[qc_pass]\nX=X[qc_pass,:,:,:]\nY=Y[qc_pass,:,:,:]\nstudy_unique=np.unique(study)\n\n#%% do leave one patient out validation\n\nstart=0\n#resume=True\nresume=False\nif resume==True:\n #there are 2 files per saved model in the trained_models folder\n start=int(len(os.listdir('./trained_models/'))/2)\n\nfor i in np.arange(start=start,stop=len(study_unique)):#Leave one study out\n print('*** Study',i+1,'of',len(study_unique),'***')\n\n s = study_unique[i]\n print('Study number',s)\n train_ix = study!=s\n test_ix = study==s\n \n X_tr=X[train_ix,:]\n Y_tr=Y[train_ix,:]\n \n #X_ts=X[test_ix,:]\n #Y_ts=Y[test_ix,:]\n \n X_tr, X_val, Y_tr, Y_val = train_test_split(X_tr, Y_tr, test_size=0.1, random_state=1)\n \n Y_tr_skin=(Y_tr==1).astype('float')\n Y_tr_vasc=(Y_tr==2).astype('float') \n Y_val_skin=(Y_val==1).astype('float')\n Y_val_vasc=(Y_val==2).astype('float')\n #%% set-up the UNET model\n \n #model parameters\n bnorm_axis = -1\n #filter sizes of the original model\n nfilters = np.array([64, 128, 256, 512, 1024])\n drop_rate=0.5\n drop_train=False\n #downsize the UNET for this example.\n #the smaller network is faster to train\n #and produces excellent results on the dataset at hand\n div=8\n nfilters = (nfilters/div).astype('int')\n #aug=True\n aug=False\n \n #input\n input_tensor = Input(shape=X_tr.shape[1:], name='input_tensor')\n \n ####################################\n # encoder 
(contracting path)\n ####################################\n #encoder block 0\n e0 = Conv2D(filters=nfilters[0], kernel_size=(3,3), padding='same')(input_tensor)\n e0 = BatchNormalization(axis=bnorm_axis)(e0)\n e0 = Activation('relu')(e0)\n e0 = Conv2D(filters=nfilters[0], kernel_size=(3,3), padding='same')(e0)\n e0 = BatchNormalization(axis=bnorm_axis)(e0)\n e0 = Activation('relu')(e0)\n \n #encoder block 1\n e1 = MaxPooling2D((2, 2))(e0)\n e1 = Conv2D(filters=nfilters[1], kernel_size=(3,3), padding='same')(e1)\n e1 = BatchNormalization(axis=bnorm_axis)(e1)\n e1 = Activation('relu')(e1)\n e1 = Conv2D(filters=nfilters[1], kernel_size=(3,3), padding='same')(e1)\n e1 = BatchNormalization(axis=bnorm_axis)(e1)\n e1 = Activation('relu')(e1)\n \n #encoder block 2\n e2 = Dropout(drop_rate)(e1, training = drop_train)\n e2 = MaxPooling2D((2, 2))(e2)\n e2 = Conv2D(filters=nfilters[2], kernel_size=(3,3), padding='same')(e2)\n e2 = BatchNormalization(axis=bnorm_axis)(e2)\n e2 = Activation('relu')(e2)\n e2 = Conv2D(filters=nfilters[2], kernel_size=(3,3), padding='same')(e2)\n e2 = BatchNormalization(axis=bnorm_axis)(e2)\n e2 = Activation('relu')(e2)\n \n #encoder block 3\n e3 = Dropout(drop_rate)(e2, training = drop_train)\n e3 = MaxPooling2D((2, 2))(e3)\n e3 = Conv2D(filters=nfilters[3], kernel_size=(3,3), padding='same')(e3)\n e3 = BatchNormalization(axis=bnorm_axis)(e3)\n e3 = Activation('relu')(e3)\n e3 = Conv2D(filters=nfilters[3], kernel_size=(3,3), padding='same')(e3)\n e3 = BatchNormalization(axis=bnorm_axis)(e3)\n e3 = Activation('relu')(e3)\n \n #encoder block 4\n e4 = Dropout(drop_rate)(e3, training = drop_train)\n e4 = MaxPooling2D((2, 2))(e4)\n e4 = Conv2D(filters=nfilters[4], kernel_size=(3,3), padding='same')(e4)\n e4 = BatchNormalization(axis=bnorm_axis)(e4)\n e4 = Activation('relu')(e4)\n e4 = Conv2D(filters=nfilters[4], kernel_size=(3,3), padding='same')(e4)\n e4 = BatchNormalization(axis=bnorm_axis)(e4)\n e4 = Activation('relu')(e4)\n #e4 = MaxPooling2D((2, 2))(e4)\n \n ####################################\n # decoder (expansive path)\n ####################################\n \n #decoder block 3\n d3 = Dropout(drop_rate)(e4, training = drop_train)\n d3=UpSampling2D((2, 2),)(d3)\n d3=concatenate([e3,d3], axis=-1)#skip connection\n d3=Conv2DTranspose(nfilters[3], (3, 3), padding='same')(d3)\n d3=BatchNormalization(axis=bnorm_axis)(d3)\n d3=Activation('relu')(d3)\n d3=Conv2DTranspose(nfilters[3], (3, 3), padding='same')(d3)\n d3=BatchNormalization(axis=bnorm_axis)(d3)\n d3=Activation('relu')(d3)\n \n #decoder block 2\n d2 = Dropout(drop_rate)(d3, training = drop_train)\n d2=UpSampling2D((2, 2),)(d2)\n d2=concatenate([e2,d2], axis=-1)#skip connection\n d2=Conv2DTranspose(nfilters[2], (3, 3), padding='same')(d2)\n d2=BatchNormalization(axis=bnorm_axis)(d2)\n d2=Activation('relu')(d2)\n d2=Conv2DTranspose(nfilters[2], (3, 3), padding='same')(d2)\n d2=BatchNormalization(axis=bnorm_axis)(d2)\n d2=Activation('relu')(d2)\n \n #decoder block 1\n d1=UpSampling2D((2, 2),)(d2)\n d1=concatenate([e1,d1], axis=-1)#skip connection\n d1=Conv2DTranspose(nfilters[1], (3, 3), padding='same')(d1)\n d1=BatchNormalization(axis=bnorm_axis)(d1)\n d1=Activation('relu')(d1)\n d1=Conv2DTranspose(nfilters[1], (3, 3), padding='same')(d1)\n d1=BatchNormalization(axis=bnorm_axis)(d1)\n d1=Activation('relu')(d1)\n \n #decoder block 0\n d0=UpSampling2D((2, 2),)(d1)\n d0=concatenate([e0,d0], axis=-1)#skip connection\n d0=Conv2DTranspose(nfilters[0], (3, 3), padding='same')(d0)\n 
d0=BatchNormalization(axis=bnorm_axis)(d0)\n d0=Activation('relu')(d0)\n d0=Conv2DTranspose(nfilters[0], (3, 3), padding='same')(d0)\n d0=BatchNormalization(axis=bnorm_axis)(d0)\n d0=Activation('relu')(d0)\n \n #output\n #out_class = Dense(1)(d0)\n out_class_skin = Conv2D(1, (1, 1), padding='same')(d0)\n out_class_skin = Activation('sigmoid',name='output_skin')(out_class_skin)\n \n out_class_vasc = Conv2D(1, (1, 1), padding='same')(d0)\n out_class_vasc = Activation('sigmoid',name='output_vasc')(out_class_vasc)\n \n #create and compile the model\n model=Model(inputs=input_tensor,outputs=[out_class_skin,out_class_vasc])\n model.compile(loss={'output_skin':'binary_crossentropy',#epidermis region\n 'output_vasc':'binary_crossentropy'},#dermis region\n optimizer='adam')\n #%%\n print(model.summary())\n \n #%% train the model\n filepath = 'mcd_unet_testStudy'+str(s)+'_MSOT_div'+str(div)+'_drop_rate'+str(drop_rate)+'_aug'+str(aug)\n\n #save the model when val_loss improves during training\n checkpoint = ModelCheckpoint('./trained_models/'+filepath+'.hdf5', monitor='val_loss', verbose=1, save_best_only=True, mode='auto')\n #save training progress in a .csv\n csvlog = CSVLogger('./trained_models/'+filepath+'_train_log.csv',append=True)\n #stop training if no improvement has been seen on val_loss for a while\n early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=40)\n batch_size=16\n \n #initialize the generator\n gen_train = aug_generator(X_tr,Y_tr,batch_size=batch_size,flip_axes=['x'])\n #split the array and see how many splits there are to determine #steps\n steps_per_epoch_tr = len(np.array_split(np.zeros(len(X_tr)),int(len(X_tr)/batch_size)))\n \n if aug==True:\n #actually do the training\n model.fit_generator(generator=gen_train,\n steps_per_epoch=steps_per_epoch_tr,#the generator internally goes over the entire dataset in one iteration\n validation_data=(X_val,[Y_val_skin,Y_val_vasc]),\n epochs=200,\n verbose=2,\n initial_epoch=0,\n callbacks=[checkpoint, csvlog, early_stopping])\n else:#no data augmentation\n model.fit(x=X_tr,y=[Y_tr_skin,Y_tr_vasc],\n batch_size=batch_size,\n validation_data=(X_val,[Y_val_skin,Y_val_vasc]),\n epochs=200,\n verbose=2,\n initial_epoch=0,\n callbacks=[checkpoint, csvlog, early_stopping])\n print('clearing Keras session...')\n del model\n K.clear_session()\n gc.collect()\n\n\n\n\n\n\n\n"
] |
[
[
"pandas.read_csv",
"numpy.expand_dims",
"numpy.unique",
"numpy.random.choice",
"numpy.arange",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.config.experimental.set_memory_growth",
"sklearn.model_selection.train_test_split",
"numpy.random.uniform",
"numpy.zeros_like",
"numpy.load",
"numpy.array",
"numpy.flip"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
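A minimal smoke test for the aug_generator defined in the listing above, assuming that generator and its helpers (rotateT, shiftT) are in scope; the toy array shapes and the {0, 1, 2} mask labels are illustrative assumptions, not the script's 768x256 MSOT data.

import numpy as np

X_demo = np.random.rand(8, 64, 32, 2)                                   # 8 toy images, 2 channels
Y_demo = np.random.randint(0, 3, size=(8, 64, 32, 1)).astype('float')   # 0=background, 1=skin, 2=vasculature
gen = aug_generator(X_demo, Y_demo, batch_size=4, flip_axes=['x'])
X_b, (Y_skin, Y_vasc) = next(gen)             # each yield is (X, [skin_mask, vasc_mask])
assert X_b.shape == (4, 64, 32, 2)            # int(8 / 4) = 2 batches of 4 per pass
assert set(np.unique(Y_skin)) <= {0.0, 1.0}   # per-class masks come out binarized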
helderpsilva/tictactoe_reinforcement_learning
|
[
"2d7abfb1832545d7287f84f1f7cf4f94cdb8adbb"
] |
[
"assets/agent.py"
] |
[
"# importar librarias necessarias.\nimport numpy as np\n\nclass Agent():\n \"\"\"Criação da entidade agente\"\"\"\n\n # O símbolo é X ou 0\n # O epsilon é a probabilidade que o agente tem de fazer uma escolha aleatória\n # O learning rate é a taxa de aprendizagem\n def __init__(self, symbol, epsilon = 0.05, learning_rate=0.2, name=None):\n self.name = name\n self.epsilon = epsilon\n self.learning_rate = learning_rate\n self.symbol = symbol\n self.history = []\n\n # memória do jogador (iniciada com base em value_function.py)\n def value_function(self, V):\n self.V = V\n\n \n def make_move(self, environment):\n \"\"\"Função responsável por executar jogadas\"\"\"\n\n # Loop que procura todas as jogadas disponíveis\n available_moves = []\n \n for row in range(3):\n for column in range(3):\n if environment.board[row,column] == 0:\n available_moves.append((row,column))\n\n random_choice = np.random.random()\n\n # Escolha aleatória com base no epsilon do agente\n if random_choice < self.epsilon:\n \n move_index = np.random.choice(len(available_moves))\n player_move = available_moves[move_index]\n \n else:\n board_list = []\n current_board = environment.board.copy()\n \n for move in available_moves:\n future_board = current_board.copy()\n future_board[move[0], move[1]] = self.symbol\n board_list.append(future_board.copy())\n \n # Entre todas as jogadas possíveis, escolhe a que maximiza a função v\n states = [environment.game_status(board) for board in board_list]\n values = [self.V[state] for state in states]\n \n best_move = np.argmax(values)\n player_move = available_moves[best_move]\n \n environment.board[player_move[0], player_move[1]] = self.symbol\n \n def update_history(self, s):\n self.history.append(s)\n \n def update(self, environment):\n \"\"\"Função responsável pela aprendizagem do agente\"\"\"\n\n reward = environment.reward(self.symbol)\n target = reward\n\n # Atualização dos valores da função v com base no outcome do jogo\n for state in reversed(self.history):\n value = self.V[state] + self.learning_rate*(target - self.V[state])\n self.V[state] = value\n target = value\n \n self.history = []"
] |
[
[
"numpy.argmax",
"numpy.random.random"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
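The update() method in the listing above walks the move history backwards with a TD-style rule V(s) <- V(s) + alpha * (target - V(s)), so the move closest to the reward is credited most. A hand-checkable sketch of that arithmetic with hypothetical state ids and values:

V = {0: 0.5, 1: 0.5}                 # hypothetical value table for two states
history, lr, reward = [0, 1], 0.2, 1.0
target = reward
for state in reversed(history):      # same backward sweep as Agent.update()
    V[state] += lr * (target - V[state])
    target = V[state]
# V[1] -> 0.6 (last move, nearest the reward); V[0] -> ~0.52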
mchoi8739/sagemaker-debugger
|
[
"fcc02366d06308642cf96b17aae417db9c1192d5"
] |
[
"tests/profiler/tensorflow2/test_native_tf2_profiler.py"
] |
[
"# Standard Library\nimport os\n\n# Third Party\nimport pytest\nimport tensorflow as tf\nfrom tests.profiler.core.utils import validate_python_profiling_stats\nfrom tests.tensorflow2.utils import ModelType\n\n# First Party\nimport smdebug.tensorflow as smd\nfrom smdebug.core.collection import CollectionKeys\nfrom smdebug.core.utils import FRAMEWORK\nfrom smdebug.profiler.profiler_config_parser import ProfilerConfigParser\nfrom smdebug.profiler.profiler_constants import (\n CPROFILE_NAME,\n CPROFILE_STATS_FILENAME,\n PYINSTRUMENT_HTML_FILENAME,\n PYINSTRUMENT_JSON_FILENAME,\n PYINSTRUMENT_NAME,\n)\nfrom smdebug.profiler.python_profile_utils import StepPhase\nfrom smdebug.tensorflow import KerasHook as Hook\n\n\[email protected]\ndef native_tf2_cprofile_profiler_config_parser(config_folder, monkeypatch):\n config_path = os.path.join(\n config_folder, \"test_native_tf2_cprofile_profiler_config_parser.json\"\n )\n monkeypatch.setenv(\"SMPROFILER_CONFIG_PATH\", config_path)\n return ProfilerConfigParser(FRAMEWORK.TENSORFLOW)\n\n\[email protected]\ndef native_tf2_pyinstrument_profiler_config_parser(config_folder, monkeypatch):\n config_path = os.path.join(\n config_folder, \"test_native_tf2_pyinstrument_profiler_config_parser.json\"\n )\n monkeypatch.setenv(\"SMPROFILER_CONFIG_PATH\", config_path)\n return ProfilerConfigParser(FRAMEWORK.TENSORFLOW)\n\n\ndef _helper_native_tf2_gradtape(out_dir, model, dataset, profiler_config_parser):\n def get_grads(images, labels):\n return model(images, training=True)\n\n @tf.function\n def train_step(images, labels):\n return tf.reduce_mean(get_grads(images, labels))\n\n hook = Hook(out_dir=out_dir, save_all=True)\n # Known issue where logging in a python callback function (i.e. atexit) during pytest causes logging errors.\n # See https://github.com/pytest-dev/pytest/issues/5502 for more information.\n hook.logger.disabled = True\n hook.profiler_config_parser = profiler_config_parser\n\n start_step = profiler_config_parser.config.python_profiling_config.start_step\n end_step = start_step + profiler_config_parser.config.python_profiling_config.num_steps\n\n opt = tf.keras.optimizers.Adam()\n hook.wrap_optimizer(opt)\n\n for current_step, (data, labels) in enumerate(dataset):\n with hook.profiler():\n labels = tf.one_hot(labels, depth=10)\n with tf.GradientTape() as tape:\n logits = train_step(data, labels)\n if start_step <= current_step < end_step:\n assert profiler_config_parser.python_profiler._start_step == current_step\n assert (\n profiler_config_parser.python_profiler._start_phase == StepPhase.STEP_START\n )\n grads = tape.gradient(logits, model.variables)\n opt.apply_gradients(zip(grads, model.variables))\n\n hook.save_tensor(\"inputs\", data, CollectionKeys.INPUTS)\n hook.save_tensor(\"logits\", logits, CollectionKeys.OUTPUTS)\n hook.save_tensor(\"labels\", labels, CollectionKeys.OUTPUTS)\n\n if start_step <= current_step < end_step:\n assert profiler_config_parser.python_profiler._start_step == current_step\n assert profiler_config_parser.python_profiler._start_phase == StepPhase.STEP_END\n # required for these tests since this normally gets called in the cleanup process and we need to stop any ongoing\n # profiling and collect post-hook-close Python profiling stats\n hook.profiling_end()\n\n\ndef _verify_tensor_names(out_dir):\n \"\"\"\n This verifies the tensor names when debugger is enabled.\n \"\"\"\n\n trial = smd.create_trial(out_dir)\n assert len(trial.steps()) > 0, \"Nothing saved at any step.\"\n assert len(trial.tensor_names()) > 0, 
\"Tensors were not saved.\"\n assert trial.tensor_names(collection=CollectionKeys.LOSSES) == [\"loss\"]\n assert len(trial.tensor_names(collection=CollectionKeys.WEIGHTS)) > 0\n assert len(trial.tensor_names(collection=CollectionKeys.BIASES)) > 0\n assert trial.tensor_names(collection=\"optimizer_variables\") == [\n \"Adam/beta_1:0\",\n \"Adam/beta_2:0\",\n \"Adam/decay:0\",\n \"Adam/iter:0\",\n \"Adam/learning_rate:0\",\n ]\n assert trial.tensor_names(collection=CollectionKeys.INPUTS) == [\"inputs\"]\n assert trial.tensor_names(collection=CollectionKeys.OUTPUTS) == [\"labels\", \"logits\"]\n\n\[email protected](\"python_profiler_name\", [CPROFILE_NAME, PYINSTRUMENT_NAME])\[email protected](\n \"model_type\", [ModelType.SEQUENTIAL, ModelType.FUNCTIONAL, ModelType.SUBCLASSED]\n)\ndef test_native_tf2_profiling(\n python_profiler_name,\n model_type,\n tf2_mnist_sequential_model,\n tf2_mnist_functional_model,\n tf2_mnist_subclassed_model,\n native_tf2_cprofile_profiler_config_parser,\n native_tf2_pyinstrument_profiler_config_parser,\n out_dir,\n mnist_dataset,\n tf_eager_mode,\n):\n if model_type == ModelType.SEQUENTIAL:\n model = tf2_mnist_sequential_model\n elif model_type == ModelType.FUNCTIONAL:\n model = tf2_mnist_functional_model\n else:\n model = tf2_mnist_subclassed_model\n\n if python_profiler_name == CPROFILE_NAME:\n profiler_config_parser = native_tf2_cprofile_profiler_config_parser\n else:\n profiler_config_parser = native_tf2_pyinstrument_profiler_config_parser\n\n assert profiler_config_parser.profiling_enabled\n profiler_config_parser.load_config()\n profiler_config_parser.start_pre_step_zero_python_profiling()\n\n _helper_native_tf2_gradtape(out_dir, model, mnist_dataset, profiler_config_parser)\n\n # Sanity check debugger output\n _verify_tensor_names(out_dir)\n\n # The expected number of stats directories during is (num_steps * 2) + 2. This includes profiling for both\n # phases of each step and pre-step zero python profiling and post-hook-close python profiling.\n expected_stats_dir_count = (\n profiler_config_parser.config.python_profiling_config.num_steps * 2\n ) + 2\n python_stats_dir = os.path.join(out_dir, \"framework\", \"tensorflow\", python_profiler_name)\n validate_python_profiling_stats(\n python_stats_dir, python_profiler_name, expected_stats_dir_count\n )\n"
] |
[
[
"tensorflow.keras.optimizers.Adam",
"tensorflow.one_hot",
"tensorflow.GradientTape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
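The assertion in the test above follows the formula stated in its comment, (num_steps * 2) + 2: one stats directory per start/end phase of each profiled step, plus the pre-step-zero and post-hook-close profiles. A worked instance with a hypothetical step count:

num_steps = 3                  # hypothetical: profiling covers three steps
expected = num_steps * 2 + 2   # 2 phases per step + pre-step-zero + post-hook-close
assert expected == 8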
prarabdh9909/audio
|
[
"6bad3a66a7a1c7cc05755e9ee5931b7391d2b94c"
] |
[
"test/torchaudio_unittest/functional/functional_impl.py"
] |
[
"\"\"\"Test defintion common to CPU and CUDA\"\"\"\nimport torch\nimport torchaudio.functional as F\nfrom parameterized import parameterized\nfrom scipy import signal\n\nfrom torchaudio_unittest import common_utils\n\n\nclass Lfilter(common_utils.TestBaseMixin):\n def test_simple(self):\n \"\"\"\n Create a very basic signal,\n Then make a simple 4th order delay\n The output should be same as the input but shifted\n \"\"\"\n\n torch.random.manual_seed(42)\n waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)\n b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)\n a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)\n output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)\n\n self.assertEqual(output_waveform[:, 3:], waveform[:, 0:-3], atol=1e-5, rtol=1e-5)\n\n def test_clamp(self):\n input_signal = torch.ones(1, 44100 * 1, dtype=self.dtype, device=self.device)\n b_coeffs = torch.tensor([1, 0], dtype=self.dtype, device=self.device)\n a_coeffs = torch.tensor([1, -0.95], dtype=self.dtype, device=self.device)\n output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=True)\n assert output_signal.max() <= 1\n output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=False)\n assert output_signal.max() > 1\n\n @parameterized.expand([\n ((44100,),),\n ((3, 44100),),\n ((2, 3, 44100),),\n ((1, 2, 3, 44100),)\n ])\n def test_shape(self, shape):\n torch.random.manual_seed(42)\n waveform = torch.rand(*shape, dtype=self.dtype, device=self.device)\n b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)\n a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)\n output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)\n assert shape == waveform.size() == output_waveform.size()\n\n def test_9th_order_filter_stability(self):\n \"\"\"\n Validate the precision of lfilter against reference scipy implementation when using high order filter.\n The reference implementation use cascaded second-order filters so is more numerically accurate.\n \"\"\"\n # create an impulse signal\n x = torch.zeros(1024, dtype=self.dtype, device=self.device)\n x[0] = 1\n\n # get target impulse response\n sos = signal.butter(9, 850, 'hp', fs=22050, output='sos')\n y = torch.from_numpy(signal.sosfilt(sos, x.cpu().numpy())).to(self.dtype).to(self.device)\n\n # get lfilter coefficients\n b, a = signal.butter(9, 850, 'hp', fs=22050, output='ba')\n b, a = torch.from_numpy(b).to(self.dtype).to(self.device), torch.from_numpy(\n a).to(self.dtype).to(self.device)\n\n # predict impulse response\n yhat = F.lfilter(x, a, b, False)\n self.assertEqual(yhat, y, atol=1e-4, rtol=1e-5)\n\n\nclass Spectrogram(common_utils.TestBaseMixin):\n @parameterized.expand([(0., ), (1., ), (2., ), (3., )])\n def test_grad_at_zero(self, power):\n \"\"\"The gradient of power spectrogram should not be nan but zero near x=0\n\n https://github.com/pytorch/audio/issues/993\n \"\"\"\n x = torch.zeros(1, 22050, requires_grad=True)\n spec = F.spectrogram(\n x,\n pad=0,\n window=None,\n n_fft=2048,\n hop_length=None,\n win_length=None,\n power=power,\n normalized=False,\n )\n spec.sum().backward()\n assert not x.grad.isnan().sum()\n"
] |
[
[
"torch.ones",
"torch.zeros",
"torch.random.manual_seed",
"torch.from_numpy",
"torch.tensor",
"scipy.signal.butter",
"torch.rand"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
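A standalone sketch of the delay identity exercised by test_simple above: b = [0, 0, 0, 1] against a = [1, 0, 0, 0] is a pure 3-sample delay, so the output equals the input shifted by three samples (the signal length here is arbitrary):

import torch
import torchaudio.functional as F

x = torch.rand(1, 100)
b = torch.tensor([0., 0., 0., 1.])   # numerator: delay by 3 samples
a = torch.tensor([1., 0., 0., 0.])   # denominator: no feedback
y = F.lfilter(x, a, b)
assert torch.allclose(y[:, 3:], x[:, :-3], atol=1e-5)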
berndbohnet/flax
|
[
"5aa7f335bb8819088c8b1aa89aa459c99eb00c1c"
] |
[
"tests/core/design/core_custom_vjp_test.py"
] |
[
"# Copyright 2022 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import Sequence, Callable\nfrom functools import partial\n\nfrom absl.testing import absltest\n\nimport numpy as np\n\nfrom flax.core import Scope, Array, init, apply, unfreeze, lift, nn\n\nimport jax\nfrom jax import random, numpy as jnp\n\n\ndef mlp_custom_grad(scope: Scope, x: Array,\n sizes: Sequence[int] = (8, 1),\n act_fn: Callable[[Array], Array] = nn.relu):\n\n f = nn.dense\n\n def fwd(scope, x, features):\n y, vjp_fn = lift.vjp(partial(f, features=features), scope, x)\n return y, vjp_fn\n\n def bwd(features, res, y_t):\n del features\n vjp_fn = res\n input_t, params_t = vjp_fn(y_t)\n params_t = jax.tree_map(jnp.sign, params_t)\n return input_t, params_t\n\n dense_custom_grad = lift.custom_vjp(\n f, forward_fn=fwd, backward_fn=bwd, nondiff_argnums=(2,))\n\n # hidden layers\n for size in sizes[:-1]:\n x = scope.child(dense_custom_grad, prefix='hidden_')(x, size)\n x = act_fn(x)\n\n # output layer\n return scope.child(dense_custom_grad, 'out')(x, sizes[-1])\n\n\nclass CustomVJPTest(absltest.TestCase):\n\n def test_custom_vjp(self):\n x = random.normal(random.PRNGKey(0), (1, 4))\n y, variables = init(mlp_custom_grad)(random.PRNGKey(1), x)\n param_shapes = unfreeze(\n jax.tree_map(jnp.shape, variables['params']))\n loss_fn = lambda p, x: jnp.mean(apply(mlp_custom_grad)(p, x) ** 2)\n grad = jax.grad(loss_fn)(variables, x)\n grad_shapes = unfreeze(\n jax.tree_map(jnp.shape, grad['params']))\n self.assertEqual(y.shape, (1, 1))\n expected_param_shapes = {\n 'hidden_0': {'kernel': (4, 8), 'bias': (8,)},\n 'out': {'kernel': (8, 1), 'bias': (1,)},\n }\n self.assertEqual(param_shapes, expected_param_shapes)\n self.assertEqual(grad_shapes, expected_param_shapes)\n for g in jax.tree_leaves(grad):\n self.assertTrue(np.all(g == np.sign(g)))\n\n\nif __name__ == '__main__':\n absltest.main()\n"
] |
[
[
"numpy.sign"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
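The backward_fn in the listing above throws away the parameter gradient magnitudes and keeps only their sign. The same trick written with plain jax.custom_vjp, outside Flax's lifted machinery (a sketch for intuition, not the module's code):

import jax
import jax.numpy as jnp

@jax.custom_vjp
def f(w, x):
    return w * x

def f_fwd(w, x):
    return f(w, x), (w, x)   # save residuals for the backward pass

def f_bwd(res, g):
    w, x = res
    return jnp.sign(x * g), w * g   # sign-compress the weight cotangent only

f.defvjp(f_fwd, f_bwd)
print(jax.grad(f)(2.0, -3.0))       # -1.0 instead of the true gradient -3.0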
diningphil/CGMM-ICML2018
|
[
"c6da2ac267edae0a0326818c6b4f4a6c141a053f",
"c6da2ac267edae0a0326818c6b4f4a6c141a053f"
] |
[
"util.py",
"cgmm_classifier_task.py"
] |
[
"from typing import Optional, Tuple, List\n\nimport torch\nimport torch_geometric\n\n\ndef extend_lists(data_list: Optional[Tuple[Optional[List[torch.Tensor]]]],\n new_data_list: Tuple[Optional[List[torch.Tensor]]]) -> Tuple[Optional[List[torch.Tensor]]]:\n r\"\"\"\n Extends the semantic of Python :func:`extend()` over lists to tuples\n Used e.g., to concatenate results of mini-batches in incremental architectures such as :obj:`CGMM`\n\n Args:\n data_list: tuple of lists, or ``None`` if there is no list to extend.\n new_data_list: object of the same form of :obj:`data_list` that has to be concatenated\n\n Returns:\n the tuple of extended lists\n \"\"\"\n if data_list is None:\n return new_data_list\n\n assert len(data_list) == len(new_data_list)\n\n for i in range(len(data_list)):\n if new_data_list[i] is not None:\n data_list[i].extend(new_data_list[i])\n\n return data_list\n\n\ndef to_tensor_lists(embeddings: Tuple[Optional[torch.Tensor]],\n batch: torch_geometric.data.batch.Batch,\n edge_index: torch.Tensor) -> Tuple[Optional[List[torch.Tensor]]]:\n r\"\"\"\n Reverts batched outputs back to a list of Tensors elements.\n Can be useful to build incremental architectures such as :obj:`CGMM` that store intermediate results\n before training the next layer.\n\n Args:\n embeddings (tuple): a tuple of embeddings :obj:`(vertex_output, edge_output, graph_output, vertex_extra_output, edge_extra_output, graph_extra_output)`.\n Each embedding can be a :class:`torch.Tensor` or ``None``.\n batch (:class:`torch_geometric.data.batch.Batch`): Batch information used to split the tensors.\n\n edge_index (:class:`torch.Tensor`): a :obj:`2 x num_edges` tensor as defined in Pytorch Geometric.\n Used to split edge Tensors graph-wise.\n\n Returns:\n a tuple with the same semantics as the argument ``embeddings``, but this time each element holds a list of\n Tensors, one for each graph in the dataset.\n \"\"\"\n # Crucial: Detach the embeddings to free the computation graph!!\n # TODO this code can surely be made more compact, but leave it as is until future refactoring or removal from PyDGN.\n v_out, e_out, g_out, vo_out, eo_out, go_out = embeddings\n\n v_out = v_out.detach() if v_out is not None else None\n v_out_list = [] if v_out is not None else None\n\n e_out = e_out.detach() if e_out is not None else None\n e_out_list = [] if e_out is not None else None\n\n g_out = g_out.detach() if g_out is not None else None\n g_out_list = [] if g_out is not None else None\n\n vo_out = vo_out.detach() if vo_out is not None else None\n vo_out_list = [] if vo_out is not None else None\n\n eo_out = eo_out.detach() if eo_out is not None else None\n eo_out_list = [] if eo_out is not None else None\n\n go_out = go_out.detach() if go_out is not None else None\n go_out_list = [] if go_out is not None else None\n\n _, node_counts = torch.unique_consecutive(batch, return_counts=True)\n node_cumulative = torch.cumsum(node_counts, dim=0)\n\n if e_out is not None or eo_out is not None:\n edge_batch = batch[edge_index[0]]\n _, edge_counts = torch.unique_consecutive(edge_batch, return_counts=True)\n edge_cumulative = torch.cumsum(edge_counts, dim=0)\n\n if v_out_list is not None:\n v_out_list.append(v_out[:node_cumulative[0]])\n\n if e_out_list is not None:\n e_out_list.append(e_out[:edge_cumulative[0]])\n\n if g_out_list is not None:\n g_out_list.append(g_out[0].unsqueeze(0)) # recreate batch dimension by unsqueezing\n\n if vo_out_list is not None:\n vo_out_list.append(vo_out[:node_cumulative[0]])\n\n if eo_out_list is not None:\n 
eo_out_list.append(eo_out[:edge_cumulative[0]])\n\n if go_out_list is not None:\n go_out_list.append(go_out[0].unsqueeze(0)) # recreate batch dimension by unsqueezing\n\n for i in range(1, len(node_cumulative)):\n if v_out_list is not None:\n v_out_list.append(v_out[node_cumulative[i - 1]:node_cumulative[i]])\n\n if e_out_list is not None:\n e_out_list.append(e_out[edge_cumulative[i - 1]:edge_cumulative[i]])\n\n if g_out_list is not None:\n g_out_list.append(g_out[i].unsqueeze(0)) # recreate batch dimension by unsqueezing\n\n if vo_out_list is not None:\n vo_out_list.append(vo_out[node_cumulative[i - 1]:node_cumulative[i]])\n\n if eo_out_list is not None:\n eo_out_list.append(eo_out[edge_cumulative[i - 1]:edge_cumulative[i]])\n\n if go_out_list is not None:\n go_out_list.append(go_out[i].unsqueeze(0)) # recreate batch dimension by unsqueezing\n\n return v_out_list, e_out_list, g_out_list, vo_out_list, eo_out_list, go_out_list\n\n\ndef compute_unigram(posteriors: torch.Tensor, use_continuous_states: bool) -> torch.Tensor:\n r\"\"\"\n Computes the unigram representation of nodes as defined in https://www.jmlr.org/papers/volume21/19-470/19-470.pdf\n\n Args:\n posteriors (torch.Tensor): tensor of posterior distributions of nodes with shape `(#nodes,num_latent_states)`\n use_continuous_states (bool): whether or not to use the most probable state (one-hot vector) or a \"soft\" version\n\n Returns:\n a tensor of unigrams with shape `(#nodes,num_latent_states)`\n \"\"\"\n num_latent_states = posteriors.shape[1]\n\n if use_continuous_states:\n node_embeddings_batch = posteriors\n else:\n node_embeddings_batch = make_one_hot(posteriors.argmax(dim=1), num_latent_states)\n\n return node_embeddings_batch.double()\n\n\ndef compute_bigram(posteriors: torch.Tensor, edge_index: torch.Tensor, batch: torch.Tensor,\n use_continuous_states: bool) -> torch.Tensor:\n r\"\"\"\n Computes the bigram representation of nodes as defined in https://www.jmlr.org/papers/volume21/19-470/19-470.pdf\n\n Args:\n posteriors (torch.Tensor): tensor of posterior distributions of nodes with shape `(#nodes,num_latent_states)`\n edge_index (torch.Tensor): tensor of edge indices with shape `(2,#edges)` that adheres to PyG specifications\n batch (torch.Tensor): vector that assigns each node to a graph id in the batch\n use_continuous_states (bool): whether or not to use the most probable state (one-hot vector) or a \"soft\" version\n\n Returns:\n a tensor of bigrams with shape `(#nodes,num_latent_states*num_latent_states)`\n \"\"\"\n C = posteriors.shape[1]\n device = posteriors.get_device()\n device = 'cpu' if device == -1 else device\n\n if use_continuous_states:\n # Code provided by Daniele Atzeni to speed up the computation!\n nodes_in_batch = len(batch)\n sparse_adj_matrix = torch.sparse.FloatTensor(edge_index,\n torch.ones(edge_index.shape[1]).to(device),\n torch.Size([nodes_in_batch, nodes_in_batch]))\n tmp1 = torch.sparse.mm(sparse_adj_matrix, posteriors.float()).repeat(1, C)\n tmp2 = posteriors.reshape(-1, 1).repeat(1, C).reshape(-1, C * C)\n node_bigram_batch = torch.mul(tmp1, tmp2)\n else:\n # Convert into one hot\n posteriors_one_hot = make_one_hot(posteriors.argmax(dim=1), C).float()\n\n # Code provided by Daniele Atzeni to speed up the computation!\n nodes_in_batch = len(batch)\n sparse_adj_matrix = torch.sparse.FloatTensor(edge_index,\n torch.ones(edge_index.shape[1]).to(device),\n torch.Size([nodes_in_batch, nodes_in_batch]))\n tmp1 = torch.sparse.mm(sparse_adj_matrix, posteriors_one_hot).repeat(1, C)\n tmp2 = 
posteriors_one_hot.reshape(-1, 1).repeat(1, C).reshape(-1, C * C)\n node_bigram_batch = torch.mul(tmp1, tmp2)\n\n return node_bigram_batch.double()\n\n\ndef make_one_hot(labels: torch.Tensor, num_unique_ids: int) -> torch.Tensor:\n r\"\"\"\n Converts a vector of ids into a one-hot matrix\n\n Args:\n labels (torch.Tensor): the vector of ids\n num_unique_ids (int): number of unique ids\n\n Returns:\n a one-hot tensor with shape `(samples,num_unique_ids)`\n \"\"\"\n device = labels.get_device()\n device = 'cpu' if device == -1 else device\n one_hot = torch.zeros(labels.size(0), num_unique_ids).to(device)\n one_hot[torch.arange(labels.size(0)).to(device), labels] = 1\n return one_hot\n",
"import os\n\nimport torch\nfrom cgmm_incremental_task import CGMMTask\nfrom pydgn.experiment.util import s2c\nfrom pydgn.static import LOSS, SCORE\nfrom torch_geometric.data import Data\nfrom torch_geometric.loader import DataLoader\n\n\n# This works with graph classification only\nclass ClassifierCGMMTask(CGMMTask):\n\n def run_valid(self, dataset_getter, logger):\n \"\"\"\n This function returns the training and validation or test accuracy\n :return: (training accuracy, validation/test accuracy)\n \"\"\"\n\n # Necessary info to give a unique name to the dataset (some hyper-params like epochs are assumed to be fixed)\n embeddings_folder = self.model_config.layer_config['embeddings_folder']\n max_layers = self.model_config.layer_config['max_layers']\n layers = self.model_config.layer_config['layers']\n unibigram = self.model_config.layer_config['unibigram']\n C = self.model_config.layer_config['C']\n CA = self.model_config.layer_config['CA'] if 'CA' in self.model_config.layer_config else None\n aggregation = self.model_config.layer_config['aggregation']\n infer_with_posterior = self.model_config.layer_config['infer_with_posterior']\n outer_k = dataset_getter.outer_k\n inner_k = dataset_getter.inner_k\n # ====\n\n base_path = os.path.join(embeddings_folder, dataset_getter.dataset_name,\n f'{max_layers}_{unibigram}_{C}_{CA}_{aggregation}_{infer_with_posterior}_{outer_k + 1}_{inner_k + 1}')\n train_out_emb = torch.load(base_path + '_train.torch')[:, :layers, :]\n val_out_emb = torch.load(base_path + '_val.torch')[:, :layers, :]\n train_out_emb = torch.reshape(train_out_emb, (train_out_emb.shape[0], -1))\n val_out_emb = torch.reshape(val_out_emb, (val_out_emb.shape[0], -1))\n\n # Recover the targets\n fake_train_loader = dataset_getter.get_inner_train(batch_size=1, shuffle=False)\n fake_val_loader = dataset_getter.get_inner_val(batch_size=1, shuffle=False)\n train_y = [el.y for el in fake_train_loader.dataset]\n val_y = [el.y for el in fake_val_loader.dataset]\n arbitrary_logic_batch_size = self.model_config.layer_config['arbitrary_function_config']['batch_size']\n arbitrary_logic_shuffle = self.model_config.layer_config['arbitrary_function_config']['shuffle'] \\\n if 'shuffle' in self.model_config.layer_config['arbitrary_function_config'] else True\n\n # build data lists\n train_list = [Data(x=train_out_emb[i].unsqueeze(0), y=train_y[i]) for i in range(train_out_emb.shape[0])]\n val_list = [Data(x=val_out_emb[i].unsqueeze(0), y=val_y[i]) for i in range(val_out_emb.shape[0])]\n train_loader = DataLoader(train_list, batch_size=arbitrary_logic_batch_size, shuffle=arbitrary_logic_shuffle)\n val_loader = DataLoader(val_list, batch_size=arbitrary_logic_batch_size, shuffle=arbitrary_logic_shuffle)\n\n # Instantiate the Dataset\n dim_features = train_out_emb.shape[1]\n dim_target = dataset_getter.get_dim_target()\n\n config = self.model_config.layer_config['arbitrary_function_config']\n device = config['device']\n\n predictor_class = s2c(config['readout'])\n model = predictor_class(dim_node_features=dim_features,\n dim_edge_features=0,\n dim_target=dim_target,\n config=config)\n\n predictor_engine = self._create_engine(config, model, device, evaluate_every=self.model_config.evaluate_every)\n\n train_loss, train_score, _, \\\n val_loss, val_score, _, \\\n _, _, _ = predictor_engine.train(train_loader=train_loader,\n validation_loader=val_loader,\n test_loader=None,\n max_epochs=config['epochs'],\n logger=logger)\n\n train_res = {LOSS: train_loss, SCORE: train_score}\n val_res = {LOSS: val_loss, 
SCORE: val_score}\n return train_res, val_res\n\n def run_test(self, dataset_getter, logger):\n \"\"\"\n This function returns the training and test accuracy. DO NOT USE THE TEST FOR ANY REASON\n :return: (training accuracy, test accuracy)\n \"\"\"\n\n # Necessary info to give a unique name to the dataset (some hyper-params like epochs are assumed to be fixed)\n embeddings_folder = self.model_config.layer_config['embeddings_folder']\n max_layers = self.model_config.layer_config['max_layers']\n layers = self.model_config.layer_config['layers']\n unibigram = self.model_config.layer_config['unibigram']\n C = self.model_config.layer_config['C']\n CA = self.model_config.layer_config['CA'] if 'CA' in self.model_config.layer_config else None\n aggregation = self.model_config.layer_config['aggregation']\n infer_with_posterior = self.model_config.layer_config['infer_with_posterior']\n outer_k = dataset_getter.outer_k\n inner_k = dataset_getter.inner_k\n if inner_k is None: # workaround the \"safety\" procedure of evaluation protocol, but we will not do anything wrong.\n dataset_getter.set_inner_k(0)\n inner_k = 0 # pick the split of the first inner fold\n # ====\n\n # NOTE: We reload the associated inner train and val splits, using the outer_test for assessment.\n # This is slightly different from standard exps, where we compute a different outer train-val split, but it should not change things much.\n\n base_path = os.path.join(embeddings_folder, dataset_getter.dataset_name,\n f'{max_layers}_{unibigram}_{C}_{CA}_{aggregation}_{infer_with_posterior}_{outer_k + 1}_{inner_k + 1}')\n train_out_emb = torch.load(base_path + '_train.torch')[:, :layers, :]\n val_out_emb = torch.load(base_path + '_val.torch')[:, :layers, :]\n test_out_emb = torch.load(base_path + '_test.torch')[:, :layers, :]\n train_out_emb = torch.reshape(train_out_emb, (train_out_emb.shape[0], -1))\n val_out_emb = torch.reshape(val_out_emb, (val_out_emb.shape[0], -1))\n test_out_emb = torch.reshape(test_out_emb, (test_out_emb.shape[0], -1))\n\n # Recover the targets\n fake_train_loader = dataset_getter.get_inner_train(batch_size=1, shuffle=False)\n fake_val_loader = dataset_getter.get_inner_val(batch_size=1, shuffle=False)\n fake_test_loader = dataset_getter.get_outer_test(batch_size=1, shuffle=False)\n train_y = [el.y for el in fake_train_loader.dataset]\n val_y = [el.y for el in fake_val_loader.dataset]\n test_y = [el.y for el in fake_test_loader.dataset]\n arbitrary_logic_batch_size = self.model_config.layer_config['arbitrary_function_config']['batch_size']\n arbitrary_logic_shuffle = self.model_config.layer_config['arbitrary_function_config']['shuffle'] \\\n if 'shuffle' in self.model_config.layer_config['arbitrary_function_config'] else True\n\n # build data lists\n train_list = [Data(x=train_out_emb[i].unsqueeze(0), y=train_y[i]) for i in range(train_out_emb.shape[0])]\n val_list = [Data(x=val_out_emb[i].unsqueeze(0), y=val_y[i]) for i in range(val_out_emb.shape[0])]\n test_list = [Data(x=test_out_emb[i].unsqueeze(0), y=test_y[i]) for i in range(test_out_emb.shape[0])]\n train_loader = DataLoader(train_list, batch_size=arbitrary_logic_batch_size, shuffle=arbitrary_logic_shuffle)\n val_loader = DataLoader(val_list, batch_size=arbitrary_logic_batch_size, shuffle=arbitrary_logic_shuffle)\n test_loader = DataLoader(test_list, batch_size=arbitrary_logic_batch_size, shuffle=arbitrary_logic_shuffle)\n\n # Instantiate the Dataset\n dim_features = train_out_emb.shape[1]\n dim_target = dataset_getter.get_dim_target()\n\n config = 
self.model_config.layer_config['arbitrary_function_config']\n device = config['device']\n\n predictor_class = s2c(config['readout'])\n model = predictor_class(dim_node_features=dim_features,\n dim_edge_features=0,\n dim_target=dim_target,\n config=config)\n\n predictor_engine = self._create_engine(config, model, device, evaluate_every=self.model_config.evaluate_every)\n\n train_loss, train_score, _, \\\n val_loss, val_score, _, \\\n test_loss, test_score, _ = predictor_engine.train(train_loader=train_loader,\n validation_loader=val_loader,\n test_loader=test_loader,\n max_epochs=config['epochs'],\n logger=logger)\n\n train_res = {LOSS: train_loss, SCORE: train_score}\n val_res = {LOSS: val_loss, SCORE: val_score}\n test_res = {LOSS: test_loss, SCORE: test_score}\n return train_res, val_res, test_res\n"
] |
[
[
"torch.Size",
"torch.ones",
"torch.sparse.mm",
"torch.mul",
"torch.unique_consecutive",
"torch.cumsum"
],
[
"torch.reshape",
"torch.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
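A quick check of make_one_hot from util.py above, assuming the function is in scope (note that num_unique_ids is used as a plain int):

import torch

labels = torch.tensor([0, 2, 1])
one_hot = make_one_hot(labels, 3)
# tensor([[1., 0., 0.],
#         [0., 0., 1.],
#         [0., 1., 0.]])
assert one_hot.shape == (3, 3) and one_hot.sum() == 3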
Z-T-WANG/DeepReinforcementLearningControlOfQuantumCartpoles
|
[
"3b243f235b4945a4817b738d8dbc412937de9f28"
] |
[
"implementation codes/quartic oscillator/setupC.py"
] |
[
"from distutils.core import setup, Extension\nfrom math import pi\nimport numpy as np \nimport os, sys, shutil, glob\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--lambda', default= pi/25., type=float, metavar='\\lambda',\n help='the strength of the quartic anharmonic oscillator')\nparser.add_argument('--x_max', default=8.5, type=float, metavar='x_{max}',\n help='the distance from the center to the border of the simulation space')\nparser.add_argument('--grid_size', default = 0.1, type=float, metavar='h',\n help='the grid size of the discretized simulation space')\nparser.add_argument('--mass', default = 1./pi, type=float, metavar='m',\n help='the mass of the simulated particle')\nparser.add_argument('--moment', default = 5, type=int,\n help='the order of the distribution moments to compute in the compiled function \"get_moments\"')\nargs = parser.parse_args()\n\n\n\n\n# Please rewrite the following arguments based on your OS and your prescription of compilation if necessary\n# Please refer to https://software.intel.com/en-us/articles/intel-mkl-link-line-advisor . Usually Python uses GCC as the default compiler, and then GNU compiler should be selected. The arguments starting with \"-I\" mean to \"include\" those directories.\n\nlink_options = ['-Wl,--start-group', os.environ['MKLROOT']+'/lib/intel64/libmkl_intel_ilp64.a', os.environ['MKLROOT']+'/lib/intel64/libmkl_intel_thread.a', os.environ['MKLROOT']+'/lib/intel64/libmkl_core.a', '-Wl,--end-group', '-liomp5', '-lpthread', '-lm', '-ldl']\n\ncompiler_options = ['-DMKL_ILP64','-m64']\n\n##############################################################################\n# The following is the compilation program. \n\ndef compile(x_max, grid_size, mass, lambda_, moment):\n assert lambda_>= 0., 'quartic oscillator strength \\lambda should be positive'\n assert mass> 0., 'the mass should be positive'\n assert x_max> 0., 'the size of the simulation space (2 * x_max) should be positive'\n assert grid_size> 0., 'the simulation grid size should be positive'\n assert moment >= 1, 'the order of distribution moments should be larger than 1'\n\n # It invokes the native \"distutils.core\" of Python by setting the commandline arguments stored in sys.argv to the desired one (\"build\")\n\n # set the \"build\" command\n original_args_exist = False\n if len(sys.argv)>=2:\n original_args=sys.argv[1:]\n sys.argv = [sys.argv[0], \"build\"]\n original_args_exist = True\n else: sys.argv.append(\"build\")\n\n os.environ[\"MKL_NUM_THREADS\"] = \"1\"\n\n package_name = 'simulation'\n\n module1 = Extension(package_name,language='c++',\n define_macros = [('X_MAX', str(x_max)), ('GRID_SIZE', repr(grid_size)), ('MASS',repr(mass)), ('LAMBDA', repr(lambda_)), ('MOMENT', str(moment))], # pass the defining parameters\n include_dirs = [np.get_include(), os.path.join(os.environ['MKLROOT'],'include')],\n sources = ['simulation_quart.cpp'], \n extra_compile_args = compiler_options+['-Ofast','-funroll-loops', '-march=native', '-flto','-fuse-linker-plugin','--param', 'ipcp-unit-growth=2000', '-std=c++14','-fno-stack-protector','-fmerge-all-constants'], \n extra_link_args = link_options+['-Ofast','-fdelete-null-pointer-checks','-funroll-loops', '-march=native', '-fwhole-program','-flto','-fuse-linker-plugin','--param', 'ipcp-unit-growth=2000','-std=c++14','-fno-stack-protector','-fmerge-all-constants'])\n\n setup (name = package_name,\n version = '1.0',\n description = 'do simulation steps',\n author = 'Wang Zhikang',\n ext_modules = [module1])\n\n # 
copy the compiled C module to the root to import\n compiled_files = glob.glob('build/**/*')\n for compiled_file in compiled_files:\n if 'temp' not in compiled_file:\n shutil.move(compiled_file, os.path.basename(compiled_file), copy_function=shutil.copy2)\n\n # restore the original commandline arguments\n if original_args_exist: sys.argv = [sys.argv[0]]+original_args\n else: sys.argv.pop(1)\n\ncompile(x_max=args.x_max, grid_size=args.grid_size, mass=args.mass, lambda_=args.__dict__['lambda'], moment=args.moment)\n"
] |
[
[
"numpy.get_include"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
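Because lambda is a Python keyword, setupC.py reads the parsed flag through args.__dict__['lambda'] (args.lambda would be a SyntaxError); getattr works the same way. An isolated demonstration:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lambda', default=0.1, type=float)
args = parser.parse_args(['--lambda', '0.25'])
print(getattr(args, 'lambda'))   # 0.25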
urigoren/allennlp
|
[
"236e1fd01ca30409cd736625901292609009f5c4",
"236e1fd01ca30409cd736625901292609009f5c4"
] |
[
"allennlp/training/metrics/attachment_scores.py",
"allennlp/tests/common/testing.py"
] |
[
"from typing import Optional, List\n\nfrom overrides import overrides\nimport torch\n\nfrom allennlp.training.metrics.metric import Metric\n\n\[email protected](\"attachment_scores\")\nclass AttachmentScores(Metric):\n \"\"\"\n Computes labeled and unlabeled attachment scores for a\n dependency parse, as well as sentence level exact match\n for both labeled and unlabeled trees. Note that the input\n to this metric is the sampled predictions, not the distribution\n itself.\n\n # Parameters\n\n ignore_classes : `List[int]`, optional (default = None)\n A list of label ids to ignore when computing metrics.\n \"\"\"\n\n def __init__(self, ignore_classes: List[int] = None) -> None:\n self._labeled_correct = 0.0\n self._unlabeled_correct = 0.0\n self._exact_labeled_correct = 0.0\n self._exact_unlabeled_correct = 0.0\n self._total_words = 0.0\n self._total_sentences = 0.0\n\n self._ignore_classes: List[int] = ignore_classes or []\n\n def __call__( # type: ignore\n self,\n predicted_indices: torch.Tensor,\n predicted_labels: torch.Tensor,\n gold_indices: torch.Tensor,\n gold_labels: torch.Tensor,\n mask: Optional[torch.BoolTensor] = None,\n ):\n \"\"\"\n # Parameters\n\n predicted_indices : `torch.Tensor`, required.\n A tensor of head index predictions of shape (batch_size, timesteps).\n predicted_labels : `torch.Tensor`, required.\n A tensor of arc label predictions of shape (batch_size, timesteps).\n gold_indices : `torch.Tensor`, required.\n A tensor of the same shape as `predicted_indices`.\n gold_labels : `torch.Tensor`, required.\n A tensor of the same shape as `predicted_labels`.\n mask : `torch.BoolTensor`, optional (default = None).\n A tensor of the same shape as `predicted_indices`.\n \"\"\"\n detached = self.detach_tensors(\n predicted_indices, predicted_labels, gold_indices, gold_labels, mask\n )\n predicted_indices, predicted_labels, gold_indices, gold_labels, mask = detached\n\n if mask is None:\n mask = torch.ones_like(predicted_indices).bool()\n\n predicted_indices = predicted_indices.long()\n predicted_labels = predicted_labels.long()\n gold_indices = gold_indices.long()\n gold_labels = gold_labels.long()\n\n # Multiply by a mask denoting locations of\n # gold labels which we should ignore.\n for label in self._ignore_classes:\n label_mask = gold_labels.eq(label)\n mask = mask & ~label_mask\n\n correct_indices = predicted_indices.eq(gold_indices).long() * mask\n unlabeled_exact_match = (correct_indices + ~mask).prod(dim=-1)\n correct_labels = predicted_labels.eq(gold_labels).long() * mask\n correct_labels_and_indices = correct_indices * correct_labels\n labeled_exact_match = (correct_labels_and_indices + ~mask).prod(dim=-1)\n\n self._unlabeled_correct += correct_indices.sum()\n self._exact_unlabeled_correct += unlabeled_exact_match.sum()\n self._labeled_correct += correct_labels_and_indices.sum()\n self._exact_labeled_correct += labeled_exact_match.sum()\n self._total_sentences += correct_indices.size(0)\n self._total_words += correct_indices.numel() - (~mask).sum()\n\n def get_metric(self, reset: bool = False):\n \"\"\"\n # Returns\n\n The accumulated metrics as a dictionary.\n \"\"\"\n unlabeled_attachment_score = 0.0\n labeled_attachment_score = 0.0\n unlabeled_exact_match = 0.0\n labeled_exact_match = 0.0\n if self._total_words > 0.0:\n unlabeled_attachment_score = float(self._unlabeled_correct) / float(self._total_words)\n labeled_attachment_score = float(self._labeled_correct) / float(self._total_words)\n if self._total_sentences > 0:\n unlabeled_exact_match = 
float(self._exact_unlabeled_correct) / float(\n self._total_sentences\n )\n labeled_exact_match = float(self._exact_labeled_correct) / float(self._total_sentences)\n if reset:\n self.reset()\n return {\n \"UAS\": unlabeled_attachment_score,\n \"LAS\": labeled_attachment_score,\n \"UEM\": unlabeled_exact_match,\n \"LEM\": labeled_exact_match,\n }\n\n @overrides\n def reset(self):\n self._labeled_correct = 0.0\n self._unlabeled_correct = 0.0\n self._exact_labeled_correct = 0.0\n self._exact_unlabeled_correct = 0.0\n self._total_words = 0.0\n self._total_sentences = 0.0\n",
"import torch\n\nfrom allennlp.common.testing import AllenNlpTestCase, multi_device\n\n\nclass TestTesting(AllenNlpTestCase):\n def test_multi_device(self):\n actual_devices = set()\n\n @multi_device\n def dummy_func(_self, device: str):\n # Have `self` as in class test functions.\n nonlocal actual_devices\n actual_devices.add(device)\n\n dummy_func(self)\n\n expected_devices = {\"cpu\", \"cuda\"} if torch.cuda.is_available() else {\"cpu\"}\n self.assertSetEqual(expected_devices, actual_devices)\n"
] |
[
[
"torch.ones_like"
],
[
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
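A worked toy call of the AttachmentScores metric above (import path assumed from the file's location): one three-word sentence with every head correct but one label wrong gives UAS = 1.0 and LAS = 2/3, with unlabeled exact match 1.0 and labeled exact match 0.0.

import torch
from allennlp.training.metrics import AttachmentScores

scores = AttachmentScores()
pred_heads  = torch.tensor([[2, 0, 2]])
pred_labels = torch.tensor([[1, 0, 3]])
gold_heads  = torch.tensor([[2, 0, 2]])    # all heads match
gold_labels = torch.tensor([[1, 0, 2]])    # last label differs
scores(pred_heads, pred_labels, gold_heads, gold_labels)
print(scores.get_metric())   # UAS 1.0, LAS ~0.667, UEM 1.0, LEM 0.0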
emycooper/adaptnlp
|
[
"2e39f81a7faa4c7cd1d2a3764790cf7bb7ad7469",
"2e39f81a7faa4c7cd1d2a3764790cf7bb7ad7469"
] |
[
"adaptnlp/training.py",
"adaptnlp/transformers/question_answering.py"
] |
[
"from typing import Union, Dict\nfrom pathlib import Path\nimport json\nimport csv\n\nimport numpy as np\n\nfrom adaptnlp import EasyDocumentEmbeddings\n\nfrom flair.datasets import CSVClassificationCorpus\nfrom flair.data import Corpus\nfrom flair.embeddings import DocumentRNNEmbeddings\nfrom flair.models import TextClassifier\nfrom flair.trainers import ModelTrainer\nfrom flair.visual.training_curves import Plotter\n\n\nclass SequenceClassifierTrainer:\n \"\"\"Sequence Classifier Trainer\n\n Usage:\n\n ```python\n >>> sc_trainer = SequenceClassifierTrainer(corpus=\"/Path/to/data/dir\")\n ```\n\n **Parameters:**\n\n * **corpus** - A flair corpus data model or `Path`/string to a directory with train.csv/test.csv/dev.csv\n * **encoder** - A `EasyDocumentEmbeddings` object if training with a flair prediction head or `Path`/string if training with Transformer's prediction models\n * **column_name_map** - Required if corpus is not a `Corpus` object, it's a dictionary specifying the indices of the text and label columns of the csv i.e. {1:\"text\",2:\"label\"}\n * **corpus_in_memory** - Boolean for whether to store corpus embeddings in memory\n * **predictive_head** - For now either \"flair\" or \"transformers\" for the prediction head\n * ****kwargs** - Keyword arguments for Flair's `TextClassifier` model class\n \"\"\"\n\n def __init__(\n self,\n corpus: Union[Corpus, Path, str],\n encoder: Union[EasyDocumentEmbeddings, Path, str],\n column_name_map: None,\n corpus_in_memory: bool = True,\n predictive_head: str = \"flair\",\n **kwargs,\n ):\n if isinstance(corpus, Corpus):\n self.corpus = corpus\n else:\n if isinstance(corpus, str):\n corpus = Path(corpus)\n if not column_name_map:\n raise ValueError(\n \"If not instantiating with `Corpus` object, must pass in `column_name_map` argument to specify text/label indices\"\n )\n self.corpus = CSVClassificationCorpus(\n corpus,\n column_name_map,\n skip_header=True,\n delimiter=\",\",\n in_memory=corpus_in_memory,\n )\n\n # Verify predictive head is within available heads\n self.available_predictive_head = [\"flair\", \"transformers\"]\n if predictive_head not in self.available_predictive_head:\n raise ValueError(\n f\"predictive_head param must be one of the following: {self.available_predictive_head}\"\n )\n self.predictive_head = predictive_head\n\n # Verify correct corresponding encoder is used with predictive head (This can be structured with better design in the future)\n if isinstance(encoder, EasyDocumentEmbeddings):\n if predictive_head == \"transformers\":\n raise ValueError(\n \"If using `transformers` predictive head, pass in the path to the transformer's model\"\n )\n else:\n self.encoder = encoder\n else:\n if isinstance(encoder, str):\n encoder = Path(encoder)\n self.encoder = encoder\n\n # Create the label dictionary on init (store to keep from constantly generating label_dict) should we use dev/test set instead assuming all labels are provided?\n self.label_dict = self.corpus.make_label_dictionary()\n\n # Save trainer kwargs dict for reinitializations\n self.trainer_kwargs = kwargs\n\n # Load trainer with initial setup\n self._initial_setup(self.label_dict, **kwargs)\n\n def _initial_setup(self, label_dict: Dict, **kwargs):\n if self.predictive_head == \"flair\":\n\n # Get Document embeddings from `embeddings`\n document_embeddings: DocumentRNNEmbeddings = self.encoder.rnn_embeddings\n\n # Create the text classifier\n classifier = TextClassifier(\n document_embeddings, label_dictionary=label_dict, **kwargs,\n )\n\n # Initialize the 
text classifier trainer\n self.trainer = ModelTrainer(classifier, self.corpus)\n\n # TODO: In internal transformers package, create ****ForSequenceClassification adaptations\n elif self.predictive_head == \"transformers\":\n with open(self.encoder / \"config.json\") as config_f:\n configs = json.load(config_f)\n model_name = configs[\"architectures\"][-1]\n if model_name == \"BertForMaskedLM\":\n pass\n\n def train(\n self,\n output_dir: Union[Path, str],\n learning_rate: float = 0.07,\n mini_batch_size: int = 32,\n anneal_factor: float = 0.5,\n patience: int = 5,\n max_epochs: int = 150,\n plot_weights: bool = False,\n **kwargs,\n ) -> None:\n \"\"\"\n Train the Sequence Classifier\n\n * **output_dir** - The output directory where the model predictions and checkpoints will be written.\n * **learning_rate** - The initial learning rate\n * **mini_batch_size** - Batch size for the dataloader\n * **anneal_factor** - The factor by which the learning rate is annealed\n * **patience** - Patience is the number of epochs with no improvement the Trainer waits until annealing the learning rate\n * **max_epochs** - Maximum number of epochs to train. Terminates training if this number is surpassed.\n * **plot_weights** - Bool to plot weights or not\n * **kwargs** - Keyword arguments for the rest of Flair's `Trainer.train()` hyperparameters\n \"\"\"\n if isinstance(output_dir, str):\n output_dir = Path(output_dir)\n\n # Start the training\n self.trainer.train(\n output_dir,\n learning_rate=learning_rate,\n mini_batch_size=mini_batch_size,\n anneal_factor=anneal_factor,\n patience=patience,\n max_epochs=max_epochs,\n **kwargs,\n )\n\n # Plot weight traces\n if plot_weights:\n plotter = Plotter()\n plotter.plot_weights(output_dir / \"weights.txt\")\n\n def find_learning_rate(\n self,\n output_dir: Union[Path, str],\n file_name: str = \"learning_rate.tsv\",\n start_learning_rate: float = 1e-8,\n end_learning_rate: float = 10,\n iterations: int = 100,\n mini_batch_size: int = 32,\n stop_early: bool = True,\n smoothing_factor: float = 0.7,\n plot_learning_rate: bool = True,\n **kwargs,\n ) -> float:\n \"\"\"\n Uses Leslie's cyclical learning rate finding method to generate and save the loss x learning rate plot\n\n This method returns a suggested learning rate using the static method `LMFineTuner.suggest_learning_rate()`\n which is implicitly run in this method.\n\n * **output_dir** - Path to dir for learning rate file to be saved\n * **file_name** - Name of learning rate .tsv file\n * **start_learning_rate** - Initial learning rate to start cyclical learning rate finder method\n * **end_learning_rate** - End learning rate to stop exponential increase of the learning rate\n * **iterations** - Number of optimizer iterations for the ExpAnnealLR scheduler\n * **mini_batch_size** - Batch size for dataloader\n * **stop_early** - Bool for stopping early once loss diverges\n * **smoothing_factor** - Smoothing factor on moving average of losses\n * **adam_epsilon** - Epsilon for Adam optimizer.\n * **weight_decay** - Weight decay if we apply some.\n * **kwargs** - Additional keyword arguments for the Adam optimizer\n **return** - Learning rate as a float\n \"\"\"\n # 7. 
find learning rate\n learning_rate_tsv = self.trainer.find_learning_rate(\n base_path=output_dir,\n file_name=file_name,\n start_learning_rate=start_learning_rate,\n end_learning_rate=end_learning_rate,\n iterations=iterations,\n mini_batch_size=mini_batch_size,\n stop_early=stop_early,\n smoothing_factor=smoothing_factor,\n )\n\n # Reinitialize optimizer and parameters by reinitializing trainer\n self._initial_setup(self.label_dict, **self.trainer_kwargs)\n\n if plot_learning_rate:\n plotter = Plotter()\n plotter.plot_learning_rate(learning_rate_tsv)\n\n # Use the automated learning rate finder\n with open(learning_rate_tsv) as lr_f:\n lr_tsv = list(csv.reader(lr_f, delimiter=\"\\t\"))\n losses = np.array([float(row[-1]) for row in lr_tsv[1:]])\n lrs = np.array([float(row[-2]) for row in lr_tsv[1:]])\n lr_to_use = self.suggested_learning_rate(losses, lrs, **kwargs)\n print(f\"Recommended Learning Rate {lr_to_use}\")\n return lr_to_use\n\n @staticmethod\n def suggested_learning_rate(\n losses: np.array,\n lrs: np.array,\n lr_diff: int = 15,\n loss_threshold: float = 0.2,\n adjust_value: float = 1,\n ) -> float:\n # This seems redundant unless we can make this configured for each trainer/finetuner\n \"\"\"\n Attempts to find the optimal learning rate using a interval slide rule approach with the cyclical learning rate method\n\n * **losses** - Numpy array of losses\n * **lrs** - Numpy array of exponentially increasing learning rates (must match dim of `losses`)\n * **lr_diff** - Learning rate Interval of slide ruler\n * **loss_threshold** - Threshold of loss difference on interval where the sliding stops\n * **adjust_value** - Coefficient for adjustment\n **return** - the optimal learning rate as a float\n \"\"\"\n # Get loss values and their corresponding gradients, and get lr values\n assert lr_diff < len(losses)\n loss_grad = np.gradient(losses)\n\n # Search for index in gradients where loss is lowest before the loss spike\n # Initialize right and left idx using the lr_diff as a spacing unit\n # Set the local min lr as -1 to signify if threshold is too low\n r_idx = -1\n l_idx = r_idx - lr_diff\n local_min_lr = lrs[l_idx]\n while (l_idx >= -len(losses)) and (\n abs(loss_grad[r_idx] - loss_grad[l_idx]) > loss_threshold\n ):\n local_min_lr = lrs[l_idx]\n r_idx -= 1\n l_idx -= 1\n\n lr_to_use = local_min_lr * adjust_value\n\n return lr_to_use\n",
"# Contains code used/modified by AdaptNLP author from transformers\n# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.\n# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom abc import ABC, abstractmethod\nfrom typing import List, Union, Tuple\nimport collections\nfrom collections import OrderedDict\n\nimport torch\nfrom torch.utils.data import DataLoader, SequentialSampler, TensorDataset\n\nfrom transformers import (\n BertConfig,\n BertForQuestionAnswering,\n BertTokenizer,\n XLMConfig,\n XLMForQuestionAnswering,\n XLMTokenizer,\n XLNetConfig,\n XLNetForQuestionAnswering,\n XLNetTokenizer,\n)\n\nfrom adaptnlp.transformers.utils_squad import (\n SquadExample,\n InputFeatures,\n convert_examples_to_features,\n RawResult,\n RawResultExtended,\n get_final_text,\n _get_best_indexes,\n _compute_softmax,\n)\n\n\nclass QuestionAnsweringModel(ABC):\n @abstractmethod\n def __init__(self):\n super().__init__()\n self.config\n self.tokenizer\n self.model\n\n @abstractmethod\n def _load(self):\n raise NotImplementedError\n\n @abstractmethod\n def predict(self, query, context, top_n, as_dict):\n raise NotImplementedError\n\n\n# TODO To be deprecated in the near future for a better module design\nclass BertQuestionAnsweringModel(QuestionAnsweringModel):\n def __init__(self):\n self.config = BertConfig\n self.tokenizer = BertTokenizer\n self.model = BertForQuestionAnswering\n self.model_names = list(self.config.pretrained_config_archive_map.keys())\n\n # Post Load\n self.pretrained_config = None\n self.pretrained_tokenizer = None\n self.pretrained_model = None\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def _to_list(self, tensor: torch.Tensor) -> List[float]:\n return tensor.detach().cpu().tolist()\n\n def _load(self) -> None:\n print(\"Loading Pretrained Bert Question Answering Model...\")\n model_name = \"bert-large-uncased-whole-word-masking-finetuned-squad\"\n self.pretrained_config = self.config.from_pretrained(\n \"bert-large-uncased-whole-word-masking-finetuned-squad\"\n )\n if \"uncased\" in model_name:\n tokenizer = self.tokenizer.from_pretrained(\n \"bert-large-uncased\", do_lower_case=True\n )\n else:\n tokenizer = self.tokenizer.from_pretrained(\n \"bert-large-cased\", do_lower_case=False\n )\n self.pretrained_tokenizer = tokenizer\n\n model = self.model.from_pretrained(\n model_name,\n from_tf=bool(\".ckpt\" in model_name),\n config=self.pretrained_config,\n )\n self.pretrained_model = model\n self.pretrained_model.to(self.device)\n\n def _load_one_query(\n self, query: str, context: str, output_examples=True\n ) -> Union[TensorDataset, List[SquadExample], List[InputFeatures]]:\n # Create doc_tokens for SquadExample with one query and context\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n # Create doc_tokens\n doc_tokens = []\n prev_is_whitespace = True\n 
for c in context:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n\n # Create SquadExample\n examples = []\n example = SquadExample(\n qas_id=None,\n question_text=query,\n doc_tokens=doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False,\n )\n examples.append(example)\n\n # Convert to features\n features = convert_examples_to_features(\n examples=examples,\n tokenizer=self.pretrained_tokenizer,\n max_seq_length=384,\n doc_stride=128,\n max_query_length=64,\n is_training=False,\n )\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor(\n [f.input_mask for f in features], dtype=torch.long\n )\n all_segment_ids = torch.tensor(\n [f.segment_ids for f in features], dtype=torch.long\n )\n all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)\n all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)\n\n all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n dataset = TensorDataset(\n all_input_ids,\n all_input_mask,\n all_segment_ids,\n all_example_index,\n all_cls_index,\n all_p_mask,\n )\n\n if output_examples:\n return dataset, examples, features\n return dataset\n\n def _produce_concrete_predictions(\n self,\n all_examples,\n all_features,\n all_results,\n n_best_size=10,\n max_answer_length=30,\n do_lower_case=True,\n verbose_logging=False,\n version_2_with_negative=True,\n null_score_diff_threshold=0.0,\n ):\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"],\n )\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min null score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. 
We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index],\n )\n )\n if version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit,\n )\n )\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True,\n )\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\",\n [\"text\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"],\n ) # ### start_end_index\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n orig_doc_start = 0\n orig_doc_end = 0\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[\n orig_doc_start : (orig_doc_end + 1)\n ]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(\n tok_text, orig_text, do_lower_case, verbose_logging\n )\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit,\n start_index=orig_doc_start,\n end_index=orig_doc_end,\n )\n ) # ### start_end_index...Make span indices inclusive\n # if we didn't include the empty option in the n-best, include it\n if version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\",\n start_logit=null_start_logit,\n end_logit=null_end_logit,\n start_index=0,\n end_index=0,\n )\n ) # ### start_end_index should this be pred.<index>\n\n # In very rare edge cases we could only have single null prediction.\n # So we just create a nonce prediction in this case to avoid failure.\n if len(nbest) == 1:\n nbest.insert(\n 0,\n _NbestPrediction(\n text=\"empty\",\n start_logit=0.0,\n end_logit=0.0,\n start_index=0.0,\n end_index=0.0,\n ),\n ) # ### start_end_index\n\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(\n text=\"empty\",\n start_logit=0.0,\n end_logit=0.0,\n start_index=0.0,\n end_index=0.0,\n )\n ) # ### start_end_index\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n output[\n \"start_index\"\n ] = (\n entry.start_index\n ) # ### start_end_index MAGIC NUMBERS for adjustment :/\n output[\"end_index\"] = entry.end_index\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = (\n score_null\n - best_non_null_entry.start_logit\n - (best_non_null_entry.end_logit)\n )\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n all_predictions[example.qas_id] = best_non_null_entry.text\n all_nbest_json[example.qas_id] = nbest_json\n\n # All ids set as None so get rid of None Key\n all_predictions = all_predictions[None]\n all_nbest_json = all_nbest_json[None]\n\n return all_predictions, all_nbest_json\n\n def predict(\n self, query: str, context: str, n_best_size: int = 20\n ) -> Tuple[str, List[OrderedDict]]:\n \"\"\" Predicts top_n answer spans of query in regards to context\n\n Args:\n query: The question\n context: The context of which the question is asking\n top_n: The top n answers returned\n\n Returns:\n Either a list of string answers or a dict of the results\n \"\"\"\n self._load() if not self.pretrained_model or not self.pretrained_tokenizer else None\n\n # Load and Evaluate Context Queries\n dataset, examples, features = self._load_one_query(query, context)\n eval_sampler = SequentialSampler(dataset)\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=5)\n all_results = []\n for batch in eval_dataloader:\n self.pretrained_model.eval()\n batch = tuple(t.to(self.device) for t in batch)\n\n with torch.no_grad():\n # BERT XLM XLNET DIFFERENCE\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n }\n example_indices = batch[3]\n outputs = self.pretrained_model(**inputs)\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n # BERT XLM XLNET DIFFERENCE\n result = RawResult(\n unique_id=unique_id,\n start_logits=self._to_list(outputs[0][i]),\n end_logits=self._to_list(outputs[1][i]),\n )\n all_results.append(result)\n\n # Obtain Concrete Predictions\n all_predictions, all_nbest_json = self._produce_concrete_predictions(\n examples, features, all_results, n_best_size=n_best_size\n )\n return all_predictions, all_nbest_json\n\n\nclass XLNetQuestionAnsweringModel(QuestionAnsweringModel):\n def __init__(self):\n self.config = XLNetConfig\n self.tokenizer = XLNetTokenizer\n self.model = XLNetForQuestionAnswering\n self.model_names = 
list(self.config.pretrained_config_archive_map.keys())\n\n # Post Load\n self.pretrained_config = None\n self.pretrained_tokenizer = None\n self.pretrained_model = None\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def _to_list(self, tensor: torch.Tensor) -> List[float]:\n return tensor.detach().cpu().tolist()\n\n def _load(self) -> None:\n print(\"Loading Pretrained XLNet Question Answering Model...\")\n model_name = \"xlnet-large-cased\"\n self.pretrained_config = self.config.from_pretrained(\"xlnet-large-cased\")\n tokenizer = self.tokenizer.from_pretrained(\n \"xlnet-large-cased\", do_lower_case=False\n )\n self.pretrained_tokenizer = tokenizer\n\n model = self.model.from_pretrained(\n model_name,\n from_tf=bool(\".ckpt\" in model_name),\n config=self.pretrained_config,\n )\n self.pretrained_model = model\n self.pretrained_model.to(self.device)\n\n def _load_one_query(\n self, query: str, context: str, output_examples=True\n ) -> Union[TensorDataset, List[SquadExample], List[InputFeatures]]:\n # Create doc_tokens for SquadExample with one query and context\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n # Create doc_tokens\n doc_tokens = []\n prev_is_whitespace = True\n for c in context:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n\n # Create SquadExample\n examples = []\n example = SquadExample(\n qas_id=None,\n question_text=query,\n doc_tokens=doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False,\n )\n examples.append(example)\n\n # Convert to features\n features = convert_examples_to_features(\n examples=examples,\n tokenizer=self.pretrained_tokenizer,\n max_seq_length=384,\n doc_stride=128,\n max_query_length=64,\n is_training=False,\n )\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor(\n [f.input_mask for f in features], dtype=torch.long\n )\n all_segment_ids = torch.tensor(\n [f.segment_ids for f in features], dtype=torch.long\n )\n all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)\n all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)\n\n all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n dataset = TensorDataset(\n all_input_ids,\n all_input_mask,\n all_segment_ids,\n all_example_index,\n all_cls_index,\n all_p_mask,\n )\n\n if output_examples:\n return dataset, examples, features\n return dataset\n\n def _produce_concrete_predictions(\n self,\n all_examples,\n all_features,\n all_results,\n n_best_size=10,\n max_answer_length=30,\n verbose_logging=False,\n ):\n\n start_n_top = self.pretrained_model.config.start_n_top\n end_n_top = self.pretrained_model.config.end_n_top\n tokenizer = self.pretrained_tokenizer\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\n \"feature_index\",\n \"start_index\",\n \"end_index\",\n \"start_log_prob\",\n \"end_log_prob\",\n ],\n )\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_log_prob\", \"end_log_prob\"]\n )\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n 
example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n\n cur_null_score = result.cls_logits\n\n # if we could have irrelevant answers, get the min score of irrelevant\n score_null = min(score_null, cur_null_score)\n\n for i in range(start_n_top):\n for j in range(end_n_top):\n start_log_prob = result.start_top_log_probs[i]\n start_index = result.start_top_index[i]\n\n j_index = i * end_n_top + j\n\n end_log_prob = result.end_top_log_probs[j_index]\n end_index = result.end_top_index[j_index]\n\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= feature.paragraph_len - 1:\n continue\n if end_index >= feature.paragraph_len - 1:\n continue\n\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_log_prob=start_log_prob,\n end_log_prob=end_log_prob,\n )\n )\n\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_log_prob + x.end_log_prob),\n reverse=True,\n )\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n\n # XLNet un-tokenizer\n # Let's keep it simple for now and see if we need all this later.\n #\n # tok_start_to_orig_index = feature.tok_start_to_orig_index\n # tok_end_to_orig_index = feature.tok_end_to_orig_index\n # start_orig_pos = tok_start_to_orig_index[pred.start_index]\n # end_orig_pos = tok_end_to_orig_index[pred.end_index]\n # paragraph_text = example.paragraph_text\n # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()\n\n # Previously used Bert untokenizer\n tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]\n tok_text = tokenizer.convert_tokens_to_string(tok_tokens)\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(\n tok_text, orig_text, tokenizer.do_lower_case, verbose_logging\n )\n\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_log_prob=pred.start_log_prob,\n end_log_prob=pred.end_log_prob,\n )\n )\n\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"\", start_log_prob=-1e6, end_log_prob=-1e6)\n )\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_log_prob + entry.end_log_prob)\n if not best_non_null_entry:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_log_prob\"] = entry.start_log_prob\n output[\"end_log_prob\"] = entry.end_log_prob\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n assert best_non_null_entry is not None\n\n score_diff = score_null\n scores_diff_json[example.qas_id] = score_diff\n # note(zhiliny): always predict best_non_null_entry\n # and the evaluation script will search for the best threshold\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n \"\"\"\n if version_2_with_negative:\n with open(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n\n with open(orig_data_file, \"r\", encoding='utf-8') as reader:\n orig_data = json.load(reader)[\"data\"]\n \"\"\"\n\n # All ids set as None so get rid of None Key\n all_predictions = all_predictions[None]\n all_nbest_json = all_nbest_json[None]\n\n return all_predictions, all_nbest_json\n\n def predict(\n self, query: str, context: str, n_best_size: int = 20, as_dict: bool = False\n ) -> Union[List[str], dict]:\n \"\"\" Predicts top_n answer spans of query in regards to context\n\n Args:\n query: The question\n context: The context of which the question is asking\n top_n: The top n answers returned\n as_dict: Returns answer in dict format if True\n\n Returns:\n Either a list of string answers or a dict of the results\n \"\"\"\n self._load() if not self.pretrained_model or not self.pretrained_tokenizer else None\n\n # Load and Evaluate Context Queries\n dataset, examples, features = self._load_one_query(query, context)\n eval_sampler = SequentialSampler(dataset)\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=5)\n all_results = []\n for batch in eval_dataloader:\n self.pretrained_model.eval()\n batch = tuple(t.to(self.device) for t in batch)\n\n with torch.no_grad():\n # BERT XLM XLNET DIFFERENCE\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n \"cls_index\": batch[4],\n \"p_mask\": batch[5],\n }\n example_indices = batch[3]\n outputs = self.pretrained_model(**inputs)\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n # BERT XLM XLNET DIFFERENCE\n result = RawResultExtended(\n unique_id=unique_id,\n start_top_log_probs=self._to_list(outputs[0][i]),\n start_top_index=self._to_list(outputs[1][i]),\n end_top_log_probs=self._to_list(outputs[2][i]),\n end_top_index=self._to_list(outputs[3][i]),\n cls_logits=self._to_list(outputs[4][i]),\n )\n all_results.append(result)\n\n # Obtain Concrete Predictions\n all_predictions, all_nbest_json = self._produce_concrete_predictions(\n examples, features, all_results, n_best_size=n_best_size\n )\n return all_predictions, all_nbest_json\n\n\nclass XLMQuestionAnsweringModel(QuestionAnsweringModel):\n def __init__(self):\n self.config = XLMConfig\n 
self.tokenizer = XLMTokenizer\n self.model = XLMForQuestionAnswering\n self.model_names = list(self.config.pretrained_config_archive_map.keys())\n\n # Post Load\n self.pretrained_config = None\n self.pretrained_tokenizer = None\n self.pretrained_model = None\n\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n def _to_list(self, tensor: torch.Tensor) -> List[float]:\n return tensor.detach().cpu().tolist()\n\n def _load(self) -> None:\n print(\"Loading Pretrained XLNet Question Answering Model...\")\n model_name = \"xlm-mlm-en-2048\"\n self.pretrained_config = self.config.from_pretrained(\"xlm-mlm-en-2048\")\n tokenizer = self.tokenizer.from_pretrained(\n \"xlm-mlm-en-2048\", do_lower_case=False\n )\n self.pretrained_tokenizer = tokenizer\n\n model = self.model.from_pretrained(\n model_name,\n from_tf=bool(\".ckpt\" in model_name),\n config=self.pretrained_config,\n )\n self.pretrained_model = model\n self.pretrained_model.to(self.device)\n\n def _load_one_query(\n self, query: str, context: str, output_examples=True\n ) -> Union[TensorDataset, List[SquadExample], List[InputFeatures]]:\n # Create doc_tokens for SquadExample with one query and context\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n # Create doc_tokens\n doc_tokens = []\n prev_is_whitespace = True\n for c in context:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n\n # Create SquadExample\n examples = []\n example = SquadExample(\n qas_id=None,\n question_text=query,\n doc_tokens=doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False,\n )\n examples.append(example)\n\n # Convert to features\n features = convert_examples_to_features(\n examples=examples,\n tokenizer=self.pretrained_tokenizer,\n max_seq_length=384,\n doc_stride=128,\n max_query_length=64,\n is_training=False,\n )\n\n # Convert to Tensors and build dataset\n all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)\n all_input_mask = torch.tensor(\n [f.input_mask for f in features], dtype=torch.long\n )\n all_segment_ids = torch.tensor(\n [f.segment_ids for f in features], dtype=torch.long\n )\n all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)\n all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)\n\n all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)\n dataset = TensorDataset(\n all_input_ids,\n all_input_mask,\n all_segment_ids,\n all_example_index,\n all_cls_index,\n all_p_mask,\n )\n\n if output_examples:\n return dataset, examples, features\n return dataset\n\n def _produce_concrete_predictions(\n self,\n all_examples,\n all_features,\n all_results,\n n_best_size=10,\n max_answer_length=30,\n verbose_logging=False,\n ):\n\n start_n_top = self.pretrained_model.config.start_n_top\n end_n_top = self.pretrained_model.config.end_n_top\n tokenizer = self.pretrained_tokenizer\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\n \"feature_index\",\n \"start_index\",\n \"end_index\",\n \"start_log_prob\",\n \"end_log_prob\",\n ],\n )\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_log_prob\", \"end_log_prob\"]\n )\n\n example_index_to_features = 
collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(all_examples):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n\n cur_null_score = result.cls_logits\n\n # if we could have irrelevant answers, get the min score of irrelevant\n score_null = min(score_null, cur_null_score)\n\n for i in range(start_n_top):\n for j in range(end_n_top):\n start_log_prob = result.start_top_log_probs[i]\n start_index = result.start_top_index[i]\n\n j_index = i * end_n_top + j\n\n end_log_prob = result.end_top_log_probs[j_index]\n end_index = result.end_top_index[j_index]\n\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= feature.paragraph_len - 1:\n continue\n if end_index >= feature.paragraph_len - 1:\n continue\n\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_log_prob=start_log_prob,\n end_log_prob=end_log_prob,\n )\n )\n\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_log_prob + x.end_log_prob),\n reverse=True,\n )\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n\n # XLNet un-tokenizer\n # Let's keep it simple for now and see if we need all this later.\n #\n # tok_start_to_orig_index = feature.tok_start_to_orig_index\n # tok_end_to_orig_index = feature.tok_end_to_orig_index\n # start_orig_pos = tok_start_to_orig_index[pred.start_index]\n # end_orig_pos = tok_end_to_orig_index[pred.end_index]\n # paragraph_text = example.paragraph_text\n # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()\n\n # Previously used Bert untokenizer\n tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]\n tok_text = tokenizer.convert_tokens_to_string(tok_tokens)\n\n # Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(\n tok_text,\n orig_text,\n tokenizer, # .do_lower_case, (a XLM problem?)\n verbose_logging,\n )\n\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_log_prob=pred.start_log_prob,\n end_log_prob=pred.end_log_prob,\n )\n )\n\n # In very rare edge cases we could have no valid predictions. 
So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"\", start_log_prob=-1e6, end_log_prob=-1e6)\n )\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_log_prob + entry.end_log_prob)\n if not best_non_null_entry:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_log_prob\"] = entry.start_log_prob\n output[\"end_log_prob\"] = entry.end_log_prob\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n assert best_non_null_entry is not None\n\n score_diff = score_null\n scores_diff_json[example.qas_id] = score_diff\n # note(zhiliny): always predict best_non_null_entry\n # and the evaluation script will search for the best threshold\n all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n \"\"\"\n if version_2_with_negative:\n with open(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n\n with open(orig_data_file, \"r\", encoding='utf-8') as reader:\n orig_data = json.load(reader)[\"data\"]\n \"\"\"\n\n # All ids set as None so get rid of None Key\n all_predictions = all_predictions[None]\n all_nbest_json = all_nbest_json[None]\n\n return all_predictions, all_nbest_json\n\n def predict(\n self, query: str, context: str, n_best_size: int = 20, as_dict: bool = False\n ) -> Union[List[str], dict]:\n \"\"\" Predicts top_n answer spans of query in regards to context\n\n Args:\n query: The question\n context: The context of which the question is asking\n top_n: The top n answers returned\n as_dict: Returns answer in dict format if True\n\n Returns:\n Either a list of string answers or a dict of the results\n \"\"\"\n self._load() if not self.pretrained_model or not self.pretrained_tokenizer else None\n\n # Load and Evaluate Context Queries\n dataset, examples, features = self._load_one_query(query, context)\n eval_sampler = SequentialSampler(dataset)\n eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=5)\n all_results = []\n for batch in eval_dataloader:\n self.pretrained_model.eval()\n batch = tuple(t.to(self.device) for t in batch)\n\n with torch.no_grad():\n # BERT XLM XLNET DIFFERENCE\n inputs = {\n \"input_ids\": batch[0],\n \"attention_mask\": batch[1],\n \"token_type_ids\": batch[2],\n \"cls_index\": batch[4],\n \"p_mask\": batch[5],\n }\n example_indices = batch[3]\n outputs = self.pretrained_model(**inputs)\n\n for i, example_index in enumerate(example_indices):\n eval_feature = features[example_index.item()]\n unique_id = int(eval_feature.unique_id)\n # BERT XLM XLNET DIFFERENCE\n result = RawResultExtended(\n unique_id=unique_id,\n start_top_log_probs=self._to_list(outputs[0][i]),\n start_top_index=self._to_list(outputs[1][i]),\n end_top_log_probs=self._to_list(outputs[2][i]),\n end_top_index=self._to_list(outputs[3][i]),\n cls_logits=self._to_list(outputs[4][i]),\n )\n all_results.append(result)\n\n # Obtain Concrete Predictions\n all_predictions, all_nbest_json = self._produce_concrete_predictions(\n examples, features, all_results, n_best_size=n_best_size\n )\n return all_predictions, all_nbest_json\n"
] |
[
[
"numpy.gradient"
],
[
"torch.utils.data.TensorDataset",
"torch.utils.data.SequentialSampler",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
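For reference, the interval slide-rule heuristic implemented by `suggested_learning_rate` in the record above can be exercised in isolation. Below is a minimal, self-contained re-implementation of that same logic; the `losses`/`lrs` arrays are synthetic and invented purely for illustration, not taken from any real learning-rate sweep:

```python
import numpy as np

def suggested_lr(losses: np.ndarray, lrs: np.ndarray,
                 lr_diff: int = 15, loss_threshold: float = 0.2,
                 adjust_value: float = 1.0) -> float:
    # Slide a window of width `lr_diff` leftwards from the end of the loss
    # curve until the loss-gradient difference across the window drops below
    # `loss_threshold`; the left edge then sits just before the divergence.
    assert lr_diff < len(losses)
    grad = np.gradient(losses)
    r_idx, l_idx = -1, -1 - lr_diff
    lr = lrs[l_idx]
    while l_idx >= -len(losses) and abs(grad[r_idx] - grad[l_idx]) > loss_threshold:
        lr = lrs[l_idx]
        r_idx -= 1
        l_idx -= 1
    return lr * adjust_value

# Synthetic exponential lr sweep: loss decreases gently, then diverges.
lrs = np.logspace(-8, 1, 100)
losses = np.concatenate([np.linspace(2.0, 0.5, 80), np.linspace(0.5, 30.0, 20)])
print(suggested_lr(losses, lrs))
```

As in the original, the returned value is the learning rate at the left window edge, scaled by `adjust_value`; if the threshold is never met the window runs to the start of the sweep and the earliest rate is returned.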
AlexanLee/DeepCTR
|
[
"1ff32c6b0105e3341ddf34e7074e596032bff158"
] |
[
"examples/run_dien.py"
] |
[
"import numpy as np\nimport tensorflow as tf\n\nfrom deepctr.models import DIEN\nfrom deepctr.inputs import SparseFeat, DenseFeat, VarLenSparseFeat, get_feature_names\n\n\ndef get_xy_fd(use_neg=False, hash_flag=False):\n feature_columns = [SparseFeat('user', 3, embedding_dim=10, use_hash=hash_flag),\n SparseFeat('gender', 2, embedding_dim=4, use_hash=hash_flag),\n SparseFeat('item', 3 + 1, embedding_dim=8, use_hash=hash_flag),\n SparseFeat('item_gender', 2 + 1, embedding_dim=4, use_hash=hash_flag),\n DenseFeat('score', 1)]\n\n feature_columns += [\n VarLenSparseFeat('hist_item', maxlen=4, vocabulary_size=3 + 1, embedding_dim=8, embedding_name='item',\n length_name=\"seq_length\"),\n VarLenSparseFeat('hist_item_gender', maxlen=4, vocabulary_size=3 + 1, embedding_dim=4,\n embedding_name='item_gender', length_name=\"seq_length\")]\n\n behavior_feature_list = [\"item\", \"item_gender\"]\n uid = np.array([0, 1, 2])\n ugender = np.array([0, 1, 0])\n iid = np.array([1, 2, 3]) # 0 is mask value\n igender = np.array([1, 2, 1]) # 0 is mask value\n score = np.array([0.1, 0.2, 0.3])\n\n hist_iid = np.array([[1, 2, 3, 0], [1, 2, 3, 0], [1, 2, 0, 0]])\n hist_igender = np.array([[1, 1, 2, 0], [2, 1, 1, 0], [2, 1, 0, 0]])\n\n behavior_length = np.array([3, 3, 2])\n\n feature_dict = {'user': uid, 'gender': ugender, 'item': iid, 'item_gender': igender,\n 'hist_item': hist_iid, 'hist_item_gender': hist_igender,\n 'score': score, \"seq_length\": behavior_length}\n\n if use_neg:\n feature_dict['neg_hist_item'] = np.array([[1, 2, 3, 0], [1, 2, 3, 0], [1, 2, 0, 0]])\n feature_dict['neg_hist_item_gender'] = np.array([[1, 1, 2, 0], [2, 1, 1, 0], [2, 1, 0, 0]])\n feature_columns += [\n VarLenSparseFeat('neg_hist_item', maxlen=4, vocabulary_size=3 + 1, embedding_dim=8, embedding_name='item',\n length_name=\"seq_length\"),\n VarLenSparseFeat('neg_hist_item_gender', maxlen=4, vocabulary_size=3 + 1, embedding_dim=4,\n embedding_name='item_gender', length_name=\"seq_length\")]\n\n x = {name: feature_dict[name] for name in get_feature_names(feature_columns)}\n y = [1, 0, 1]\n return x, y, feature_columns, behavior_feature_list\n\n\nif __name__ == \"__main__\":\n if tf.__version__ >= '2.0.0':\n tf.compat.v1.disable_eager_execution()\n\n x, y, feature_columns, behavior_feature_list = get_xy_fd(use_neg=True)\n model = DIEN(feature_columns, behavior_feature_list,\n dnn_hidden_units=[4, 4, 4], dnn_dropout=0.6, gru_type=\"AUGRU\", use_negsampling=True)\n\n model.compile('adam', 'binary_crossentropy',\n metrics=['binary_crossentropy'])\n history = model.fit(x, y, verbose=1, epochs=10, validation_split=0.5)\n"
] |
[
[
"numpy.array",
"tensorflow.compat.v1.disable_eager_execution"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
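The `apis` column in each record pairs a source file with the library calls it makes (here `numpy.array` and `tensorflow.compat.v1.disable_eager_execution` for `examples/run_dien.py`). A minimal sketch of how such dotted call names can be collected statically with Python's `ast` module follows; it is illustrative only, deliberately omits import-alias resolution (`import numpy as np`), and is not the pipeline that actually produced these lists:

```python
import ast

def module_calls(source: str, roots=("numpy", "tensorflow", "torch")) -> set:
    # Collect dotted attribute chains that end in a call, e.g. numpy.array.
    found = set()
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call):
            parts = []
            target = node.func
            while isinstance(target, ast.Attribute):
                parts.append(target.attr)
                target = target.value
            if isinstance(target, ast.Name):
                parts.append(target.id)
                dotted = ".".join(reversed(parts))
                if dotted.split(".")[0] in roots:
                    found.add(dotted)
    return found

print(module_calls("import numpy\nx = numpy.array([1, 2])"))  # {'numpy.array'}
```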
ColdTeapot273K/bert-extractive-summarizer
|
[
"19c4b5a5790294bbbc5a26e8248655705710ff14"
] |
[
"summarizer/model_processors.py"
] |
[
"from summarizer.bert_parent import BertParent\nfrom summarizer.cluster_features import ClusterFeatures\nfrom summarizer.sentence_handler import SentenceHandler\nfrom typing import List\nfrom abc import abstractmethod\nimport numpy as np\nfrom transformers import *\n\n\nclass ModelProcessor(object):\n\n def __init__(\n self,\n model: str = 'bert-large-uncased',\n custom_model: PreTrainedModel = None,\n custom_tokenizer: PreTrainedTokenizer = None,\n hidden: int = -2,\n reduce_option: str = 'mean',\n sentence_handler: SentenceHandler = SentenceHandler(),\n random_state: int = 12345\n ):\n \"\"\"\n This is the parent Bert Summarizer model. New methods should implement this class\n\n :param model: This parameter is associated with the inherit string parameters from the transformers library.\n :param custom_model: If you have a pre-trained model, you can add the model class here.\n :param custom_tokenizer: If you have a custom tokenizer, you can add the tokenizer here.\n :param hidden: This signifies which layer of the BERT model you would like to use as embeddings.\n :param reduce_option: Given the output of the bert model, this param determines how you want to reduce results.\n :param sentence_handler: The handler to process sentences. If want to use coreference, instantiate and pass CoreferenceHandler instance\n :param random_state: The random state to reproduce summarizations.\n \"\"\"\n\n np.random.seed(random_state)\n self.model = BertParent(model, custom_model, custom_tokenizer)\n self.hidden = hidden\n self.reduce_option = reduce_option\n self.sentence_handler = sentence_handler\n self.random_state = random_state\n\n def process_content_sentences(self, body: str, min_length:int = 40, max_length: int = 600) -> List[str]:\n \"\"\"\n Processes the content sentences with neural coreference.\n :param body: The raw string body to process\n :param min_length: Minimum length that the sentences must be\n :param max_length: Max length that the sentences mus fall under\n :return: Returns a list of sentences with coreference applied.\n \"\"\"\n\n doc = self.nlp(body)._.coref_resolved\n doc = self.nlp(doc)\n return [c.string.strip() for c in doc.sents if max_length > len(c.string.strip()) > min_length]\n\n @abstractmethod\n def run_clusters(\n self,\n content: List[str],\n ratio:float = 0.2,\n algorithm: str = 'kmeans',\n use_first: bool = True\n ) -> List[str]:\n \"\"\"\n Classes must implement this to run the clusters.\n \"\"\"\n raise NotImplementedError(\"Must Implement run_clusters\")\n\n def run(\n self,\n body: str,\n ratio: float = 0.2,\n min_length: int = 40,\n max_length: int = 600,\n use_first: bool = True,\n algorithm: str ='kmeans'\n ) -> str:\n \"\"\"\n Preprocesses the sentences, runs the clusters to find the centroids, then combines the sentences.\n\n :param body: The raw string body to process\n :param ratio: Ratio of sentences to use\n :param min_length: Minimum length of sentence candidates to utilize for the summary.\n :param max_length: Maximum length of sentence candidates to utilize for the summary\n :param use_first: Whether or not to use the first sentence\n :param algorithm: Which clustering algorithm to use. 
(kmeans, gmm)\n :return: A summary sentence\n \"\"\"\n sentences = self.sentence_handler(body, min_length, max_length)\n\n if sentences:\n sentences = self.run_clusters(sentences, ratio, algorithm, use_first)\n\n return ' '.join(sentences)\n\n def __call__(\n self,\n body: str,\n ratio: float = 0.2,\n min_length: int = 40,\n max_length: int = 600,\n use_first: bool = True,\n algorithm: str = 'kmeans'\n ) -> str:\n \"\"\"\n (utility that wraps around the run function)\n\n Preprocesses the sentences, runs the clusters to find the centroids, then combines the sentences.\n\n :param body: The raw string body to process\n :param ratio: Ratio of sentences to use\n :param min_length: Minimum length of sentence candidates to utilize for the summary.\n :param max_length: Maximum length of sentence candidates to utilize for the summary\n :param use_first: Whether or not to use the first sentence\n :param algorithm: Which clustering algorithm to use. (kmeans, gmm)\n :return: A summary sentence\n \"\"\"\n return self.run(body, ratio, min_length, max_length, algorithm=algorithm, use_first=use_first)\n\n\nclass SingleModel(ModelProcessor):\n \"\"\"\n Deprecated for naming sake.\n \"\"\"\n\n def __init__(\n self,\n model='bert-large-uncased',\n custom_model: PreTrainedModel = None,\n custom_tokenizer: PreTrainedTokenizer = None,\n hidden: int=-2,\n reduce_option: str = 'mean',\n sentence_handler: SentenceHandler = SentenceHandler(),\n random_state: int=12345\n ):\n super(SingleModel, self).__init__(\n model=model, custom_model=custom_model, custom_tokenizer=custom_tokenizer,\n hidden=hidden, reduce_option=reduce_option,\n sentence_handler=sentence_handler, random_state=random_state\n )\n\n def run_clusters(self, content: List[str], ratio=0.2, algorithm='kmeans', use_first: bool= True) -> List[str]:\n hidden = self.model(content, self.hidden, self.reduce_option)\n hidden_args = ClusterFeatures(hidden, algorithm, random_state=self.random_state).cluster(ratio)\n\n if use_first:\n if hidden_args[0] != 0:\n hidden_args.insert(0,0)\n\n return [content[j] for j in hidden_args]\n\n\nclass Summarizer(SingleModel):\n\n def __init__(\n self,\n model: str = 'bert-large-uncased',\n custom_model: PreTrainedModel = None,\n custom_tokenizer: PreTrainedTokenizer = None,\n hidden: int = -2,\n reduce_option: str = 'mean',\n sentence_handler: SentenceHandler = SentenceHandler(),\n random_state: int = 12345\n ):\n \"\"\"\n This is the main Bert Summarizer class.\n\n :param model: This parameter is associated with the inherit string parameters from the transformers library.\n :param custom_model: If you have a pre-trained model, you can add the model class here.\n :param custom_tokenizer: If you have a custom tokenizer, you can add the tokenizer here.\n :param hidden: This signifies which layer of the BERT model you would like to use as embeddings.\n :param reduce_option: Given the output of the bert model, this param determines how you want to reduce results.\n :param greedyness: associated with the neuralcoref library. 
Determines how greedy coref should be.\n :param language: Which language to use for training.\n :param random_state: The random state to reproduce summarizations.\n \"\"\"\n super(Summarizer, self).__init__(\n model, custom_model, custom_tokenizer, hidden, reduce_option, sentence_handler, random_state\n )\n\n\nclass TransformerSummarizer(SingleModel):\n\n MODEL_DICT = {\n 'Bert': (BertModel, BertTokenizer),\n 'OpenAIGPT': (OpenAIGPTModel, OpenAIGPTTokenizer),\n 'GPT2': (GPT2Model, GPT2Tokenizer),\n 'CTRL': (CTRLModel, CTRLTokenizer),\n 'TransfoXL': (TransfoXLModel, TransfoXLTokenizer),\n 'XLNet': (XLNetModel, XLNetTokenizer),\n 'XLM': (XLMModel, XLMTokenizer),\n 'DistilBert': (DistilBertModel, DistilBertTokenizer),\n }\n\n def __init__(\n self,\n transformer_type: str = 'Bert',\n transformer_model_key: str = 'bert-base-uncased',\n transformer_tokenizer_key: str = None,\n hidden: int = -2,\n reduce_option: str = 'mean',\n sentence_handler: SentenceHandler = SentenceHandler(),\n random_state: int = 12345\n ):\n try:\n self.MODEL_DICT['Roberta'] = (RobertaModel, RobertaTokenizer)\n self.MODEL_DICT['Albert'] = (AlbertModel, AlbertTokenizer)\n self.MODEL_DICT['Camembert'] = (CamembertModel, CamembertTokenizer)\n except Exception as e:\n pass # older transformer version\n\n model_clz, tokenizer_clz = self.MODEL_DICT[transformer_type]\n model = model_clz.from_pretrained(transformer_model_key, output_hidden_states=True)\n tokenizer = tokenizer_clz.from_pretrained(\n transformer_tokenizer_key if transformer_tokenizer_key is not None else transformer_model_key\n )\n super().__init__(\n None, model, tokenizer, hidden, reduce_option, sentence_handler, random_state\n )\n"
] |
[
[
"numpy.random.seed"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
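For context, the `ModelProcessor.__call__` path in `summarizer/model_processors.py` above reduces to: filter sentences by length, embed them with the chosen hidden layer of the transformer, cluster the embeddings, and join the centroid sentences. A hedged usage sketch, assuming the `summarizer` package from this record is installed and `article.txt` is a hypothetical input file:

```python
from summarizer.model_processors import Summarizer

# __call__ wraps run(): sentence filtering, BERT embedding, clustering,
# then joining the sentences closest to each cluster centroid.
body = open("article.txt").read()  # hypothetical input file
model = Summarizer(model="bert-large-uncased", hidden=-2, reduce_option="mean")
print(model(body, ratio=0.2, min_length=40, max_length=600, algorithm="kmeans"))
```

`ratio` controls what fraction of candidate sentences survive clustering, and `use_first=True` (the default) always keeps the opening sentence, per the `run_clusters` implementation above.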
sajastu/abs-summarization
|
[
"9d4b35b457cfd617965ed1fab68c173c98333439",
"9d4b35b457cfd617965ed1fab68c173c98333439"
] |
[
"onmt/utils/loss.py",
"onmt/tests/test_models.py"
] |
[
"\"\"\"\nThis includes: LossComputeBase and the standard NMTLossCompute, and\n sharded loss compute stuff.\n\"\"\"\nfrom __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport onmt\nfrom onmt.modules.sparse_losses import SparsemaxLoss\nfrom onmt.modules.sparse_activations import LogSparsemax\n\n\ndef build_loss_compute(model, tgt_field, opt, train=True):\n \"\"\"\n Returns a LossCompute subclass which wraps around an nn.Module subclass\n (such as nn.NLLLoss) which defines the loss criterion. The LossCompute\n object allows this loss to be computed in shards and passes the relevant\n data to a Statistics object which handles training/validation logging.\n Currently, the NMTLossCompute class handles all loss computation except\n for when using a copy mechanism.\n \"\"\"\n device = torch.device(\"cuda\" if onmt.utils.misc.use_gpu(opt) else \"cpu\")\n\n padding_idx = tgt_field.vocab.stoi[tgt_field.pad_token]\n unk_idx = tgt_field.vocab.stoi[tgt_field.unk_token]\n if opt.copy_attn:\n criterion = onmt.modules.CopyGeneratorLoss(\n len(tgt_field.vocab), opt.copy_attn_force,\n unk_index=unk_idx, ignore_index=padding_idx\n )\n elif opt.label_smoothing > 0 and train:\n criterion = LabelSmoothingLoss(\n opt.label_smoothing, len(tgt_field.vocab), ignore_index=padding_idx\n )\n elif isinstance(model.generator[-1], LogSparsemax):\n criterion = SparsemaxLoss(ignore_index=padding_idx, reduction='sum')\n else:\n criterion = nn.NLLLoss(ignore_index=padding_idx, reduction='sum')\n\n # if the loss function operates on vectors of raw logits instead of\n # probabilities, only the first part of the generator needs to be\n # passed to the NMTLossCompute. At the moment, the only supported\n # loss function of this kind is the sparsemax loss.\n use_raw_logits = isinstance(criterion, SparsemaxLoss)\n loss_gen = model.generator[0] if use_raw_logits else model.generator\n if opt.copy_attn:\n compute = onmt.modules.CopyGeneratorLossCompute(\n criterion, loss_gen, tgt_field.vocab, opt.copy_loss_by_seqlength\n )\n else:\n compute = NMTLossCompute(criterion, loss_gen)\n compute.to(device)\n\n return compute\n\n\nclass LossComputeBase(nn.Module):\n \"\"\"\n Class for managing efficient loss computation. Handles\n sharding next step predictions and accumulating multiple\n loss computations\n\n Users can implement their own loss computation strategy by making\n subclass of this one. Users need to implement the _compute_loss()\n and make_shard_state() methods.\n\n Args:\n generator (:obj:`nn.Module`) :\n module that maps the output of the decoder to a\n distribution over the target vocabulary.\n tgt_vocab (:obj:`Vocab`) :\n torchtext vocab object representing the target output\n normalzation (str): normalize by \"sents\" or \"tokens\"\n \"\"\"\n\n def __init__(self, criterion, generator):\n super(LossComputeBase, self).__init__()\n self.criterion = criterion\n self.generator = generator\n\n @property\n def padding_idx(self):\n return self.criterion.ignore_index\n\n def _make_shard_state(self, batch, output, range_, attns=None):\n \"\"\"\n Make shard state dictionary for shards() to return iterable\n shards for efficient loss computation. 
Subclass must define\n this method to match its own _compute_loss() interface.\n Args:\n batch: the current batch.\n output: the predict output from the model.\n range_: the range of examples for computing, the whole\n batch or a trunc of it?\n attns: the attns dictionary returned from the model.\n \"\"\"\n return NotImplementedError\n\n def _compute_loss(self, batch, output, target, **kwargs):\n \"\"\"\n Compute the loss. Subclass must define this method.\n\n Args:\n\n batch: the current batch.\n output: the predict output from the model.\n target: the validate target to compare output with.\n **kwargs(optional): additional info for computing loss.\n \"\"\"\n return NotImplementedError\n\n def __call__(self,\n batch,\n output,\n attns,\n normalization=1.0,\n shard_size=0,\n trunc_start=0,\n trunc_size=None):\n \"\"\"Compute the forward loss, possibly in shards in which case this\n method also runs the backward pass and returns ``None`` as the loss\n value.\n\n Also supports truncated BPTT for long sequences by taking a\n range in the decoder output sequence to back propagate in.\n Range is from `(trunc_start, trunc_start + trunc_size)`.\n\n Note sharding is an exact efficiency trick to relieve memory\n required for the generation buffers. Truncation is an\n approximate efficiency trick to relieve the memory required\n in the RNN buffers.\n\n Args:\n batch (batch) : batch of labeled examples\n output (:obj:`FloatTensor`) :\n output of decoder model `[tgt_len x batch x hidden]`\n attns (dict) : dictionary of attention distributions\n `[tgt_len x batch x src_len]`\n normalization: Optional normalization factor.\n shard_size (int) : maximum number of examples in a shard\n trunc_start (int) : starting position of truncation window\n trunc_size (int) : length of truncation window\n\n Returns:\n A tuple with the loss and a :obj:`onmt.utils.Statistics` instance.\n \"\"\"\n if trunc_size is None:\n trunc_size = batch.tgt.size(0) - trunc_start\n trunc_range = (trunc_start, trunc_start + trunc_size)\n shard_state = self._make_shard_state(batch, output, trunc_range, attns)\n if shard_size == 0:\n loss, stats = self._compute_loss(batch, **shard_state)\n return loss / float(normalization), stats\n batch_stats = onmt.utils.Statistics()\n for shard in shards(shard_state, shard_size):\n loss, stats = self._compute_loss(batch, **shard)\n loss.div(float(normalization)).backward()\n batch_stats.update(stats)\n return None, batch_stats\n\n def _stats(self, loss, scores, target):\n \"\"\"\n Args:\n loss (:obj:`FloatTensor`): the loss computed by the loss criterion.\n scores (:obj:`FloatTensor`): a score for each possible output\n target (:obj:`FloatTensor`): true targets\n\n Returns:\n :obj:`onmt.utils.Statistics` : statistics for this batch.\n \"\"\"\n pred = scores.max(1)[1]\n non_padding = target.ne(self.padding_idx)\n num_correct = pred.eq(target).masked_select(non_padding).sum().item()\n num_non_padding = non_padding.sum().item()\n return onmt.utils.Statistics(loss.item(), num_non_padding, num_correct)\n\n def _bottle(self, _v):\n return _v.view(-1, _v.size(2))\n\n def _unbottle(self, _v, batch_size):\n return _v.view(-1, batch_size, _v.size(1))\n\n\nclass LabelSmoothingLoss(nn.Module):\n \"\"\"\n With label smoothing,\n KL-divergence between q_{smoothed ground truth prob.}(w)\n and p_{prob. 
computed by model}(w) is minimized.\n \"\"\"\n def __init__(self, label_smoothing, tgt_vocab_size, ignore_index=-100):\n assert 0.0 < label_smoothing <= 1.0\n self.ignore_index = ignore_index\n super(LabelSmoothingLoss, self).__init__()\n\n smoothing_value = label_smoothing / (tgt_vocab_size - 2)\n one_hot = torch.full((tgt_vocab_size,), smoothing_value)\n one_hot[self.ignore_index] = 0\n self.register_buffer('one_hot', one_hot.unsqueeze(0))\n\n self.confidence = 1.0 - label_smoothing\n\n def forward(self, output, target):\n \"\"\"\n output (FloatTensor): batch_size x n_classes\n target (LongTensor): batch_size\n \"\"\"\n model_prob = self.one_hot.repeat(target.size(0), 1)\n model_prob.scatter_(1, target.unsqueeze(1), self.confidence)\n model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)\n\n return F.kl_div(output, model_prob, reduction='sum')\n\n\nclass NMTLossCompute(LossComputeBase):\n \"\"\"\n Standard NMT Loss Computation.\n \"\"\"\n\n def __init__(self, criterion, generator, normalization=\"sents\"):\n super(NMTLossCompute, self).__init__(criterion, generator)\n\n def _make_shard_state(self, batch, output, range_, attns=None):\n return {\n \"output\": output,\n \"target\": batch.tgt[range_[0] + 1: range_[1], :, 0],\n }\n\n def _compute_loss(self, batch, output, target):\n bottled_output = self._bottle(output)\n\n scores = self.generator(bottled_output)\n gtruth = target.view(-1)\n\n loss = self.criterion(scores, gtruth)\n stats = self._stats(loss.clone(), scores, gtruth)\n\n return loss, stats\n\n\ndef filter_shard_state(state, shard_size=None):\n for k, v in state.items():\n if shard_size is None:\n yield k, v\n\n if v is not None:\n v_split = []\n if isinstance(v, torch.Tensor):\n for v_chunk in torch.split(v, shard_size):\n v_chunk = v_chunk.data.clone()\n v_chunk.requires_grad = v.requires_grad\n v_split.append(v_chunk)\n yield k, (v, v_split)\n\n\ndef shards(state, shard_size, eval_only=False):\n \"\"\"\n Args:\n state: A dictionary which corresponds to the output of\n *LossCompute._make_shard_state(). The values for\n those keys are Tensor-like or None.\n shard_size: The maximum size of the shards yielded by the model.\n eval_only: If True, only yield the state, nothing else.\n Otherwise, yield shards.\n\n Yields:\n Each yielded shard is a dict.\n\n Side effect:\n After the last shard, this function does back-propagation.\n \"\"\"\n if eval_only:\n yield filter_shard_state(state)\n else:\n # non_none: the subdict of the state dictionary where the values\n # are not None.\n non_none = dict(filter_shard_state(state, shard_size))\n\n # Now, the iteration:\n # state is a dictionary of sequences of tensor-like but we\n # want a sequence of dictionaries of tensors.\n # First, unzip the dictionary into a sequence of keys and a\n # sequence of tensor-like sequences.\n keys, values = zip(*((k, [v_chunk for v_chunk in v_split])\n for k, (_, v_split) in non_none.items()))\n\n # Now, yield a dictionary for each shard. The keys are always\n # the same. values is a sequence of length #keys where each\n # element is a sequence of length #shards. 
We want to iterate\n # over the shards, not over the keys: therefore, the values need\n # to be re-zipped by shard and then each shard can be paired\n # with the keys.\n for shard_tensors in zip(*values):\n yield dict(zip(keys, shard_tensors))\n\n # Assumed backprop'd\n variables = []\n for k, (v, v_split) in non_none.items():\n if isinstance(v, torch.Tensor) and state[k].requires_grad:\n variables.extend(zip(torch.split(state[k], shard_size),\n [v_chunk.grad for v_chunk in v_split]))\n inputs, grads = zip(*variables)\n torch.autograd.backward(inputs, grads)\n",
"import copy\nimport unittest\nimport math\n\nimport torch\n\nimport onmt\nimport onmt.inputters\nimport onmt.opts\nfrom onmt.model_builder import build_embeddings, \\\n build_encoder, build_decoder\nfrom onmt.encoders.image_encoder import ImageEncoder\nfrom onmt.encoders.audio_encoder import AudioEncoder\nfrom onmt.utils.parse import ArgumentParser\n\nparser = ArgumentParser(description='train.py')\nonmt.opts.model_opts(parser)\nonmt.opts.train_opts(parser)\n\n# -data option is required, but not used in this test, so dummy.\nopt = parser.parse_known_args(['-data', 'dummy'])[0]\n\n\nclass TestModel(unittest.TestCase):\n\n def __init__(self, *args, **kwargs):\n super(TestModel, self).__init__(*args, **kwargs)\n self.opt = opt\n\n def get_field(self):\n src = onmt.inputters.get_fields(\"text\", 0, 0)[\"src\"]\n src.base_field.build_vocab([])\n return src\n\n def get_batch(self, source_l=3, bsize=1):\n # len x batch x nfeat\n test_src = torch.ones(source_l, bsize, 1).long()\n test_tgt = torch.ones(source_l, bsize, 1).long()\n test_length = torch.ones(bsize).fill_(source_l).long()\n return test_src, test_tgt, test_length\n\n def get_batch_image(self, tgt_l=3, bsize=1, h=15, w=17):\n # batch x c x h x w\n test_src = torch.ones(bsize, 3, h, w).float()\n test_tgt = torch.ones(tgt_l, bsize, 1).long()\n test_length = None\n return test_src, test_tgt, test_length\n\n def get_batch_audio(self, tgt_l=7, bsize=3, sample_rate=5500,\n window_size=0.03, t=37):\n # batch x 1 x nfft x t\n nfft = int(math.floor((sample_rate * window_size) / 2) + 1)\n test_src = torch.ones(bsize, 1, nfft, t).float()\n test_tgt = torch.ones(tgt_l, bsize, 1).long()\n test_length = torch.ones(bsize).long().fill_(tgt_l)\n return test_src, test_tgt, test_length\n\n def embeddings_forward(self, opt, source_l=3, bsize=1):\n '''\n Tests if the embeddings works as expected\n\n args:\n opt: set of options\n source_l: Length of generated input sentence\n bsize: Batchsize of generated input\n '''\n word_field = self.get_field()\n emb = build_embeddings(opt, word_field)\n test_src, _, __ = self.get_batch(source_l=source_l, bsize=bsize)\n if opt.decoder_type == 'transformer':\n input = torch.cat([test_src, test_src], 0)\n res = emb(input)\n compare_to = torch.zeros(source_l * 2, bsize,\n opt.src_word_vec_size)\n else:\n res = emb(test_src)\n compare_to = torch.zeros(source_l, bsize, opt.src_word_vec_size)\n\n self.assertEqual(res.size(), compare_to.size())\n\n def encoder_forward(self, opt, source_l=3, bsize=1):\n '''\n Tests if the encoder works as expected\n\n args:\n opt: set of options\n source_l: Length of generated input sentence\n bsize: Batchsize of generated input\n '''\n if opt.rnn_size > 0:\n opt.enc_rnn_size = opt.rnn_size\n word_field = self.get_field()\n embeddings = build_embeddings(opt, word_field)\n enc = build_encoder(opt, embeddings)\n\n test_src, test_tgt, test_length = self.get_batch(source_l=source_l,\n bsize=bsize)\n\n hidden_t, outputs, test_length = enc(test_src, test_length)\n\n # Initialize vectors to compare size with\n test_hid = torch.zeros(self.opt.enc_layers, bsize, opt.enc_rnn_size)\n test_out = torch.zeros(source_l, bsize, opt.dec_rnn_size)\n\n # Ensure correct sizes and types\n self.assertEqual(test_hid.size(),\n hidden_t[0].size(),\n hidden_t[1].size())\n self.assertEqual(test_out.size(), outputs.size())\n self.assertEqual(type(outputs), torch.Tensor)\n\n def nmtmodel_forward(self, opt, source_l=3, bsize=1):\n \"\"\"\n Creates a nmtmodel with a custom opt function.\n Forwards a testbatch and checks 
output size.\n\n Args:\n opt: Namespace with options\n source_l: length of input sequence\n bsize: batchsize\n \"\"\"\n if opt.rnn_size > 0:\n opt.enc_rnn_size = opt.rnn_size\n opt.dec_rnn_size = opt.rnn_size\n word_field = self.get_field()\n\n embeddings = build_embeddings(opt, word_field)\n enc = build_encoder(opt, embeddings)\n\n embeddings = build_embeddings(opt, word_field, for_encoder=False)\n dec = build_decoder(opt, embeddings)\n\n model = onmt.models.model.NMTModel(enc, dec)\n\n test_src, test_tgt, test_length = self.get_batch(source_l=source_l,\n bsize=bsize)\n outputs, attn = model(test_src, test_tgt, test_length)\n outputsize = torch.zeros(source_l - 1, bsize, opt.dec_rnn_size)\n # Make sure that output has the correct size and type\n self.assertEqual(outputs.size(), outputsize.size())\n self.assertEqual(type(outputs), torch.Tensor)\n\n def imagemodel_forward(self, opt, tgt_l=2, bsize=1, h=15, w=17):\n \"\"\"\n Creates an image-to-text nmtmodel with a custom opt function.\n Forwards a testbatch and checks output size.\n\n Args:\n opt: Namespace with options\n source_l: length of input sequence\n bsize: batchsize\n \"\"\"\n if opt.encoder_type == 'transformer' or opt.encoder_type == 'cnn':\n return\n\n word_field = self.get_field()\n\n enc = ImageEncoder(\n opt.enc_layers, opt.brnn, opt.enc_rnn_size, opt.dropout)\n\n embeddings = build_embeddings(opt, word_field, for_encoder=False)\n dec = build_decoder(opt, embeddings)\n\n model = onmt.models.model.NMTModel(enc, dec)\n\n test_src, test_tgt, test_length = self.get_batch_image(\n h=h, w=w,\n bsize=bsize,\n tgt_l=tgt_l)\n outputs, attn = model(test_src, test_tgt, test_length)\n outputsize = torch.zeros(tgt_l - 1, bsize, opt.dec_rnn_size)\n # Make sure that output has the correct size and type\n self.assertEqual(outputs.size(), outputsize.size())\n self.assertEqual(type(outputs), torch.Tensor)\n\n def audiomodel_forward(self, opt, tgt_l=7, bsize=3, t=37):\n \"\"\"\n Creates a speech-to-text nmtmodel with a custom opt function.\n Forwards a testbatch and checks output size.\n\n Args:\n opt: Namespace with options\n source_l: length of input sequence\n bsize: batchsize\n \"\"\"\n if opt.encoder_type == 'transformer' or opt.encoder_type == 'cnn':\n return\n if opt.rnn_type == 'SRU':\n return\n\n word_field = self.get_field()\n\n enc = AudioEncoder(opt.rnn_type, opt.enc_layers, opt.dec_layers,\n opt.brnn, opt.enc_rnn_size, opt.dec_rnn_size,\n opt.audio_enc_pooling, opt.dropout,\n opt.sample_rate, opt.window_size)\n\n embeddings = build_embeddings(opt, word_field, for_encoder=False)\n dec = build_decoder(opt, embeddings)\n\n model = onmt.models.model.NMTModel(enc, dec)\n\n test_src, test_tgt, test_length = self.get_batch_audio(\n bsize=bsize,\n sample_rate=opt.sample_rate,\n window_size=opt.window_size,\n t=t, tgt_l=tgt_l)\n outputs, attn = model(test_src, test_tgt, test_length)\n outputsize = torch.zeros(tgt_l - 1, bsize, opt.dec_rnn_size)\n # Make sure that output has the correct size and type\n self.assertEqual(outputs.size(), outputsize.size())\n self.assertEqual(type(outputs), torch.Tensor)\n\n\ndef _add_test(param_setting, methodname):\n \"\"\"\n Adds a Test to TestModel according to settings\n\n Args:\n param_setting: list of tuples of (param, setting)\n methodname: name of the method that gets called\n \"\"\"\n\n def test_method(self):\n opt = copy.deepcopy(self.opt)\n if param_setting:\n for param, setting in param_setting:\n setattr(opt, param, setting)\n ArgumentParser.update_model_opts(opt)\n getattr(self, methodname)(opt)\n 
if param_setting:\n name = 'test_' + methodname + \"_\" + \"_\".join(\n str(param_setting).split())\n else:\n name = 'test_' + methodname + '_standard'\n setattr(TestModel, name, test_method)\n test_method.__name__ = name\n\n\n'''\nTEST PARAMETERS\n'''\nopt.brnn = False\n\ntest_embeddings = [[],\n [('decoder_type', 'transformer')]\n ]\n\nfor p in test_embeddings:\n _add_test(p, 'embeddings_forward')\n\ntests_encoder = [[],\n [('encoder_type', 'mean')],\n # [('encoder_type', 'transformer'),\n # ('word_vec_size', 16), ('rnn_size', 16)],\n []\n ]\n\nfor p in tests_encoder:\n _add_test(p, 'encoder_forward')\n\ntests_nmtmodel = [[('rnn_type', 'GRU')],\n [('layers', 10)],\n [('input_feed', 0)],\n [('decoder_type', 'transformer'),\n ('encoder_type', 'transformer'),\n ('src_word_vec_size', 16),\n ('tgt_word_vec_size', 16),\n ('rnn_size', 16)],\n [('decoder_type', 'transformer'),\n ('encoder_type', 'transformer'),\n ('src_word_vec_size', 16),\n ('tgt_word_vec_size', 16),\n ('rnn_size', 16),\n ('position_encoding', True)],\n [('coverage_attn', True)],\n [('copy_attn', True)],\n [('global_attention', 'mlp')],\n [('context_gate', 'both')],\n [('context_gate', 'target')],\n [('context_gate', 'source')],\n [('encoder_type', \"brnn\"),\n ('brnn_merge', 'sum')],\n [('encoder_type', \"brnn\")],\n [('decoder_type', 'cnn'),\n ('encoder_type', 'cnn')],\n [('encoder_type', 'rnn'),\n ('global_attention', None)],\n [('encoder_type', 'rnn'),\n ('global_attention', None),\n ('copy_attn', True),\n ('copy_attn_type', 'general')],\n [('encoder_type', 'rnn'),\n ('global_attention', 'mlp'),\n ('copy_attn', True),\n ('copy_attn_type', 'general')],\n [],\n ]\n\nif onmt.models.sru.check_sru_requirement():\n # \"\"\" Only do SRU test if requirment is safisfied. \"\"\"\n # SRU doesn't support input_feed.\n tests_nmtmodel.append([('rnn_type', 'SRU'), ('input_feed', 0)])\n\nfor p in tests_nmtmodel:\n _add_test(p, 'nmtmodel_forward')\n\nfor p in tests_nmtmodel:\n _add_test(p, 'imagemodel_forward')\n\nfor p in tests_nmtmodel:\n p.append(('sample_rate', 5500))\n p.append(('window_size', 0.03))\n # when reasonable, set audio_enc_pooling to 2\n for arg, val in p:\n if arg == \"layers\" and int(val) > 2:\n # Need lengths >= audio_enc_pooling**n_layers.\n # That condition is unrealistic for large n_layers,\n # so leave audio_enc_pooling at 1.\n break\n else:\n p.append(('audio_enc_pooling', '2'))\n _add_test(p, 'audiomodel_forward')\n"
] |
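The shards() generator in the loss code above implements an exact memory trick: each shard's loss is backpropagated into detached chunk copies, and the accumulated chunk gradients are then pushed through the full graph in a single torch.autograd.backward call. A minimal, self-contained sketch of that mechanism (toy tensors, not the OpenNMT-py API):

```python
import torch

# y is attached to the graph through x; we want grad wrt x without holding
# the whole loss graph in memory at once.
x = torch.randn(8, 4, requires_grad=True)
y = x * 2.0

chunks = []
for c in torch.split(y, 2):      # shard along dim 0, shard_size = 2
    c = c.data.clone()           # detach the chunk from the big graph
    c.requires_grad = True
    loss = c.pow(2).sum()        # per-shard loss graph only
    loss.backward()              # fills c.grad; x is untouched so far
    chunks.append(c)

# One backward through the original graph, fed by the collected shard grads.
torch.autograd.backward(torch.split(y, 2), [c.grad for c in chunks])
print(x.grad.shape)              # torch.Size([8, 4])
```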
[
[
"torch.nn.functional.kl_div",
"torch.nn.NLLLoss",
"torch.full",
"torch.autograd.backward",
"torch.split"
],
[
"torch.cat",
"torch.ones",
"torch.zeros"
]
] |
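For the LabelSmoothingLoss defined above, the target distribution puts 1 - label_smoothing on the gold token, spreads the remainder uniformly over the non-special vocabulary, and zeroes padding rows before F.kl_div. A usage sketch with made-up sizes (vocab of 7, padding index 0):

```python
import torch
import torch.nn.functional as F

vocab, batch, smoothing, pad = 7, 3, 0.1, 0

one_hot = torch.full((vocab,), smoothing / (vocab - 2))  # reserve gold + pad
one_hot[pad] = 0
confidence = 1.0 - smoothing

log_probs = F.log_softmax(torch.randn(batch, vocab), dim=-1)  # kl_div wants log-probs
target = torch.tensor([2, 5, pad])

model_prob = one_hot.unsqueeze(0).repeat(batch, 1)
model_prob.scatter_(1, target.unsqueeze(1), confidence)        # gold token prob
model_prob.masked_fill_((target == pad).unsqueeze(1), 0)       # drop pad rows

print(F.kl_div(log_probs, model_prob, reduction='sum').item())
```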
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
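The test module in this row builds its test matrix dynamically: _add_test closes over a parameter list and attaches a generated method to the TestCase with setattr. A minimal sketch of the pattern, with illustrative names:

```python
import copy
import unittest


class TestThing(unittest.TestCase):
    base = {"x": 1}

    def check(self, opt):               # stand-in for the *_forward methods
        self.assertGreater(opt["x"], 0)


def _add_test(param_setting, methodname):
    def test_method(self):
        opt = copy.deepcopy(self.base)
        opt.update(param_setting)
        getattr(self, methodname)(opt)

    suffix = "_".join(map(str, param_setting.values())) or "standard"
    test_method.__name__ = "test_%s_%s" % (methodname, suffix)
    setattr(TestThing, test_method.__name__, test_method)


for p in [{}, {"x": 5}]:
    _add_test(p, "check")

if __name__ == "__main__":
    unittest.main()
```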
Fred62879/ACORN
|
[
"2de0bf747d595dbdc4d67311fb8f46cf47f9b4cb"
] |
[
"utils.py"
] |
[
"import os\nimport torch\nimport numpy as np\nimport skimage.measure\nimport matplotlib.pyplot as plt\n\nfrom tqdm import tqdm\nfrom astropy.io import fits\nfrom astropy.wcs import WCS\nfrom astropy.nddata import Cutout2D\nfrom torchvision.utils import make_grid\n\n\ndef cond_mkdir(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\n\ndef write_psnr(pred_img, gt_img, writer, iter, prefix):\n batch_size = pred_img.shape[0]\n\n pred_img = pred_img.detach().cpu().numpy()\n gt_img = gt_img.detach().cpu().numpy()\n\n psnrs = list()\n for i in range(batch_size):\n p = pred_img[i].transpose(1, 2, 0)\n trgt = gt_img[i].transpose(1, 2, 0)\n\n p = (p / 2.) + 0.5\n p = np.clip(p, a_min=0., a_max=1.)\n trgt = (trgt / 2.) + 0.5\n\n psnr = skimage.metrics.peak_signal_noise_ratio(p, trgt, data_range=1)\n psnrs.append(psnr)\n\n writer.add_scalar(prefix + \"psnr\", np.mean(psnrs), iter)\n\n\ndef write_image_patch_multiscale_summary(image_resolution, patch_size, dataset, model, model_input, gt,\n model_output, writer, total_steps, prefix='train_',\n model_type='multiscale', skip=False):\n if skip:\n return\n\n # uniformly sample the image\n dataset.toggle_eval()\n model_input, gt = dataset[0]\n dataset.toggle_eval()\n\n # convert to cuda and add batch dimension\n tmp = {}\n for key, value in model_input.items():\n if isinstance(value, torch.Tensor):\n tmp.update({key: value[None, ...].cpu()})\n else:\n tmp.update({key: value})\n model_input = tmp\n\n tmp = {}\n for key, value in gt.items():\n if isinstance(value, torch.Tensor):\n tmp.update({key: value[None, ...].cpu()})\n else:\n tmp.update({key: value})\n gt = tmp\n\n # run the model on uniform samples\n n_channels = gt['img'].shape[-1]\n pred_img = process_batch_in_chunks(model_input, model)['model_out']['output']\n\n # get pixel idx for each coordinate\n coords = model_input['fine_abs_coords'].detach().cpu().numpy()\n pixel_idx = np.zeros_like(coords).astype(np.int32)\n pixel_idx[..., 0] = np.round((coords[..., 0] + 1.)/2. * (dataset.sidelength[0]-1)).astype(np.int32)\n pixel_idx[..., 1] = np.round((coords[..., 1] + 1.)/2. * (dataset.sidelength[1]-1)).astype(np.int32)\n pixel_idx = pixel_idx.reshape(-1, 2)\n\n # get pixel idx for each coordinate in frozen patches\n frozen_coords, frozen_values = dataset.get_frozen_patches()\n if frozen_coords is not None:\n frozen_coords = frozen_coords.detach().cpu().numpy()\n frozen_pixel_idx = np.zeros_like(frozen_coords).astype(np.int32)\n frozen_pixel_idx[..., 0] = np.round((frozen_coords[..., 0] + 1.) / 2. * (dataset.sidelength[0] - 1)).astype(np.int32)\n frozen_pixel_idx[..., 1] = np.round((frozen_coords[..., 1] + 1.) / 2. 
* (dataset.sidelength[1] - 1)).astype(np.int32)\n frozen_pixel_idx = frozen_pixel_idx.reshape(-1, 2)\n\n # init a new reconstructed image\n display_pred = np.zeros((*dataset.sidelength, n_channels))\n\n # assign predicted image values into a new array\n # need to use numpy since it supports index assignment\n pred_img = pred_img.reshape(-1, n_channels).detach().cpu().numpy()\n display_pred[[pixel_idx[:, 0]], [pixel_idx[:, 1]]] = pred_img\n\n # assign frozen image values into the array too\n if frozen_coords is not None:\n frozen_values = frozen_values.reshape(-1, n_channels).detach().cpu().numpy()\n display_pred[[frozen_pixel_idx[:, 0]], [frozen_pixel_idx[:, 1]]] = frozen_values\n\n # show reconstructed img\n display_pred = torch.tensor(display_pred)[None, ...]\n display_pred = display_pred.permute(0, 3, 1, 2)\n\n gt_img = gt['img'].reshape(-1, n_channels).detach().cpu().numpy()\n display_gt = np.zeros((*dataset.sidelength, n_channels))\n display_gt[[pixel_idx[:, 0]], [pixel_idx[:, 1]]] = gt_img\n display_gt = torch.tensor(display_gt)[None, ...]\n display_gt = display_gt.permute(0, 3, 1, 2)\n\n fig = dataset.quadtree.draw()\n writer.add_figure(prefix + 'tiling', fig, global_step=total_steps)\n\n if 'img' in gt:\n output_vs_gt = torch.cat((display_gt, display_pred), dim=0)\n writer.add_image(prefix + 'gt_vs_pred', make_grid(output_vs_gt, scale_each=False, normalize=True),\n global_step=total_steps)\n write_psnr(display_pred, display_gt, writer, total_steps, prefix+'img_')\n\n\ndef dict2cuda(a_dict):\n tmp = {}\n for key, value in a_dict.items():\n if isinstance(value, torch.Tensor):\n tmp.update({key: value.cuda()})\n else:\n tmp.update({key: value})\n return tmp\n\n\ndef dict2cpu(a_dict):\n tmp = {}\n for key, value in a_dict.items():\n if isinstance(value, torch.Tensor):\n tmp.update({key: value.cpu()})\n elif isinstance(value, dict):\n tmp.update({key: dict2cpu(value)})\n else:\n tmp.update({key: value})\n return tmp\n\n\ndef process_batch_in_chunks(in_dict, model, max_chunk_size=1024, progress=None):\n\n in_chunked = []\n for key in in_dict:\n chunks = torch.split(in_dict[key], max_chunk_size, dim=1)\n in_chunked.append(chunks)\n\n list_chunked_batched_in = \\\n [{k: v for k, v in zip(in_dict.keys(), curr_chunks)} for curr_chunks in zip(*in_chunked)]\n del in_chunked\n\n list_chunked_batched_out_out = {}\n list_chunked_batched_out_in = {}\n for chunk_batched_in in tqdm(list_chunked_batched_in):\n if torch.cuda.is_available():\n chunk_batched_in = {k: v.cuda() for k, v in chunk_batched_in.items()}\n else:\n chunk_batched_in = {k: v for k, v in chunk_batched_in.items()}\n\n tmp = model(chunk_batched_in)\n tmp = dict2cpu(tmp)\n\n for key in tmp['model_out']:\n if tmp['model_out'][key] is None:\n continue\n\n out_ = tmp['model_out'][key].detach().clone().requires_grad_(False)\n list_chunked_batched_out_out.setdefault(key, []).append(out_)\n\n for key in tmp['model_in']:\n if tmp['model_in'][key] is None:\n continue\n\n in_ = tmp['model_in'][key].detach().clone().requires_grad_(False)\n list_chunked_batched_out_in.setdefault(key, []).append(in_)\n\n del tmp, chunk_batched_in\n\n # Reassemble the output chunks in a batch\n batched_out = {}\n for key in list_chunked_batched_out_out:\n batched_out_lin = torch.cat(list_chunked_batched_out_out[key], dim=1)\n batched_out[key] = batched_out_lin\n\n batched_in = {}\n for key in list_chunked_batched_out_in:\n batched_in_lin = torch.cat(list_chunked_batched_out_in[key], dim=1)\n batched_in[key] = batched_in_lin\n\n return {'model_in': batched_in, 
'model_out': batched_out}\n\n\ndef subsample_dict(in_dict, num_views, multiscale=False):\n if multiscale:\n out = {}\n for k, v in in_dict.items():\n if v.shape[0] == in_dict['octant_coords'].shape[0]:\n # this is arranged by blocks\n out.update({k: v[0:num_views[0]]})\n else:\n # arranged by rays\n out.update({k: v[0:num_views[1]]})\n else:\n out = {key: value[0:num_views, ...] for key, value in in_dict.items()}\n\n return out\n\ndef get_header(dir, sz):\n hdu = fits.open(os.path.join(dir, 'pdr3_dud/calexp-HSC-G-9813-0%2C0.fits'))[1]\n header = hdu.header\n cutout = Cutout2D(hdu.data, position=(sz//2, sz//2),\n size=sz, wcs=WCS(header))\n return cutout.wcs.to_header()\n\n\ndef reconstruct_trail(id, coord_dataset, gt, model_input, model, recon_dir, header):\n n_channels = gt['img'].shape[-1]\n\n pred_img = pred_img.detach().cpu().numpy().reshape(-1, n_channels)\n display_pred = np.zeros((*coord_dataset.sidelength, n_channels))\n display_pred[[pixel_idx[:, 0]], [pixel_idx[:, 1]]] = pred_img\n display_pred = torch.tensor(display_pred)[None, ...]\n display_pred = display_pred.permute(0, 3, 1, 2)\n\n if not saved_gt:\n gt_img = gt['img'].detach().cpu().numpy().reshape(-1, n_channels)\n display_gt = np.zeros((*coord_dataset.sidelength, n_channels))\n display_gt[[pixel_idx[:, 0]], [pixel_idx[:, 1]]] = gt_img\n display_gt = torch.tensor(display_gt)[None, ...]\n display_gt = display_gt.permute(0, 3, 1, 2)\n print(f'Reshape: {time() - start:.02f}')\n\n # record metrics\n start = time()\n psnr, ssim = get_metrics(display_pred, display_gt)\n metrics.update({curr_iter: {'psnr': psnr, 'ssim': ssim}})\n print(f'Metrics: {time() - start:.02f}')\n print(f'Iter: {curr_iter}, PSNR: {psnr:.02f}')\n\n # save images\n pred_out = np.clip((display_pred.squeeze().numpy()/2.) + 0.5, a_min=0., a_max=1.).transpose(1, 2, 0)*255\n pred_out = pred_out.astype(np.uint8)\n pred_fname = os.path.join(eval_dir, f'pred_{curr_iter:06d}.png')\n print('Saving image')\n cv2.imwrite(pred_fname, cv2.cvtColor(pred_out, cv2.COLOR_RGB2BGR))\n\n if not saved_gt:\n gt_out = np.clip((display_gt.squeeze().numpy()/2.) + 0.5, a_min=0., a_max=1.).transpose(1, 2, 0)*255\n gt_out = gt_out.astype(np.uint8)\n gt_fname = os.path.join(eval_dir, 'gt.png')\n cv2.imwrite(gt_fname, cv2.cvtColor(gt_out, cv2.COLOR_RGB2BGR))\n saved_gt = True\n\n # record and save metrics\n psnr, ssim, mse = get_metrics(recon, gt)\n print(f'PSNR: {psnr:.04f}, SSIM: {ssim:.04f}, MSE:{mse:.06f}')\n\n # save images\n recon_fn = os.path.join(recon_dir, '{}'.format(id))\n np.save(recon_fn + '.npy', recon)\n hdu = fits.PrimaryHDU(data=recon, header=header)\n hdu.writeto(recon_fn + '.fits', overwrite=True)\n\n # save tiling\n tiling_fname = os.path.join(recon_dir, 'tiling_{}.pdf'.format(id))\n coord_dataset.quadtree.draw()\n plt.savefig(tiling_fname)\n\n return mse, psnr, ssim\n\n\ndef reconstruct_astro(id, coord_dataset, gt, model_input, model, recon_dir, header):\n n_channels = gt['img'].shape[-1]\n\n with torch.no_grad():\n recon = process_batch_in_chunks\\\n (model_input, model, max_chunk_size=512)['model_out']['output']\n\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n\n # get pixel idx for each coordinate\n coords = model_input['fine_abs_coords'].detach().cpu().numpy()\n pixel_idx = np.zeros_like(coords).astype(np.int32)\n pixel_idx[..., 0] = np.round((coords[..., 0] + 1.)/2.*\n (coord_dataset.sidelength[0] - 1)).astype(np.int32)\n\n pixel_idx[..., 1] = np.round((coords[..., 1] + 1.)/2. 
*\n (coord_dataset.sidelength[1] - 1)).astype(np.int32)\n pixel_idx = pixel_idx.reshape(-1, 2)\n\n recon = recon.detach().cpu().numpy()[0].transpose((2,0,1))\n gt = gt['img'].detach().cpu().numpy()[0].transpose((2,0,1))\n print(gt.shape)\n\n # record and save metrics\n psnr, ssim, mse = get_metrics(recon, gt)\n print(f'PSNR: {psnr:.04f}, SSIM: {ssim:.04f}, MSE:{mse:.06f}')\n\n # save images\n recon_fn = os.path.join(recon_dir, '{}'.format(id))\n np.save(recon_fn + '.npy', recon)\n hdu = fits.PrimaryHDU(data=recon, header=header)\n hdu.writeto(recon_fn + '.fits', overwrite=True)\n\n # save tiling\n tiling_fname = os.path.join(recon_dir, 'tiling_{}.pdf'.format(id))\n coord_dataset.quadtree.draw()\n plt.savefig(tiling_fname)\n\n return mse, psnr, ssim\n\n\ndef get_metrics(pred_img, gt_img):\n #pred_img = pred_img.detach().cpu().numpy().squeeze()\n #gt_img = gt_img.detach().cpu().numpy().squeeze()\n\n p = pred_img.transpose(1, 2, 0)\n trgt = gt_img.transpose(1, 2, 0)\n\n p = (p / 2.) + 0.5\n p = np.clip(p, a_min=0., a_max=1.)\n\n trgt = (trgt / 2.) + 0.5\n\n range = np.max(gt_img) - np.min(gt_img)\n mse = np.mean((p-trgt)**2)\n print(p.shape, trgt.shape)\n psnr = skimage.metrics.peak_signal_noise_ratio(p, trgt, data_range=range)\n ssim = skimage.metrics.structural_similarity(p, trgt, multichannel=True, data_range=range)\n\n return psnr, ssim, mse\n"
] |
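Note that utils.py above imports skimage.measure but then calls skimage.metrics.peak_signal_noise_ratio, which fails at runtime unless skimage.metrics is imported. A corrected, self-contained PSNR helper for the same [-1, 1]-scaled B x C x H x W layout used by write_psnr:

```python
import numpy as np
from skimage.metrics import peak_signal_noise_ratio


def batch_psnr(pred, gt):
    """Mean PSNR over a batch of [-1, 1] images (numpy, B x C x H x W)."""
    psnrs = []
    for p, t in zip(pred, gt):
        p = np.clip(p.transpose(1, 2, 0) / 2.0 + 0.5, 0.0, 1.0)
        t = t.transpose(1, 2, 0) / 2.0 + 0.5
        psnrs.append(peak_signal_noise_ratio(t, p, data_range=1))
    return float(np.mean(psnrs))


print(batch_psnr(np.zeros((2, 3, 8, 8)), np.full((2, 3, 8, 8), 0.5)))
```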
[
[
"torch.cuda.synchronize",
"numpy.clip",
"torch.cat",
"numpy.min",
"matplotlib.pyplot.savefig",
"numpy.save",
"torch.tensor",
"numpy.max",
"numpy.round",
"numpy.mean",
"torch.no_grad",
"torch.cuda.is_available",
"torch.split",
"numpy.zeros_like",
"numpy.zeros"
]
] |
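process_batch_in_chunks above is a chunked-inference loop: split every batched input along dim 1, run the model one chunk at a time, and torch.cat the outputs back together. A generic sketch with a toy stand-in model:

```python
import torch


def run_in_chunks(in_dict, model, max_chunk_size=1024):
    chunked = [torch.split(v, max_chunk_size, dim=1) for v in in_dict.values()]
    outs = []
    for pieces in zip(*chunked):                 # one chunk of every input
        chunk = dict(zip(in_dict.keys(), pieces))
        with torch.no_grad():
            outs.append(model(chunk).cpu())
    return torch.cat(outs, dim=1)                # reassemble along dim 1


model = lambda d: d["coords"] * 2.0              # toy stand-in model
print(run_in_chunks({"coords": torch.ones(1, 5000, 2)}, model).shape)
```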
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
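get_header in the same file pairs astropy's Cutout2D with a WCS so the cropped image keeps valid world coordinates. A sketch against an in-memory FITS HDU (the on-disk calexp-HSC-G-... path in the source is left as-is):

```python
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
from astropy.nddata import Cutout2D

hdu = fits.PrimaryHDU(data=np.zeros((64, 64)))   # stand-in for the calexp file
sz = 32
cutout = Cutout2D(hdu.data, position=(sz // 2, sz // 2), size=sz,
                  wcs=WCS(hdu.header))
# The cutout carries an adjusted WCS that can be written back out.
out = fits.PrimaryHDU(data=cutout.data, header=cutout.wcs.to_header())
out.writeto("cutout.fits", overwrite=True)
```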
t24kc/raspberry-pi
|
[
"505a504019165fb4a66c13bc27eff443bc083b1d"
] |
[
"main/handler.py"
] |
[
"from datetime import datetime\nfrom time import sleep\nfrom lib.mail import Mail\nfrom lib.spread_sheet import SpreadSheet\nfrom sensor.SHT31 import SHT31\nfrom sensor.BH1750FVI import BH1750FVI\nfrom sensor.VL6180 import VL6180X\nfrom sensor.CO2MINI import CO2MINI\nfrom sensor.relay_module import RelayModule\n\nimport matplotlib.pyplot as plt\nimport schedule\nimport yaml\n\nDEFAULT_COLUMNS = [\n \"Time\",\n \"Distance(mm)\",\n \"Light(lux)\",\n \"Light(klux/h)\",\n \"Temperature(C)\",\n \"Humidity(%)\",\n \"CO2(ppm)\",\n \"WaterFlag\",\n]\nDEFAULT_DATA_IMAGE_PATH = \"data/figure.png\"\n\n\nclass Scheduler(object):\n def __init__(self, config):\n self.params = {\n \"distance\": None,\n \"light\": None,\n \"light_klux\": None,\n \"light_total\": 0,\n \"temperature\": None,\n \"humidity\": None,\n \"co2\": None,\n \"alert_remaining\": None,\n }\n\n self._config = config\n self._full_alert_remaining()\n\n self._mail_client = Mail(\n self._config[\"google\"][\"credentials_path\"],\n self._config[\"google\"][\"token_path\"],\n )\n self._spread_sheet_client = SpreadSheet(\n self._config[\"google\"][\"service_account_path\"],\n self._config[\"google\"][\"spread_sheet_id\"],\n )\n if not self._spread_sheet_client.get_label_value(\"A1\"):\n self._spread_sheet_client.append_row(DEFAULT_COLUMNS)\n\n self._vl6180x_sensor = VL6180X()\n self._bh1750fvi_sensor = BH1750FVI()\n self._sht31_sensor = SHT31()\n self._relay_module = RelayModule()\n self._co2mini_sensor = CO2MINI()\n\n def monitoring_job(self):\n self._fetch_params()\n self._logging_params()\n self._alert_params()\n if self._is_water_flag():\n self.turn_on_water()\n\n def mail_job(self):\n dframe = self._spread_sheet_client.get_dataframe(diff_days=7)\n\n kwargs = {\"kind\": \"line\", \"use_index\": True, \"rot\": 45}\n setting_list = [\n {\"title\": \"Light(lux)\", \"x\": \"Time\", \"y\": \"Light(lux)\"},\n {\"title\": \"CO2(ppm)\", \"x\": \"Time\", \"y\": \"CO2(ppm)\"},\n {\"title\": \"Temperature(C)\", \"x\": \"Time\", \"y\": \"Temperature(C)\"},\n {\"title\": \"Humidity(%)\", \"x\": \"Time\", \"y\": \"Humidity(%)\"},\n ]\n fig, axes = plt.subplots(\n ncols=2, nrows=2, figsize=(20, 15), sharex=\"col\")\n for ax, setting in zip(axes.ravel(), setting_list):\n dframe.plot(\n setting[\"x\"], setting[\"y\"], ax=ax, **kwargs, title=setting[\"title\"]\n )\n\n plt.savefig(DEFAULT_DATA_IMAGE_PATH)\n self._send_mail(\n self._config[\"mail\"][\"summary\"][\"subject\"],\n self._config[\"mail\"][\"summary\"][\"body\"],\n DEFAULT_DATA_IMAGE_PATH,\n )\n\n def _fetch_params(self):\n light = self._bh1750fvi_sensor.get_light()\n light_klux = (\n light *\n self._config[\"scheduler\"][\"monitoring_interval_minutes\"] / 60000\n )\n\n self._co2mini_sensor.read_data()\n self.params.update(\n {\n \"distance\": self._vl6180x_sensor.get_distance(),\n \"light\": light,\n \"light_klux\": light_klux,\n \"temperature\": self._sht31_sensor.get_temperature(),\n \"humidity\": self._sht31_sensor.get_humidity(),\n \"co2\": self._co2mini_sensor.get_co2(),\n }\n )\n self.params[\"light_total\"] += light_klux\n\n def _logging_params(self):\n current_datetime = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n values = [\n current_datetime,\n round(self.params[\"distance\"], 1),\n round(self.params[\"light\"], 1),\n round(self.params[\"light_klux\"], 1),\n round(self.params[\"temperature\"], 1),\n round(self.params[\"humidity\"], 1),\n round(self.params[\"co2\"], 1),\n int(self._is_water_flag()),\n ]\n self._spread_sheet_client.append_row(values)\n print(values)\n\n def 
_alert_params(self):\n if not self._is_alert_flag():\n self._full_alert_remaining()\n return\n\n self.params[\"alert_remaining\"] -= 1\n if self.params[\"alert_remaining\"] > 0:\n return\n\n body = \"\"\n if self._is_temperature_upper_limit():\n body = self._config[\"mail\"][\"alert\"][\"body\"][\"temperature_upper\"].format(\n self.params[\"temperature\"]\n )\n elif self._is_temperature_lower_limit():\n body = self._config[\"mail\"][\"alert\"][\"body\"][\"temperature_lower\"].format(\n self.params[\"temperature\"]\n )\n elif self._is_co2_upper_limit():\n body = self._config[\"mail\"][\"alert\"][\"body\"][\"co2_upper\"].format(\n self.params[\"co2\"]\n )\n elif self._is_co2_lower_limit():\n body = self._config[\"mail\"][\"alert\"][\"body\"][\"co2_lower\"].format(\n self.params[\"co2\"]\n )\n self._send_mail(self._config[\"mail\"][\"alert\"][\"subject\"], body)\n self._full_alert_remaining()\n\n def _send_mail(self, subject, body, image_file=None):\n if image_file:\n message = self._mail_client.create_message_with_image(\n self._config[\"mail\"][\"to_address\"], subject, body, image_file\n )\n else:\n message = self._mail_client.create_message(\n self._config[\"mail\"][\"to_address\"], subject, body\n )\n self._mail_client.send_message(message)\n\n def _full_alert_remaining(self):\n self.params[\"alert_remaining\"] = self._config[\"alert\"][\"consecutive_time\"]\n\n def _is_alert_flag(self):\n return (\n self._is_temperature_upper_limit()\n or self._is_temperature_lower_limit()\n or self._is_co2_upper_limit()\n or self._is_co2_lower_limit()\n )\n\n def _is_temperature_upper_limit(self):\n return (\n self._config[\"alert\"][\"temperature_upper_limit\"]\n < self.params[\"temperature\"]\n )\n\n def _is_temperature_lower_limit(self):\n return (\n self.params[\"temperature\"]\n < self._config[\"alert\"][\"temperature_lower_limit\"]\n )\n\n def _is_co2_upper_limit(self):\n return self._config[\"alert\"][\"co2_upper_limit\"] < self.params[\"co2\"]\n\n def _is_co2_lower_limit(self):\n return self.params[\"co2\"] < self._config[\"alert\"][\"co2_lower_limit\"]\n\n def _is_water_flag(self):\n return (\n self.params[\"light_total\"] > self._config[\"sensor\"][\"solar_radiation_limit\"]\n )\n\n def turn_on_water(self):\n self.params[\"light_total\"] = 0\n self._relay_module.setup()\n self._relay_module.turn_on_water(\n self._config[\"sensor\"][\"water_turn_on_time\"])\n\n def turn_off_water(self):\n self._relay_module.turn_off_water()\n\n def cleanup(self):\n self._relay_module.cleanup()\n\n\ndef main():\n with open(\"config.yaml\") as file:\n config = yaml.full_load(file)\n\n scheduler = Scheduler(config)\n schedule.every(config[\"scheduler\"][\"monitoring_interval_minutes\"]).minutes.do(\n scheduler.monitoring_job\n )\n schedule.every(config[\"scheduler\"][\"summary_mail_interval_days\"]).days.do(\n scheduler.mail_job\n )\n\n try:\n while True:\n schedule.run_pending()\n sleep(1)\n except KeyboardInterrupt:\n scheduler.cleanup()\n pass\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
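main() above drives everything through the schedule package: jobs registered at fixed intervals, polled by run_pending() inside a sleep loop. The skeleton, with illustrative intervals:

```python
from time import sleep

import schedule


def monitoring_job():
    print("read sensors, append spreadsheet row")


def mail_job():
    print("plot the last week, mail the summary figure")


schedule.every(10).minutes.do(monitoring_job)    # monitoring_interval_minutes
schedule.every(7).days.do(mail_job)              # summary_mail_interval_days

try:
    while True:
        schedule.run_pending()
        sleep(1)
except KeyboardInterrupt:
    pass                                         # relay cleanup would go here
```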
[
[
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
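The mail_job in that handler renders four sensor series into one 2x2 figure before mailing it. A sketch of the same DataFrame.plot loop against fabricated data (column names mirror DEFAULT_COLUMNS):

```python
import pandas as pd
import matplotlib.pyplot as plt

dframe = pd.DataFrame({
    "Time": pd.date_range("2021-01-01", periods=8, freq="h"),
    "Light(lux)": range(8),
    "CO2(ppm)": range(400, 408),
    "Temperature(C)": range(20, 28),
    "Humidity(%)": range(40, 48),
})

fig, axes = plt.subplots(ncols=2, nrows=2, figsize=(20, 15), sharex="col")
for ax, col in zip(axes.ravel(),
                   ["Light(lux)", "CO2(ppm)", "Temperature(C)", "Humidity(%)"]):
    dframe.plot("Time", col, ax=ax, kind="line", rot=45, title=col)
plt.savefig("figure.png")
```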
tum-ai/expingo-inpainting-service
|
[
"657f65316c179f85507350d55e4ab4ac429552a0"
] |
[
"app/main.py"
] |
[
"import numpy as np\nimport cv2\n\nimport uvicorn\nimport tensorflow as tf\nimport neuralgym as ng\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom pydantic import BaseModel\n\nfrom fastapi import FastAPI, UploadFile, File\nfrom fastapi import HTTPException\nfrom inpaint.inpainting_model import InpaintCAModel\n\n\nclass PaintRequest(BaseModel):\n image: str\n mask: str\n\n\nFLAGS = ng.Config('inpaint.yml')\nMODEL_DIR = \"../model_logs/places2\"\nMODEL = InpaintCAModel()\napp = FastAPI()\n\norigins = [\n \"*\"\n]\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"]\n)\n\n\[email protected](\"/\")\nasync def root():\n return {\"message\": \"Hello World\"}\n\n\[email protected](\"/inpaint/\")\nasync def create_upload_file(request: PaintRequest):\n import base64\n import io\n from PIL import Image\n image = request.image\n mask = request.mask\n\n image = image.split(\",\", 1)[1]\n mask = mask.split(\",\", 1)[1]\n\n base64_decoded_image = base64.b64decode(image)\n\n image = Image.open(io.BytesIO(base64_decoded_image))\n image = np.array(image)\n\n base64_decoded_mask = base64.b64decode(mask)\n\n mask = Image.open(io.BytesIO(base64_decoded_mask))\n mask = np.array(mask)\n\n # mask is always PNG, image might have only 3 dimensions.\n mask = mask[:, :, :3]\n if image.shape[2] == 4:\n image = image[:, :, :3]\n\n # Catch weird error that image is turned if format is jpg and upright\n if image.shape[0] == mask.shape[1] and image.shape[1] == mask.shape[0]:\n image = np.flip(np.transpose(image, (1, 0, 2)), axis=1)\n if image.shape != mask.shape:\n raise HTTPException(\n status_code=400,\n detail=f\"Image and Mask have unequal shape. {image.shape} vs {mask.shape}\")\n\n # Image and Mask must be same dimension by now. Both have dimensions (x, y, 3)\n\n h, w, _ = image.shape\n grid = 8\n image = image[:h // grid * grid, :w // grid * grid, :]\n mask = mask[:h // grid * grid, :w // grid * grid, :]\n print('Shape of image: {}'.format(image.shape))\n\n image = np.expand_dims(image, 0)\n mask = np.expand_dims(mask, 0)\n print(image.shape)\n print(mask.shape)\n input_image = np.concatenate([image, mask], axis=2)\n print(input_image.shape)\n\n sess_config = tf.ConfigProto()\n sess_config.gpu_options.allow_growth = True\n with tf.Session(config=sess_config) as sess:\n input_image = tf.constant(input_image, dtype=tf.float32)\n output = MODEL.build_server_graph(FLAGS, input_image)\n output = (output + 1.) * 127.5\n output = tf.reverse(output, [-1])\n output = tf.saturate_cast(output, tf.uint8)\n # load pretrained model\n vars_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)\n assign_ops = []\n for var in vars_list:\n vname = var.name\n from_name = vname\n var_value = tf.contrib.framework.load_variable(MODEL_DIR, from_name)\n assign_ops.append(tf.assign(var, var_value))\n sess.run(assign_ops)\n print('Model loaded.')\n result = sess.run(output)\n cv2.imwrite(\"output.png\", result[0])\n tf.reset_default_graph()\n #return FileResponse(\"output.png\", media_type=\"image/png\")\n with open(\"output.png\", \"rb\") as image_file:\n image_string = \"data:image/png;base64,{}\".format(base64.b64encode(image_file.read()).decode())\n return {\n \"image\": image_string\n }\n\n\nif __name__ == '__main__':\n uvicorn.run(app, host=\"0.0.0.0\", port=8080)\n"
] |
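The /inpaint/ endpoint above receives images as base64 data URLs; the decode step strips the data:image/png;base64, prefix and round-trips through PIL into numpy. A self-contained sketch (the tiny PNG is generated on the fly):

```python
import base64
import io

import numpy as np
from PIL import Image

buf = io.BytesIO()
Image.new("RGB", (4, 4), (255, 0, 0)).save(buf, format="PNG")
data_url = "data:image/png;base64," + base64.b64encode(buf.getvalue()).decode()

payload = data_url.split(",", 1)[1]              # drop the data-URL prefix
image = np.array(Image.open(io.BytesIO(base64.b64decode(payload))))
print(image.shape)                               # (4, 4, 3)
```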
[
[
"tensorflow.reverse",
"numpy.expand_dims",
"tensorflow.constant",
"tensorflow.get_collection",
"tensorflow.assign",
"numpy.transpose",
"numpy.concatenate",
"tensorflow.ConfigProto",
"tensorflow.reset_default_graph",
"tensorflow.contrib.framework.load_variable",
"tensorflow.Session",
"tensorflow.saturate_cast",
"numpy.array"
]
] |
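The checkpoint restore in create_upload_file uses a TF 1.x idiom: enumerate global variables, read each value with tf.contrib.framework.load_variable, and run the resulting assign ops in the session. A sketch of just that idiom (TF 1.x only, matching the version list below; MODEL_DIR is the placeholder path from the source):

```python
import tensorflow as tf  # requires TF 1.x: tf.Session / tf.contrib

MODEL_DIR = "../model_logs/places2"

with tf.Session() as sess:
    # ... build the inference graph first (e.g. MODEL.build_server_graph) ...
    assign_ops = []
    for var in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
        value = tf.contrib.framework.load_variable(MODEL_DIR, var.name)
        assign_ops.append(tf.assign(var, value))
    sess.run(assign_ops)
```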
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
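One more detail from that endpoint worth isolating: image and mask are both cropped so height and width are multiples of the model's grid (8), then batched and concatenated along the width axis before being fed to the graph. A numpy-only sketch:

```python
import numpy as np

grid = 8
image = np.zeros((101, 67, 3), dtype=np.uint8)
mask = np.zeros_like(image)

h, w, _ = image.shape
image = image[:h // grid * grid, :w // grid * grid, :]   # 101x67 -> 96x64
mask = mask[:h // grid * grid, :w // grid * grid, :]

input_image = np.concatenate([np.expand_dims(image, 0),
                              np.expand_dims(mask, 0)], axis=2)
print(input_image.shape)                                 # (1, 96, 128, 3)
```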
rkent/BDBD
|
[
"c5d391da84faf5607c443078781f8b4e1c017dd5",
"c5d391da84faf5607c443078781f8b4e1c017dd5"
] |
[
"src/bdbd/src/bdbd/analysis/motion/NewRaph10.py",
"src/bdbd/src/bdbd/analysis/motion/NewRaph8.py"
] |
[
"# newton-raphson iteration of motion equations\n\nimport numpy as np\nimport rospy\nimport math\nimport time\nfrom bdbd_common.utils import fstr, gstr\nfrom bdbd_common.msg import LeftRights\nfrom bdbd_common.geometry import lr_est, default_lr_model, D_TO_R\n\ndef estr(a):\n return fstr(a, fmat='10.7g', n_per_line=10)\n\nclass NewRaph():\n def __init__(self, n, dt\n ,lr_model=default_lr_model()\n ,start_pose=(0.0, 0.0, 0.0)\n ,start_twist=(0.0, 0.0, 0.0)\n ):\n self.lr_model = lr_model\n self.n = n\n self.dt = dt\n self.start_pose = start_pose\n self.start_twist = start_twist\n\n # prep constants for calculations\n alr_model = np.array(self.lr_model)\n self.bhes = (dt * alr_model[0], dt * alr_model[1], dt * alr_model[2])\n (_, _, qhx) = self.bhes[0]\n (_, _, qhy) = self.bhes[1]\n (_, _, qho) = self.bhes[2]\n #print('(bhxl, bhxr, qhx): ' + estr((bhxl, bhxr, qhx)))\n #print('(bhyl, bhyr, qhy): ' + estr((bhyl, bhyr, qhy)))\n #print('(bhol, bhor, qho): ' + estr((bhol, bhor, qho)))\n (alphax, alphay, alphao) = 1.0 - np.array((qhx, qhy, qho))\n #print('(alphax, alphay, alphao):' + estr((alphax, alphay, alphao)))\n \n # alpha ** j\n alphaxj = [1.0]\n alphayj = [1.0]\n alphaoj = [1.0]\n betaj = [dt]\n for i in range(1, n):\n alphaxj.append(alphaxj[i-1] * alphax)\n alphayj.append(alphayj[i-1] * alphay)\n alphaoj.append(alphaoj[i-1] * alphao)\n betaj.append(betaj[i-1] + dt * alphaoj[i])\n self.alphaxj = np.array(alphaxj)\n self.alphayj = np.array(alphayj)\n self.alphaoj = np.array(alphaoj)\n self.betaj = np.array(betaj)\n #print('alphaxj:' + estr(self.alphaxj))\n #print('alphayj:' + estr(self.alphayj))\n #print('alphaoj:' + estr(self.alphaoj))\n #print('betaj:' + estr(self.betaj))\n\n def poses(self, ls, rs,\n details=False\n ):\n als = np.asarray(ls)\n ars = np.asarray(rs)\n self.als = als\n self.ars = ars\n #print('als:' + estr(als))\n (px0, py0, theta0) = self.start_pose\n (bhxl, bhxr, _) = self.bhes[0]\n (bhyl, bhyr, _) = self.bhes[1]\n (bhol, bhor, _) = self.bhes[2]\n (vxw0, vyw0, omega0) = self.start_twist\n n = self.n\n dt = self.dt\n alphaxj = self.alphaxj\n alphayj = self.alphayj\n alphaoj = self.alphaoj\n\n # initial robot velocities\n vx0 = vxw0 * math.cos(theta0) + vyw0 * math.cos(theta0)\n vy0 = -vxw0 * math.sin(theta0) + vyw0 * math.cos(theta0)\n\n # twist\n vxj = np.empty(n)\n vyj = np.empty(n)\n omegaj = np.empty(n)\n vxj[0] = vx0\n vyj[0] = vy0\n omegaj[0] = omega0\n bmotorxj = bhxl * als + bhxr * ars\n bmotoryj = bhyl * als + bhyr * ars\n bmotoroj = bhol * als + bhor * ars\n for i in range(1, n):\n vxj[i] = vx0 * alphaxj[i] + np.dot(alphaxj[i-1::-1], bmotorxj[1:i+1])\n vyj[i] = vy0 * alphayj[i] + np.dot(alphayj[i-1::-1], bmotoryj[1:i+1])\n omegaj[i] = omega0 * alphaoj[i] + np.dot(alphaoj[i-1::-1], bmotoroj[1:i+1])\n\n if details:\n print(estr({'alphaoj[n-2::-1]': alphaoj[n-2::-1]}))\n print(estr({'bmotoroj[1:n]': bmotoroj[1:n]}))\n # pose\n pxj = np.empty(n)\n pyj = np.empty(n)\n thetaj = np.empty(n)\n pxj[0] = px0\n pyj[0] = py0\n thetaj[0] = theta0\n for i in range(1, n):\n thetaj[i] = theta0 + omega0 * (self.betaj[i] - dt) \\\n + np.dot(self.betaj[i-1::-1], bmotoroj[1:i+1])\n\n # intermediate values as vectors\n cosj = np.cos(thetaj)\n sinj = np.sin(thetaj)\n vxcj = vxj * cosj\n vxsj = vxj * sinj\n vycj = vyj * cosj\n vysj = vyj * sinj\n vxwj = vxcj - vysj\n vywj = vxsj + vycj\n\n pxj[1:] = px0 + dt * np.cumsum(vxwj[1:])\n pyj[1:] = py0 + dt * np.cumsum(vywj[1:])\n\n # intermediate results\n self.cosj = cosj\n self.sinj = sinj\n self.vxcj = vxcj\n self.vxsj = vxsj\n self.vycj = 
vycj\n self.vysj = vysj\n self.vxwj = vxwj\n self.vywj = vywj\n\n self.vxj = vxj\n self.vyj = vyj\n self.omegaj = omegaj\n self.pxj = pxj\n self.pyj = pyj\n self.thetaj = thetaj\n return (pxj, pyj, thetaj, vxj, vyj, omegaj)\n\n def gradients(self):\n # gradients\n\n (bhxl, bhxr, _) = self.bhes[0]\n (bhyl, bhyr, _) = self.bhes[1]\n (bhol, bhor, _) = self.bhes[2]\n n = self.n\n dt = self.dt\n alphaxj = self.alphaxj\n alphayj = self.alphayj\n betaj = self.betaj\n\n cosj = self.cosj\n sinj = self.sinj\n vxcj = self.vxcj\n vxsj = self.vxsj\n vycj = self.vycj\n vysj = self.vysj\n\n dpxdl = np.zeros((n,n))\n dpydl = np.zeros((n,n))\n dpxdr = np.zeros((n,n))\n dpydr = np.zeros((n,n))\n\n for i in range(1, n):\n # gradients\n for k in range(1, i+1):\n doto = np.dot((-vxsj[k:i+1] - vycj[k:i+1]), betaj[:i+1-k])\n dotx = np.dot(cosj[k:i+1], alphaxj[:i+1-k])\n doty = np.dot(-sinj[k:i+1], alphayj[:i+1-k])\n dpxdl[i,k] = dt * (\n +bhol * doto\n +bhxl * dotx\n +bhyl * doty\n )\n dpxdr[i,k] = dt * (\n +bhor * doto\n +bhxr * dotx\n +bhyr * doty\n )\n #if i == 1 and k == 1:\n # print(estr({'bhor': bhor, 'doto': doto, 'bhxr': bhxr, 'dotx': dotx,\n # 'bhyr': bhyr, 'doty': doty}))\n doto = np.dot((vxcj[k:i+1] - vysj[k:i+1]), betaj[:i+1-k])\n dotx = np.dot(sinj[k:i+1], alphaxj[:i+1-k])\n doty = np.dot(cosj[k:i+1], alphayj[:i+1-k])\n dpydl[i,k] = dt * (\n +bhol * doto\n +bhxl * dotx\n +bhyl * doty\n )\n dpydr[i,k] = dt * (\n +bhor * doto\n +bhxr * dotx\n +bhyr * doty\n )\n self.dpxdl = dpxdl\n self.dpydl = dpydl\n self.dpxdr = dpxdr\n self.dpydr = dpydr\n\n return (dpxdl, dpxdr, dpydl, dpydr)\n\n def seconds(self):\n # second partial derivatives at final location\n\n (bhxl, bhxr, _) = self.bhes[0]\n (bhyl, bhyr, _) = self.bhes[1]\n (bhol, bhor, _) = self.bhes[2]\n n = self.n\n dt = self.dt\n alphaxj = self.alphaxj\n alphayj = self.alphayj\n betaj = self.betaj\n\n cosj = self.cosj\n sinj = self.sinj\n vxwj = self.vxwj\n vywj = self.vywj\n\n d2pxdldl = np.zeros((n, n))\n d2pxdldr = np.zeros((n, n))\n d2pxdrdr = np.zeros((n, n))\n d2pydldl = np.zeros((n, n))\n d2pydldr = np.zeros((n, n))\n d2pydrdr = np.zeros((n, n))\n\n # This could be vectorized, but instead I do it discretely to more closely\n # match the C++ version which is what we will actually use.\n for j in range(1, n):\n vxwdt = vxwj[j] * dt\n vywdt = vywj[j] * dt\n sdt = sinj[j] * dt\n cdt = cosj[j] * dt\n for k in range(1, j + 1):\n betaljk = betaj[j-k] * bhol\n betarjk = betaj[j-k] * bhor\n alphaxljk = alphaxj[j-k] * bhxl\n alphaxrjk = alphaxj[j-k] * bhxr\n alphayljk = alphayj[j-k] * bhyl\n alphayrjk = alphayj[j-k] * bhyr\n for m in range(1, j + 1):\n betaljm = betaj[j-m] * bhol\n betarjm = betaj[j-m] * bhor\n alphaxljm = alphaxj[j-m] * bhxl\n alphaxrjm = alphaxj[j-m] * bhxr\n alphayljm = alphaxj[j-m] * bhyl\n alphayrjm = alphaxj[j-m] * bhyr\n\n sumxll = (\n -vxwdt * betaljk * betaljm\n +sdt * (-betaljk * alphaxljm -alphaxljk * betaljm)\n +cdt * (-betaljk * alphayljm -alphayljk * betaljm)\n )\n sumxlr = (\n -vxwdt * betaljk * betarjm\n +sdt * (-betaljk * alphaxrjm -alphaxljk * betarjm)\n +cdt * (-betaljk * alphayrjm -alphayljk * betarjm)\n )\n sumxrr = (\n -vxwdt * betarjk * betarjm\n +sdt * (-betarjk * alphaxrjm -alphaxrjk * betarjm)\n +cdt * (-betarjk * alphayrjm -alphayrjk * betarjm)\n )\n sumyll = (\n -vywdt * betaljk * betaljm\n +sdt * (-betaljk * alphayljm -alphayljk * betaljm)\n +cdt * (betaljk * alphayljm +alphayljk * betaljm)\n )\n sumylr = (\n -vywdt * betaljk * betarjm\n +sdt * (-betaljk * alphayrjm -alphayljk * betarjm)\n +cdt * (betaljk * 
alphayrjm +alphayljk * betarjm)\n )\n sumyrr = (\n -vywdt * betarjk * betarjm\n +sdt * (-betarjk * alphayrjm -alphayrjk * betarjm)\n +cdt * (betarjk * alphayrjm +alphayrjk * betarjm)\n )\n #print('i,j,k,m', i, j, k, m)\n d2pxdldl[k, m] += sumxll\n d2pxdldr[k, m] += sumxlr\n d2pxdrdr[k, m] += sumxrr\n d2pydldl[k, m] += sumyll\n d2pydldr[k, m] += sumylr\n d2pydrdr[k, m] += sumyrr\n\n self.d2pxdldl = d2pxdldl\n self.d2pxdldr = d2pxdldr\n self.d2pxdrdr = d2pxdrdr\n self.d2pydldl = d2pydldl\n self.d2pydldr = d2pydldr\n self.d2pydrdr = d2pydrdr\n return (d2pxdldl, d2pxdldr, d2pxdrdr, d2pydldl, d2pydldr, d2pydrdr)\n\n def loss(self,\n target_pose=(0.0, 0.0, 0.0),\n target_twist=(0.0, 0.0, 0.0),\n target_lr = (0.0, 0.0),\n Wmax=1.0e-4,\n Wjerk=1.0e-4,\n Wback=1.0e-4,\n mmax=1.0,\n details=False\n ):\n self.target_pose = target_pose\n self.target_twist = target_twist\n self.target_lr = target_lr\n self.Wmax = Wmax\n self.Wjerk = Wjerk\n self.Wback = Wback\n self.mmax = mmax\n return self.reloss(details=details)\n\n def reloss(self, details=False):\n target_pose = self.target_pose\n target_twist = self.target_twist\n target_lr = self.target_lr\n Wmax = self.Wmax\n Wjerk = self.Wjerk\n Wback = self.Wback\n mmax = self.mmax\n # given pose calculations, determine the loss\n vxj = self.vxj\n vyj = self.vyj\n omegaj = self.omegaj\n pxj = self.pxj\n pyj = self.pyj\n thetaj = self.thetaj\n\n lefts = self.als\n rights = self.ars\n\n # values requiring summing over i\n sumMax = 0.1 * Wmax * (\n np.power(lefts, 10.0).sum() +np.power(rights, 10.0).sum()\n ) / mmax ** 10\n\n # backing term\n sumBack = 0.1 * Wback * np.power((lefts + rights).clip(max=0.0), 10).sum()\n\n ldiff = lefts[1:] - lefts[:-1]\n rdiff = rights[1:] - rights[:-1]\n sumJerk = 0.5 * Wjerk * (np.square(ldiff).sum() + np.square(rdiff).sum())\n\n # values based on final targets\n vals = np.asarray([\n pxj[-1]\n , pyj[-1]\n , thetaj[-1]\n , vxj[-1]\n , vyj[-1]\n , omegaj[-1]\n , lefts[-1]\n , rights[-1]\n ])\n targets = np.concatenate([target_pose, target_twist, target_lr])\n #targets = np.concatenate([target_pose, target_twist[:1], target_lr])\n diffs = vals - targets\n # normalize theta difference from -pi to pi\n diffs[2] = (diffs[2] + math.pi) % (2 * math.pi) - math.pi\n sumTargets = 0.5 * np.square(diffs).sum()\n loss = sumMax + sumJerk + sumTargets + sumBack\n if details:\n print('target losses: ' + estr(0.5 * np.square(vals - targets)))\n print(estr({'loss': loss, 'sumMax': sumMax, 'sumJerk': sumJerk, 'sumTargets': sumTargets, 'sumBack': sumBack}))\n print(fstr({'vals': vals}, fmat='15.12g'))\n print(fstr({'targets': targets}))\n print(fstr({'diffs': diffs}))\n self.lossValue = loss\n return loss\n\n def jacobian(self):\n # the 1st derivative of the loss function\n vxj = self.vxj\n vyj = self.vyj\n omegaj = self.omegaj\n pxj = self.pxj\n pyj = self.pyj\n thetaj = self.thetaj\n\n (pxt, pyt, thetat) = self.target_pose\n (vxt, vyt, omegat) = self.target_twist\n (leftt, rightt) = self.target_lr\n dpxdl = self.dpxdl\n dpydl = self.dpydl\n dpxdr = self.dpxdr\n dpydr = self.dpydr\n\n (bhxl, bhxr, _) = self.bhes[0]\n (bhyl, bhyr, _) = self.bhes[1]\n (bhol, bhor, _) = self.bhes[2]\n alphaxj = self.alphaxj\n alphayj = self.alphayj\n alphaoj = self.alphaoj\n betaj = self.betaj\n Wmax = self.Wmax\n Wjerk = self.Wjerk\n Wback = self.Wback\n mmax = self.mmax\n\n lefts = self.als\n rights = self.ars\n leftsp9 = np.power(lefts / mmax, 9)\n rightsp9 = np.power(rights / mmax, 9)\n lprsp9 = np.power((lefts + rights).clip(max=0.0), 9)\n n = len(lefts)\n\n 
dlefts = np.zeros([n])\n drights = np.zeros([n])\n difft = (thetaj[-1] - thetat + math.pi) % (2 * math.pi) - math.pi\n\n for k in range(1, n):\n dlefts[k] = (\n +(vxj[-1] - vxt) * bhxl * alphaxj[n-1-k]\n +(vyj[-1] - vyt) * bhyl * alphayj[n-1-k]\n +(omegaj[-1] - omegat) * bhol * alphaoj[n-1-k]\n +(difft) * bhol * betaj[n-1-k]\n +(pxj[-1] - pxt) * dpxdl[-1, k]\n +(pyj[-1] - pyt) * dpydl[-1, k]\n +Wmax * leftsp9[k] / mmax\n +Wback * lprsp9[k]\n +Wjerk * (2 * lefts[k] -lefts[k-1] -lefts[min(k+1, n-1)])\n )\n drights[k] = (\n +(vxj[-1] - vxt) * bhxr * alphaxj[n-1-k]\n +(vyj[-1] - vyt) * bhyr * alphayj[n-1-k]\n +(omegaj[-1] - omegat) * bhor * alphaoj[n-1-k]\n +(difft) * bhor * betaj[n-1-k]\n +(pxj[-1] - pxt) * dpxdr[-1, k]\n +(pyj[-1] - pyt) * dpydr[-1, k]\n +Wmax * rightsp9[k]\n +Wback * lprsp9[k]\n +Wjerk * (2 * rights[k] -rights[k-1] -rights[min(k+1, n-1)])\n )\n # TODO: check this\n dlefts[-1] += (lefts[-1] - leftt)\n drights[-1] += (rights[-1] - rightt)\n self.dlefts = dlefts\n self.drights = drights\n return (dlefts, drights)\n\n def hessian(self):\n # second derivative of the loss function\n pxj = self.pxj\n pyj = self.pyj\n (pxt, pyt, _) = self.target_pose\n dpxdl = self.dpxdl\n dpydl = self.dpydl\n dpxdr = self.dpxdr\n dpydr = self.dpydr\n (bhxl, bhxr, _) = self.bhes[0]\n (bhyl, bhyr, _) = self.bhes[1]\n (bhol, bhor, _) = self.bhes[2]\n alphaxj = self.alphaxj\n alphayj = self.alphayj\n alphaoj = self.alphaoj\n betaj = self.betaj\n\n Wmax = self.Wmax\n Wjerk = self.Wjerk\n Wback = self.Wback\n mmax = self.mmax\n\n lefts = self.als\n rights = self.ars\n d2pxdldl = self.d2pxdldl\n d2pxdldr = self.d2pxdldr\n d2pxdrdr = self.d2pxdrdr\n d2pydldl = self.d2pydldl\n d2pydldr = self.d2pydldr\n d2pydrdr = self.d2pydrdr\n n = len(lefts) - 1\n\n # We'll define this as 0 -> n-1 are lefts[1:], n -> 2n-1 are rights[1:]\n hess = np.empty([2*n, 2*n])\n\n # values that vary with each k, m value\n deltapxn = pxj[-1] - pxt\n deltapyn = pyj[-1] - pyt\n for i in range(0, 2*n):\n k = i % n + 1\n\n kleft = (i < n)\n if kleft:\n dpxdu = dpxdl[n, k]\n dpydu = dpydl[n, k]\n dvxdu = alphaxj[n-k] * bhxl\n dvydu = alphayj[n-k] * bhyl\n domdu = alphaoj[n-k] * bhol\n dthdu = betaj[n-k] * bhol\n else:\n dpxdu = dpxdr[n, k]\n dpydu = dpydr[n, k]\n dvxdu = alphaxj[n-k] * bhxr\n dvydu = alphayj[n-k] * bhyr\n domdu = alphaoj[n-k] * bhor\n dthdu = betaj[n-k] * bhor\n\n for j in range(0, 2*n):\n m = j % n + 1\n mleft = (j < n)\n if mleft:\n dpxds = dpxdl[n, m]\n dpyds = dpydl[n, m]\n dvxds = alphaxj[n-m] * bhxl\n dvyds = alphayj[n-m] * bhyl\n domds = alphaoj[n-m] * bhol\n dthds = betaj[n-m] * bhol\n \n if kleft:\n d2px = d2pxdldl[k, m]\n d2py = d2pydldl[k, m]\n else:\n # note d2pxdrdl[i,j] = d2pxdldr[j,i]\n d2px = d2pxdldr[m, k]\n d2py = d2pydldr[m, k]\n else:\n dpxds = dpxdr[n, m]\n dpyds = dpydr[n, m]\n dvxds = alphaxj[n-m] * bhxr\n dvyds = alphayj[n-m] * bhyr\n domds = alphaoj[n-m] * bhor\n dthds = betaj[n-m] * bhor\n if kleft:\n d2px = d2pxdldr[k, m]\n d2py = d2pydldr[k, m]\n else:\n d2px = d2pxdrdr[k, m]\n d2py = d2pydrdr[k, m]\n hess[i, j] = (\n deltapxn * d2px + dpxdu * dpxds +\n deltapyn * d2py + dpydu * dpyds +\n dvxdu * dvxds + dvydu * dvyds + domdu * domds + dthdu * dthds\n )\n\n # values that require k == m\n for i in range(0, 2*n):\n k = i % n + 1\n kleft = (i < n)\n # max term\n # TODO: I need factor of 9 here?\n hess[i, i] += 9. * (Wmax / mmax**2) * (lefts[k]**8 if kleft else rights[k]**8)\n # back term\n if (lefts[k] + rights[k]) < 0.0:\n hess[i, i] += 9. 
* Wback * (lefts[k] + rights[k])**8\n # motor target value\n if k == n:\n hess[i, i] += 1.0\n # jerk term\n hess[i, i] += 2 *Wjerk\n if k > 1:\n hess[i, i-1] -= Wjerk\n if k == n:\n hess[i, i] -= Wjerk\n else:\n hess[i, i+1] -= Wjerk\n \n self.hess = hess\n return hess\n\n def dloss_dleft(self, j, eps=1.e-3):\n # numerical estimate of loss derivative at left[j]\n base_als = self.als.copy()\n\n lefts = base_als.copy()\n lefts[j] += eps\n nr.poses(lefts, self.ars)\n loss_plus = nr.reloss()\n\n lefts = base_als.copy()\n lefts[j] -= eps\n nr.poses(lefts, self.ars)\n loss_minus = nr.reloss()\n self.als = base_als\n\n dloss = 0.5 * (loss_plus - loss_minus) / eps\n return dloss\n\n def d2loss_dl_dl(self, k, eps=0.0001):\n # numerical estimate of second derivative of loss dl dl\n base_als = self.als.copy()\n n = len(self.als)\n\n d2lossj = [0.0]\n for j in range(1, n):\n lefts = base_als.copy()\n lefts[k] += eps\n self.als = lefts\n #dlossp = self.dloss_dleft(j, eps)\n nr.poses(lefts, self.ars)\n nr.gradients()\n nr.jacobian()\n dlossp = self.dlefts[j]\n pxp = self.pxj[-1]\n\n lefts = base_als.copy()\n lefts[k] -= eps\n self.als = lefts\n #dlossm = self.dloss_dleft(j, eps)\n nr.poses(lefts, self.ars)\n nr.gradients()\n nr.jacobian()\n dlossm = self.dlefts[j]\n pxm = self.pxj[-1]\n d2lossj.append(0.5 * (dlossp - dlossm) / eps)\n #print(estr({'pxp': pxp, 'pxm': pxm, 'pxp - pxm': pxp - pxm}))\n print(estr(({'dlossp': dlossp, 'dlossm': dlossm, 'dlossp-dlossm': dlossp-dlossm, 'wjerk': self.Wjerk})))\n self.als = base_als\n\n return d2lossj\n \n def dloss_dright(self, j, eps=0.0001):\n # numerical estimate of loss derivative at right[j]\n base_ars = self.ars.copy()\n\n rights = base_ars.copy()\n rights[j] += eps\n nr.poses(self.als, rights)\n loss_plus = nr.reloss()\n\n rights = base_ars.copy()\n rights[j] -= eps\n nr.poses(self.als, rights)\n loss_minus = nr.reloss()\n self.ars = base_ars\n\n dloss = 0.5 * (loss_plus - loss_minus) / eps\n\n return dloss\n\nif __name__ == '__main__':\n from bdbd_common.pathPlan2 import PathPlan\n import matplotlib.pyplot as plt\n\n fig = plt.figure(figsize=(12,4))\n\n dt = 0.05\n lr_model = default_lr_model()\n #lr_model = ((1.0, 1.0, 10.0), (-1.0, 1.0, 10.0), (-1.0, 10.0, 10.0))\n start_pose = [0.0, 0.0, 0.0]\n start_twist = [0.0, 0.0, 0.0]\n target_pose = [0.2, .1, D_TO_R * 180]\n target_twist = [0.0, 0.0, 0.0]\n approach_rho = 0.05\n min_rho = 0.02\n cruise_v = 0.25\n lr_start = (0.0, 0.0)\n gauss_iters = 0\n nr_iters = 20\n Wmax = dt * 1.e-3\n #Wmax = 0.0\n Wjerk = dt * 1.e-3\n Wback = 1.0\n #Wback = 0.0\n NRstart = 1.0\n NRfact = 2\n maxSlew = 1.00\n testNR = False\n\n pp = PathPlan(approach_rho=approach_rho, min_rho=min_rho)\n pathPlan = pp.start2(start_pose, target_pose)\n print('path_plan:')\n for segment in pathPlan:\n print(fstr(segment, fmat='10.7f'))\n\n # estimate left, right to achieve the path\n speedPlan = pp.speedPlan(start_twist[0], cruise_v, target_twist[0], u=0.10)\n print('speed_plan:')\n for segment in speedPlan:\n print(fstr(segment, fmat='10.7f'))\n\n vxr0 = start_twist[0] * math.cos(start_pose[2]) + start_twist[1] * math.sin(start_pose[2])\n vyr0 = -start_twist[0] * math.sin(start_pose[2]) + start_twist[1] * math.cos(start_pose[2])\n last_vx = vxr0\n last_omega = start_twist[2]\n\n vxres = [vxr0]\n vyres = [vyr0]\n omegas = [start_twist[2]]\n vvs = [pp.v(0.0)]\n vvs[0]['left'] = lr_start[0]\n vvs[0]['right'] = lr_start[1]\n lefts = [lr_start[0]]\n rights = [lr_start[1]]\n tt = 0.0\n tees = [tt]\n while True:\n tt += dt\n vv = pp.v(tt)\n 
vvs.append(vv)\n # vv gives vhat is in wheel frame. We need to convert to robot frame.\n vxres.append(vv['v'])\n vyres.append(vv['omega'] * pp.dwheel)\n omegas.append(vv['omega'])\n\n (left, right, last_vx, last_omega) = lr_est(vv['v'], vv['omega'], last_vx, last_omega, dt)\n lefts.append(left)\n rights.append(right)\n tees.append(tt)\n vv['left'] = left\n vv['right'] = right\n\n if vv['fraction'] > 0.9999:\n break\n for seg in vvs:\n print(estr(seg))\n\n # send to C++ node for processing\n rospy.init_node('NewRaph')\n lrPub = rospy.Publisher('rawLR', LeftRights, queue_size=10)\n lrMsg = LeftRights()\n lrMsg.dt = dt\n lrMsg.lefts = lefts\n lrMsg.rights = rights\n start_lefts = lefts.copy()\n start_rights = rights.copy()\n\n while not rospy.is_shutdown():\n lefts = start_lefts.copy()\n rights = start_rights.copy()\n lrPub.publish(lrMsg)\n print('\\n***** publishing rawLR *****')\n\n n = len(lefts)\n nr = NewRaph(n, dt\n ,lr_model=lr_model\n ,start_pose=start_pose\n ,start_twist=start_twist\n )\n eps = 1.0\n nhess = len(lefts) - 1\n axis3 = None\n gauss_count = 0\n nr_count = 0\n while True:\n if rospy.is_shutdown():\n break\n base_lefts = lefts.copy()\n base_rights = rights.copy()\n rospy.sleep(0.01)\n (pxj, pyj, thetaj, vxj, vyj, omegaj) = nr.poses(lefts, rights)\n loss = nr.loss(mmax=1.0, target_pose=target_pose, Wmax=Wmax, Wjerk=Wjerk, Wback=Wback, details=True)\n print('loss: ' + estr(loss))\n (dpxdl, dpxdr, dpydl, dpydr) = nr.gradients()\n (dlefts, drights) = nr.jacobian()\n #print(gstr({'dlefts': dlefts, '\\ndrights': drights}))\n\n if gauss_count < gauss_iters:\n #eps = 1.0\n gauss_count += 1\n slew = 0.0\n for i in range(1, n):\n if abs(dlefts[i]) > slew:\n slew = abs(dlefts[i])\n if abs(drights[i]) > slew:\n slew = abs(drights[i])\n # line search over deltax looking for best eps\n best_eps = 0.0\n best_loss = loss\n worst_eps = maxSlew / slew\n print('eps limited to ', worst_eps)\n eps = min(eps, worst_eps)\n for lcount in range(4):\n last_eps = eps\n for i in range(1, n):\n lefts[i] = base_lefts[i] - eps*dlefts[i]\n rights[i] = base_rights[i] - eps*drights[i]\n nr.poses(lefts, rights)\n loss = nr.reloss()\n if loss > best_loss:\n worst_eps = eps\n else:\n best_eps = eps\n best_loss = loss\n if eps * 2 < worst_eps:\n eps *= 2\n else:\n eps = 0.5 * (best_eps + worst_eps)\n print(estr({'(G)eps': last_eps, 'loss': loss, 'best_eps': best_eps, 'worst_eps': worst_eps, 'new_eps': eps}))\n \n eps = best_eps\n for i in range(1, n):\n lefts[i] = base_lefts[i] - eps*dlefts[i]\n rights[i] = base_rights[i] - eps*drights[i]\n\n else:\n if nr_count >= nr_iters:\n break\n nr_count += 1\n nr.seconds()\n hess = nr.hessian()\n b = np.concatenate([-nr.dlefts[1:], -nr.drights[1:]])\n deltax = np.linalg.solve(nr.hess, b)\n slew = np.amax(np.absolute(deltax))\n\n # line search over deltax looking for best eps\n best_eps = 0.0\n best_loss = loss\n worst_eps = maxSlew / slew\n eps = min(eps, worst_eps)\n for lcount in range(4):\n last_eps = eps\n lefts[1:] = base_lefts[1:] + eps * deltax[:nhess]\n rights[1:] = base_rights[1:] + eps * deltax[nhess:]\n nr.poses(lefts, rights)\n loss = nr.reloss()\n if loss > best_loss:\n worst_eps = eps\n else:\n best_eps = eps\n best_loss = loss\n if eps * 2 < worst_eps:\n eps *= 2\n else:\n eps = 0.5 * (best_eps + worst_eps)\n print(estr({'(N)eps': last_eps, 'loss': loss, 'best_eps': best_eps, 'worst_eps': worst_eps, 'new_eps': eps}))\n \n eps = best_eps\n #eps = min(best_eps, 1.0)\n print('using eps: ', eps)\n lefts[1:] = base_lefts[1:] + eps * deltax[:nhess]\n rights[1:] 
= base_rights[1:] + eps * deltax[nhess:]\n\n fig.clf()\n plt1 = fig.add_subplot(131)\n #plt1.axis([0.0, tfPath.lrs[-1]['t'], -1.5, 1.5])\n plt2 = fig.add_subplot(132)\n plt3 = fig.add_subplot(133)\n\n if axis3 is not None:\n plt3.axis(axis3)\n\n plt2.axis('equal')\n\n plt1.plot(tees, lefts)\n plt1.plot(tees, rights)\n plt1.plot(tees, omegaj)\n\n plt2.plot(pxj, pyj)\n plt3.plot(tees, pxj)\n plt3.plot(tees, pyj)\n\n if gauss_count == 1:\n plt.pause(1.0)\n else:\n plt.pause(1.0)\n if axis3 is None:\n axis3 = plt3.axis()\n\n plt.waitforbuttonpress()",
"# newton-raphson iteration of motion equations\n\nimport numpy as np\nimport rospy\nimport math\nimport time\nfrom bdbd_common.utils import fstr, gstr\nfrom bdbd_common.msg import LeftRights\nfrom bdbd_common.geometry import lr_est, default_lr_model, D_TO_R\n\ndef estr(a):\n return fstr(a, fmat='10.7g', n_per_line=10)\n\nclass NewRaph():\n def __init__(self, n, dt\n ,lr_model=default_lr_model()\n ,start_pose=(0.0, 0.0, 0.0)\n ,start_twist=(0.0, 0.0, 0.0)\n ):\n self.lr_model = lr_model\n self.n = n\n self.dt = dt\n self.start_pose = start_pose\n self.start_twist = start_twist\n\n # prep constants for calculations\n alr_model = np.array(self.lr_model)\n self.bhes = (dt * alr_model[0], dt * alr_model[1], dt * alr_model[2])\n (_, _, qhx) = self.bhes[0]\n (_, _, qhy) = self.bhes[1]\n (_, _, qho) = self.bhes[2]\n #print('(bhxl, bhxr, qhx): ' + estr((bhxl, bhxr, qhx)))\n #print('(bhyl, bhyr, qhy): ' + estr((bhyl, bhyr, qhy)))\n #print('(bhol, bhor, qho): ' + estr((bhol, bhor, qho)))\n (alphax, alphay, alphao) = 1.0 - np.array((qhx, qhy, qho))\n #print('(alphax, alphay, alphao):' + estr((alphax, alphay, alphao)))\n \n # alpha ** j\n alphaxj = [1.0]\n alphayj = [1.0]\n alphaoj = [1.0]\n betaj = [dt]\n for i in range(1, n):\n alphaxj.append(alphaxj[i-1] * alphax)\n alphayj.append(alphayj[i-1] * alphay)\n alphaoj.append(alphaoj[i-1] * alphao)\n betaj.append(betaj[i-1] + dt * alphaoj[i])\n self.alphaxj = np.array(alphaxj)\n self.alphayj = np.array(alphayj)\n self.alphaoj = np.array(alphaoj)\n self.betaj = np.array(betaj)\n #print('alphaxj:' + estr(self.alphaxj))\n #print('alphayj:' + estr(self.alphayj))\n #print('alphaoj:' + estr(self.alphaoj))\n #print('betaj:' + estr(self.betaj))\n\n def poses(self, ls, rs,\n details=False\n ):\n als = np.asarray(ls)\n ars = np.asarray(rs)\n self.als = als\n self.ars = ars\n #print('als:' + estr(als))\n (px0, py0, theta0) = self.start_pose\n (bhxl, bhxr, _) = self.bhes[0]\n (bhyl, bhyr, _) = self.bhes[1]\n (bhol, bhor, _) = self.bhes[2]\n (vxw0, vyw0, omega0) = self.start_twist\n n = self.n\n dt = self.dt\n alphaxj = self.alphaxj\n alphayj = self.alphayj\n alphaoj = self.alphaoj\n\n # initial robot velocities\n vx0 = vxw0 * math.cos(theta0) + vyw0 * math.cos(theta0)\n vy0 = -vxw0 * math.sin(theta0) + vyw0 * math.cos(theta0)\n\n # twist\n vxj = np.empty(n)\n vyj = np.empty(n)\n omegaj = np.empty(n)\n vxj[0] = vx0\n vyj[0] = vy0\n omegaj[0] = omega0\n bmotorxj = bhxl * als + bhxr * ars\n bmotoryj = bhyl * als + bhyr * ars\n bmotoroj = bhol * als + bhor * ars\n for i in range(1, n):\n vxj[i] = vx0 * alphaxj[i] + np.dot(alphaxj[i-1::-1], bmotorxj[1:i+1])\n vyj[i] = vy0 * alphayj[i] + np.dot(alphayj[i-1::-1], bmotoryj[1:i+1])\n omegaj[i] = omega0 * alphaoj[i] + np.dot(alphaoj[i-1::-1], bmotoroj[1:i+1])\n\n if details:\n print(estr({'alphaoj[n-2::-1]': alphaoj[n-2::-1]}))\n print(estr({'bmotoroj[1:n]': bmotoroj[1:n]}))\n # pose\n pxj = np.empty(n)\n pyj = np.empty(n)\n thetaj = np.empty(n)\n pxj[0] = px0\n pyj[0] = py0\n thetaj[0] = theta0\n for i in range(1, n):\n thetaj[i] = theta0 + omega0 * (self.betaj[i] - dt) \\\n + np.dot(self.betaj[i-1::-1], bmotoroj[1:i+1])\n\n # intermediate values as vectors\n cosj = np.cos(thetaj)\n sinj = np.sin(thetaj)\n vxcj = vxj * cosj\n vxsj = vxj * sinj\n vycj = vyj * cosj\n vysj = vyj * sinj\n vxwj = vxcj - vysj\n vywj = vxsj + vycj\n\n pxj[1:] = px0 + dt * np.cumsum(vxwj[1:])\n pyj[1:] = py0 + dt * np.cumsum(vywj[1:])\n\n # intermediate results\n self.cosj = cosj\n self.sinj = sinj\n self.vxcj = vxcj\n self.vxsj = vxsj\n self.vycj = 
vycj\n self.vysj = vysj\n self.vxwj = vxwj\n self.vywj = vywj\n\n self.vxj = vxj\n self.vyj = vyj\n self.omegaj = omegaj\n self.pxj = pxj\n self.pyj = pyj\n self.thetaj = thetaj\n return (pxj, pyj, thetaj, vxj, vyj, omegaj)\n\n def gradients(self):\n # gradients\n\n (bhxl, bhxr, _) = self.bhes[0]\n (bhyl, bhyr, _) = self.bhes[1]\n (bhol, bhor, _) = self.bhes[2]\n n = self.n\n dt = self.dt\n alphaxj = self.alphaxj\n alphayj = self.alphayj\n betaj = self.betaj\n\n cosj = self.cosj\n sinj = self.sinj\n vxcj = self.vxcj\n vxsj = self.vxsj\n vycj = self.vycj\n vysj = self.vysj\n\n dpxdl = np.zeros((n,n))\n dpydl = np.zeros((n,n))\n dpxdr = np.zeros((n,n))\n dpydr = np.zeros((n,n))\n\n for i in range(1, n):\n # gradients\n for k in range(1, i+1):\n doto = np.dot((-vxsj[k:i+1] - vycj[k:i+1]), betaj[:i+1-k])\n dotx = np.dot(cosj[k:i+1], alphaxj[:i+1-k])\n doty = np.dot(-sinj[k:i+1], alphayj[:i+1-k])\n dpxdl[i,k] = dt * (\n +bhol * doto\n +bhxl * dotx\n +bhyl * doty\n )\n dpxdr[i,k] = dt * (\n +bhor * doto\n +bhxr * dotx\n +bhyr * doty\n )\n #if i == 1 and k == 1:\n # print(estr({'bhor': bhor, 'doto': doto, 'bhxr': bhxr, 'dotx': dotx,\n # 'bhyr': bhyr, 'doty': doty}))\n doto = np.dot((vxcj[k:i+1] - vysj[k:i+1]), betaj[:i+1-k])\n dotx = np.dot(sinj[k:i+1], alphaxj[:i+1-k])\n doty = np.dot(cosj[k:i+1], alphayj[:i+1-k])\n dpydl[i,k] = dt * (\n +bhol * doto\n +bhxl * dotx\n +bhyl * doty\n )\n dpydr[i,k] = dt * (\n +bhor * doto\n +bhxr * dotx\n +bhyr * doty\n )\n self.dpxdl = dpxdl\n self.dpydl = dpydl\n self.dpxdr = dpxdr\n self.dpydr = dpydr\n\n return (dpxdl, dpxdr, dpydl, dpydr)\n\n def seconds(self):\n # second partial derivatives\n\n (bhxl, bhxr, _) = self.bhes[0]\n (bhyl, bhyr, _) = self.bhes[1]\n (bhol, bhor, _) = self.bhes[2]\n n = self.n\n dt = self.dt\n alphaxj = self.alphaxj\n alphayj = self.alphayj\n betaj = self.betaj\n\n cosj = self.cosj\n sinj = self.sinj\n vxwj = self.vxwj\n vywj = self.vywj\n\n d2pxdldl = np.zeros((n, n, n))\n d2pxdldr = np.zeros((n, n, n))\n d2pxdrdr = np.zeros((n, n, n))\n d2pydldl = np.zeros((n, n, n))\n d2pydldr = np.zeros((n, n, n))\n d2pydrdr = np.zeros((n, n, n))\n\n # This could be vectorized, but instead I do it discretely to more closely\n # match the C++ version which is what we will actually use.\n for j in range(1, n):\n vxwdt = vxwj[j] * dt\n vywdt = vywj[j] * dt\n sdt = sinj[j] * dt\n cdt = cosj[j] * dt\n for k in range(1, j + 1):\n betaljk = betaj[j-k] * bhol\n betarjk = betaj[j-k] * bhor\n alphaxljk = alphaxj[j-k] * bhxl\n alphaxrjk = alphaxj[j-k] * bhxr\n alphayljk = alphayj[j-k] * bhyl\n alphayrjk = alphayj[j-k] * bhyr\n for m in range(1, j + 1):\n betaljm = betaj[j-m] * bhol\n betarjm = betaj[j-m] * bhor\n alphaxljm = alphaxj[j-m] * bhxl\n alphaxrjm = alphaxj[j-m] * bhxr\n alphayljm = alphaxj[j-m] * bhyl\n alphayrjm = alphaxj[j-m] * bhyr\n\n sumxll = (\n -vxwdt * betaljk * betaljm\n +sdt * (-betaljk * alphaxljm -alphaxljk * betaljm)\n +cdt * (-betaljk * alphayljm -alphayljk * betaljm)\n )\n sumxlr = (\n -vxwdt * betaljk * betarjm\n +sdt * (-betaljk * alphaxrjm -alphaxljk * betarjm)\n +cdt * (-betaljk * alphayrjm -alphayljk * betarjm)\n )\n sumxrr = (\n -vxwdt * betarjk * betarjm\n +sdt * (-betarjk * alphaxrjm -alphaxrjk * betarjm)\n +cdt * (-betarjk * alphayrjm -alphayrjk * betarjm)\n )\n sumyll = (\n -vywdt * betaljk * betaljm\n +sdt * (-betaljk * alphayljm -alphayljk * betaljm)\n +cdt * (betaljk * alphayljm +alphayljk * betaljm)\n )\n sumylr = (\n -vywdt * betaljk * betarjm\n +sdt * (-betaljk * alphayrjm -alphayljk * betarjm)\n +cdt * (betaljk * 
alphayrjm +alphayljk * betarjm)\n )\n sumyrr = (\n -vywdt * betarjk * betarjm\n +sdt * (-betarjk * alphayrjm -alphayrjk * betarjm)\n +cdt * (betarjk * alphayrjm +alphayrjk * betarjm)\n )\n\n for i in range(j, n):\n #print('i,j,k,m', i, j, k, m)\n d2pxdldl[i, k, m] += sumxll\n d2pxdldr[i, k, m] += sumxlr\n d2pxdrdr[i, k, m] += sumxrr\n d2pydldl[i, k, m] += sumyll\n d2pydldr[i, k, m] += sumylr\n d2pydrdr[i, k, m] += sumyrr\n\n self.d2pxdldl = d2pxdldl\n self.d2pxdldr = d2pxdldr\n self.d2pxdrdr = d2pxdrdr\n self.d2pydldl = d2pydldl\n self.d2pydldr = d2pydldr\n self.d2pydrdr = d2pydrdr\n return (d2pxdldl, d2pxdldr, d2pxdrdr, d2pydldl, d2pydldr, d2pydrdr)\n\n def loss(self,\n target_pose=(0.0, 0.0, 0.0),\n target_twist=(0.0, 0.0, 0.0),\n target_lr = (0.0, 0.0),\n Wmax=1.0e-4,\n Wjerk=1.0e-4,\n Wback=1.0e-4,\n mmax=1.0,\n details=False\n ):\n self.target_pose = target_pose\n self.target_twist = target_twist\n self.target_lr = target_lr\n self.Wmax = Wmax\n self.Wjerk = Wjerk\n self.Wback = Wback\n self.mmax = mmax\n return self.reloss(details=details)\n\n def reloss(self, details=False):\n target_pose = self.target_pose\n target_twist = self.target_twist\n target_lr = self.target_lr\n Wmax = self.Wmax\n Wjerk = self.Wjerk\n Wback = self.Wback\n mmax = self.mmax\n # given pose calculations, determine the loss\n vxj = self.vxj\n vyj = self.vyj\n omegaj = self.omegaj\n pxj = self.pxj\n pyj = self.pyj\n thetaj = self.thetaj\n\n lefts = self.als\n rights = self.ars\n\n # values requiring summing over i\n sumMax = 0.1 * Wmax * (\n np.power(lefts, 10.0).sum() +np.power(rights, 10.0).sum()\n ) / mmax ** 10\n\n # backing term\n sumBack = 0.1 * Wback * np.power((lefts + rights).clip(max=0.0), 10).sum()\n\n ldiff = lefts[1:] - lefts[:-1]\n rdiff = rights[1:] - rights[:-1]\n sumJerk = 0.5 * Wjerk * (np.square(ldiff).sum() + np.square(rdiff).sum())\n\n # values based on final targets\n vals = np.asarray([\n pxj[-1]\n , pyj[-1]\n , thetaj[-1]\n , vxj[-1]\n , vyj[-1]\n , omegaj[-1]\n , lefts[-1]\n , rights[-1]\n ])\n targets = np.concatenate([target_pose, target_twist, target_lr])\n #targets = np.concatenate([target_pose, target_twist[:1], target_lr])\n sumTargets = 0.5 * np.square(vals - targets).sum()\n loss = sumMax + sumJerk + sumTargets + sumBack\n if details:\n print('target losses: ' + estr(0.5 * np.square(vals - targets)))\n print(estr({'loss': loss, 'sumMax': sumMax, 'sumJerk': sumJerk, 'sumTargets': sumTargets, 'sumBack': sumBack}))\n print(fstr({'vals': vals}, fmat='15.12g'))\n print(fstr({'targets': targets}))\n self.lossValue = loss\n return loss\n\n def jacobian(self):\n # the 1st derivative of the loss function\n vxj = self.vxj\n vyj = self.vyj\n omegaj = self.omegaj\n pxj = self.pxj\n pyj = self.pyj\n thetaj = self.thetaj\n\n (pxt, pyt, thetat) = self.target_pose\n (vxt, vyt, omegat) = self.target_twist\n (leftt, rightt) = self.target_lr\n dpxdl = self.dpxdl\n dpydl = self.dpydl\n dpxdr = self.dpxdr\n dpydr = self.dpydr\n\n (bhxl, bhxr, _) = self.bhes[0]\n (bhyl, bhyr, _) = self.bhes[1]\n (bhol, bhor, _) = self.bhes[2]\n alphaxj = self.alphaxj\n alphayj = self.alphayj\n alphaoj = self.alphaoj\n betaj = self.betaj\n Wmax = self.Wmax\n Wjerk = self.Wjerk\n Wback = self.Wback\n mmax = self.mmax\n\n lefts = self.als\n rights = self.ars\n leftsp9 = np.power(lefts / mmax, 9)\n rightsp9 = np.power(rights / mmax, 9)\n lprsp9 = np.power((lefts + rights).clip(max=0.0), 9)\n n = len(lefts)\n\n dlefts = np.zeros([n])\n drights = np.zeros([n])\n\n for k in range(1, n):\n dlefts[k] = (\n +(vxj[-1] - vxt) * 
bhxl * alphaxj[n-1-k]\n +(vyj[-1] - vyt) * bhyl * alphayj[n-1-k]\n +(omegaj[-1] - omegat) * bhol * alphaoj[n-1-k]\n +(thetaj[-1] - thetat) * bhol * betaj[n-1-k]\n +(pxj[-1] - pxt) * dpxdl[-1, k]\n +(pyj[-1] - pyt) * dpydl[-1, k]\n +Wmax * leftsp9[k] / mmax\n +Wback * lprsp9[k]\n +Wjerk * (2 * lefts[k] -lefts[k-1] -lefts[min(k+1, n-1)])\n )\n drights[k] = (\n +(vxj[-1] - vxt) * bhxr * alphaxj[n-1-k]\n +(vyj[-1] - vyt) * bhyr * alphayj[n-1-k]\n +(omegaj[-1] - omegat) * bhor * alphaoj[n-1-k]\n +(thetaj[-1] - thetat) * bhor * betaj[n-1-k]\n +(pxj[-1] - pxt) * dpxdr[-1, k]\n +(pyj[-1] - pyt) * dpydr[-1, k]\n +Wmax * rightsp9[k]\n +Wback * lprsp9[k]\n +Wjerk * (2 * rights[k] -rights[k-1] -rights[min(k+1, n-1)])\n )\n # TODO: check this\n dlefts[-1] += (lefts[-1] - leftt)\n drights[-1] += (rights[-1] - rightt)\n self.dlefts = dlefts\n self.drights = drights\n return (dlefts, drights)\n\n def hessian(self):\n # second derivative of the loss function\n pxj = self.pxj\n pyj = self.pyj\n (pxt, pyt, _) = self.target_pose\n dpxdl = self.dpxdl\n dpydl = self.dpydl\n dpxdr = self.dpxdr\n dpydr = self.dpydr\n (bhxl, bhxr, _) = self.bhes[0]\n (bhyl, bhyr, _) = self.bhes[1]\n (bhol, bhor, _) = self.bhes[2]\n alphaxj = self.alphaxj\n alphayj = self.alphayj\n alphaoj = self.alphaoj\n betaj = self.betaj\n\n Wmax = self.Wmax\n Wjerk = self.Wjerk\n Wback = self.Wback\n mmax = self.mmax\n\n lefts = self.als\n rights = self.ars\n d2pxdldl = self.d2pxdldl\n d2pxdldr = self.d2pxdldr\n d2pxdrdr = self.d2pxdrdr\n d2pydldl = self.d2pydldl\n d2pydldr = self.d2pydldr\n d2pydrdr = self.d2pydrdr\n n = len(lefts) - 1\n\n # We'll define this as 0 -> n-1 are lefts[1:], n -> 2n-1 are rights[1:]\n hess = np.empty([2*n, 2*n])\n\n # values that vary with each k, m value\n deltapxn = pxj[-1] - pxt\n deltapyn = pyj[-1] - pyt\n for i in range(0, 2*n):\n k = i % n + 1\n\n kleft = (i < n)\n if kleft:\n dpxdu = dpxdl[n, k]\n dpydu = dpydl[n, k]\n dvxdu = alphaxj[n-k] * bhxl\n dvydu = alphayj[n-k] * bhyl\n domdu = alphaoj[n-k] * bhol\n dthdu = betaj[n-k] * bhol\n else:\n dpxdu = dpxdr[n, k]\n dpydu = dpydr[n, k]\n dvxdu = alphaxj[n-k] * bhxr\n dvydu = alphayj[n-k] * bhyr\n domdu = alphaoj[n-k] * bhor\n dthdu = betaj[n-k] * bhor\n\n for j in range(0, 2*n):\n m = j % n + 1\n mleft = (j < n)\n if mleft:\n dpxds = dpxdl[n, m]\n dpyds = dpydl[n, m]\n dvxds = alphaxj[n-m] * bhxl\n dvyds = alphayj[n-m] * bhyl\n domds = alphaoj[n-m] * bhol\n dthds = betaj[n-m] * bhol\n \n if kleft:\n d2px = d2pxdldl[n, k, m]\n d2py = d2pydldl[n, k, m]\n else:\n # note d2pxdrdl[i,j] = d2pxdldr[j,i]\n d2px = d2pxdldr[n, m, k]\n d2py = d2pydldr[n, m, k]\n else:\n dpxds = dpxdr[n, m]\n dpyds = dpydr[n, m]\n dvxds = alphaxj[n-m] * bhxr\n dvyds = alphayj[n-m] * bhyr\n domds = alphaoj[n-m] * bhor\n dthds = betaj[n-m] * bhor\n if kleft:\n d2px = d2pxdldr[n, k, m]\n d2py = d2pydldr[n, k, m]\n else:\n d2px = d2pxdrdr[n, k, m]\n d2py = d2pydrdr[n, k, m]\n hess[i, j] = (\n deltapxn * d2px + dpxdu * dpxds +\n deltapyn * d2py + dpydu * dpyds +\n dvxdu * dvxds + dvydu * dvyds + domdu * domds + dthdu * dthds\n )\n\n # values that require k == m\n for i in range(0, 2*n):\n k = i % n + 1\n kleft = (i < n)\n # max term\n # TODO: I need factor of 9 here?\n hess[i, i] += 9. * (Wmax / mmax**2) * (lefts[k]**8 if kleft else rights[k]**8)\n # back term\n if (lefts[k] + rights[k]) < 0.0:\n hess[i, i] += 9. 
* Wback * (lefts[k] + rights[k])**8\n # motor target value\n if k == n:\n hess[i, i] += 1.0\n # jerk term\n hess[i, i] += 2 *Wjerk\n if k > 1:\n hess[i, i-1] -= Wjerk\n if k == n:\n hess[i, i] -= Wjerk\n else:\n hess[i, i+1] -= Wjerk\n \n self.hess = hess\n return hess\n\n def dloss_dleft(self, j, eps=1.e-3):\n # numerical estimate of loss derivative at left[j]\n base_als = self.als.copy()\n\n lefts = base_als.copy()\n lefts[j] += eps\n nr.poses(lefts, self.ars)\n loss_plus = nr.reloss()\n\n lefts = base_als.copy()\n lefts[j] -= eps\n nr.poses(lefts, self.ars)\n loss_minus = nr.reloss()\n self.als = base_als\n\n dloss = 0.5 * (loss_plus - loss_minus) / eps\n return dloss\n\n def d2loss_dl_dl(self, k, eps=0.0001):\n # numerical estimate of second derivative of loss dl dl\n base_als = self.als.copy()\n n = len(self.als)\n\n d2lossj = [0.0]\n for j in range(1, n):\n lefts = base_als.copy()\n lefts[k] += eps\n self.als = lefts\n #dlossp = self.dloss_dleft(j, eps)\n nr.poses(lefts, self.ars)\n nr.gradients()\n nr.jacobian()\n dlossp = self.dlefts[j]\n pxp = self.pxj[-1]\n\n lefts = base_als.copy()\n lefts[k] -= eps\n self.als = lefts\n #dlossm = self.dloss_dleft(j, eps)\n nr.poses(lefts, self.ars)\n nr.gradients()\n nr.jacobian()\n dlossm = self.dlefts[j]\n pxm = self.pxj[-1]\n d2lossj.append(0.5 * (dlossp - dlossm) / eps)\n #print(estr({'pxp': pxp, 'pxm': pxm, 'pxp - pxm': pxp - pxm}))\n print(estr(({'dlossp': dlossp, 'dlossm': dlossm, 'dlossp-dlossm': dlossp-dlossm, 'wjerk': self.Wjerk})))\n self.als = base_als\n\n return d2lossj\n \n def dloss_dright(self, j, eps=0.0001):\n # numerical estimate of loss derivative at right[j]\n base_ars = self.ars.copy()\n\n rights = base_ars.copy()\n rights[j] += eps\n nr.poses(self.als, rights)\n loss_plus = nr.reloss()\n\n rights = base_ars.copy()\n rights[j] -= eps\n nr.poses(self.als, rights)\n loss_minus = nr.reloss()\n self.ars = base_ars\n\n dloss = 0.5 * (loss_plus - loss_minus) / eps\n\n return dloss\n\nif __name__ == '__main__':\n from bdbd_common.pathPlan2 import PathPlan\n import matplotlib.pyplot as plt\n\n fig = plt.figure(figsize=(8,4))\n axis1 = None\n axis2 = None\n\n dt = 0.02\n lr_model = default_lr_model()\n #lr_model = ((1.0, 1.0, 10.0), (-1.0, 1.0, 10.0), (-1.0, 10.0, 10.0))\n start_pose = [0.0, 0.0, 0.0]\n start_twist = [0.0, 0.0, 0.0]\n target_pose = [0.2, .1, D_TO_R * 0]\n target_twist = [0.0, 0.0, 0.0]\n approach_rho = 0.02\n min_rho = 0.005\n cruise_v = 0.3\n lr_start = (0.0, 0.0)\n gauss_iters = 0\n nr_iters = 1\n Wmax = dt * 1.e-3\n #Wmax = 0.0\n Wjerk = dt * 1.e-3\n Wback = 1.0\n #Wback = 0.0\n NRstart = 1.0\n NRfact = 2\n maxSlew = 1.0\n testNR = False\n\n pp = PathPlan(approach_rho=approach_rho, min_rho=min_rho)\n pathPlan = pp.start2(start_pose, target_pose)\n print('path_plan:')\n for segment in pathPlan:\n print(fstr(segment, fmat='10.7f'))\n\n # estimate left, right to achieve the path\n speedPlan = pp.speedPlan(start_twist[0], cruise_v, target_twist[0], u=0.10)\n print('speed_plan:')\n for segment in speedPlan:\n print(fstr(segment, fmat='10.7f'))\n\n vxr0 = start_twist[0] * math.cos(start_pose[2]) + start_twist[1] * math.sin(start_pose[2])\n vyr0 = -start_twist[0] * math.sin(start_pose[2]) + start_twist[1] * math.cos(start_pose[2])\n last_vx = vxr0\n last_omega = start_twist[2]\n\n vxres = [vxr0]\n vyres = [vyr0]\n omegas = [start_twist[2]]\n vvs = [pp.v(0.0)]\n vvs[0]['left'] = lr_start[0]\n vvs[0]['right'] = lr_start[1]\n lefts = [lr_start[0]]\n rights = [lr_start[1]]\n tt = 0.0\n tees = [tt]\n while True:\n tt += dt\n 
vv = pp.v(tt)\n vvs.append(vv)\n # vv gives vhat is in wheel frame. We need to convert to robot frame.\n vxres.append(vv['v'])\n vyres.append(vv['omega'] * pp.dwheel)\n omegas.append(vv['omega'])\n\n (left, right, last_vx, last_omega) = lr_est(vv['v'], vv['omega'], last_vx, last_omega, dt)\n lefts.append(left)\n rights.append(right)\n tees.append(tt)\n vv['left'] = left\n vv['right'] = right\n\n if vv['fraction'] > 0.9999:\n break\n for seg in vvs:\n print(estr(seg))\n\n # send to C++ node for processing\n rospy.init_node('NewRaph')\n lrPub = rospy.Publisher('rawLR', LeftRights, queue_size=10)\n lrMsg = LeftRights()\n lrMsg.dt = dt\n lrMsg.lefts = lefts\n lrMsg.rights = rights\n base_lefts = lefts.copy()\n base_rights = rights.copy()\n\n while not rospy.is_shutdown():\n lefts = base_lefts.copy()\n rights = base_rights.copy()\n lrPub.publish(lrMsg)\n print('\\n***** publishing rawLR *****')\n n = len(lefts)\n nr = NewRaph(n, dt\n ,lr_model=lr_model\n ,start_pose=start_pose\n ,start_twist=start_twist\n )\n for count in range(1):\n (pxj, pyj, thetaj, vxj, vyj, omegaj) = nr.poses(lefts, rights)\n loss = nr.loss(mmax=1.0, target_pose=target_pose, Wmax=Wmax, Wjerk=Wjerk, Wback=Wback, details=False)\n print('loss: ' + estr(loss))\n print('lefts:' + fstr(lefts))\n print('rights:' + fstr(rights))\n (dpxdl, dpxdr, dpydl, dpydr) = nr.gradients()\n nr.jacobian()\n nr.seconds()\n hess = nr.hessian()\n b = np.concatenate([-nr.dlefts[1:], -nr.drights[1:]])\n print('b' + fstr(b, fmat='10.7f'))\n print('hessian[3:]\\n' + fstr(hess[3,:], n_per_line=11, fmat='11.6f'))\n deltax = np.linalg.solve(nr.hess, b)\n print('deltax:' + estr(deltax))\n\n #print('pxj:' + fstr(pxj))\n #print('pyj:' + fstr(pyj))\n eps = 1.0\n nhess = len(lefts) - 1\n lefts[1:] = lefts[1:] + eps * deltax[:nhess]\n rights[1:] = rights[1:] + eps * deltax[nhess:]\n #print('dpydr:' + estr(dpydr))\n #print('dlefts:' + fstr(nr.dlefts, fmat='10.7f'))\n #print('drights:' + fstr(nr.drights, fmat='10.7f'))\n rospy.sleep(2.0)\n exit(0)\n\n\n #lefts = lefts[:10]\n #rights = rights[:10]\n #tees = tees[:10]\n '''\n for i in range(10):\n lefts.append(0.0)\n rights.append(0.0)\n tees.append(tees[-1] + dt)\n '''\n\n n = len(lefts)\n nr = NewRaph(n, dt\n ,lr_model=lr_model\n ,start_pose=start_pose\n ,start_twist=start_twist\n )\n # gradient descent iteration\n eps = 0.5\n last_loss = 1.e10\n last_lefts = None\n last_rights = None\n last_dlefts = None\n last_drights = None\n last_deltax = None\n doNR = False\n deltax = None\n maxNR = False\n\n for count in range(gauss_iters + nr_iters):\n (pxj, pyj, thetaj, vxj, vyj, omegaj) = nr.poses(lefts, rights)\n loss = nr.loss(mmax=1.0, target_pose=target_pose, Wmax=Wmax, Wjerk=Wjerk, Wback=Wback, details=True)\n (dpxdl, dpxdr, dpydl, dpydr) = nr.gradients()\n (dlefts, drights) = nr.jacobian()\n\n #print('loss time:', time.time() - start)\n #print('loss:', loss)\n\n #print('jacobian time:', time.time() - start)\n '''\n start = time.time()\n (d2pxdldl, d2pxdldr, d2pxdrdr, d2pydldl, d2pydldr, d2pydrdr) = nr.seconds()\n print('seconds time:', time.time() - start)\n '''\n\n fig.clf()\n plt1 = fig.add_subplot(121)\n #plt1.axis([0.0, tfPath.lrs[-1]['t'], -1.5, 1.5])\n plt2 = fig.add_subplot(122)\n\n '''\n if axis1 is not None:\n plt1.axis(axis1)\n if axis2 is not None:\n plt2.axis(axis2)\n else:\n '''\n plt2.axis('equal')\n\n plt1.plot(tees, lefts)\n plt1.plot(tees, rights)\n plt1.plot(tees, omegaj)\n\n plt2.plot(pxj, pyj)\n\n plt.pause(0.001)\n if axis1 is None:\n axis1 = plt1.axis()\n if axis2 is None:\n axis2 = 
plt2.axis()\n\n nhess = len(lefts) - 1\n # update lefts, rights\n print(fstr({'count': count, 'eps': eps, 'loss': loss, 'last_loss': last_loss,\n 'lmax': np.amax(lefts), 'rmax': np.amax(rights)}, fmat='15.12f'))\n if count >= gauss_iters and not doNR:\n eps = NRstart\n doNR = True\n if loss < last_loss or (doNR and last_deltax is None):\n last_loss = loss\n last_lefts = lefts.copy()\n last_rights = rights.copy()\n last_dlefts = dlefts.copy()\n last_drights = drights.copy()\n\n\n if not doNR:\n # gradient descent\n for i in range(1, n):\n lefts[i] -= eps*dlefts[i]\n rights[i] -= eps*drights[i]\n eps *= 1.3\n else:\n #Newton-Raphson\n if eps > 0.99:\n maxNR = True\n print('Using Newton-Raphson!')\n (d2pxdldl, d2pxdldr, d2pxdrdr, d2pydldl, d2pydldr, d2pydrdr) = nr.seconds()\n hess = nr.hessian()\n b = np.concatenate([-nr.dlefts[1:], -nr.drights[1:]])\n if deltax is not None:\n last_deltax = deltax.copy()\n deltax = np.linalg.solve(nr.hess, b)\n slew = np.amax(np.absolute(deltax))\n\n # line search over deltax looking for best eps\n best_eps = 0.0\n best_loss = last_loss\n if slew > maxSlew:\n worst_eps = maxSlew / slew\n eps = worst_eps / 2.0\n print('eps limited by slew to {:6.3f}'.format(worst_eps))\n else:\n worst_eps = None\n base_lefts = lefts.copy()\n base_rights = rights.copy()\n for lcount in range(6):\n last_eps = eps\n lefts[1:] = base_lefts[1:] + eps * deltax[:nhess]\n rights[1:] = base_rights[1:] + eps * deltax[nhess:]\n nr.poses(lefts, rights)\n loss = nr.reloss()\n if loss > best_loss:\n worst_eps = eps\n else:\n best_eps = eps\n best_loss = loss\n if worst_eps is None:\n eps *= 2\n else:\n eps = 0.5 * (best_eps + worst_eps)\n print(estr({'eps': last_eps, 'loss': loss, 'best_eps': best_eps, 'worst_eps': worst_eps, 'new_eps': eps}))\n \n eps = min(best_eps, 1.0)\n lefts[1:] = base_lefts[1:] + eps * deltax[:nhess]\n rights[1:] = base_rights[1:] + eps * deltax[nhess:]\n print('deltax:' + estr(deltax))\n if deltax.ptp() < 1.e-6:\n print('done, little delta x')\n break\n \n '''\n # limit slew rate\n\n if testNR:\n eps *= 2\n if eps >= 1.0:\n break\n\n lefts[1:] = last_lefts[1:] + eps * deltax[:nhess]\n rights[1:] = last_rights[1:] + eps * deltax[nhess:]\n else:\n last_lefts = lefts.copy()\n last_rights = rights.copy()\n\n slew = np.amax(np.absolute(deltax))\n eps = min(eps, maxSlew / slew)\n\n lefts[1:] += eps * deltax[:nhess]\n rights[1:] += eps * deltax[nhess:]\n eps = min(1.0, NRfact*eps)\n '''\n\n else:\n print('uh, oh!')\n if maxNR:\n print('done')\n break\n\n if not doNR:\n eps *= .5\n lefts[1:] = last_lefts[1:] - eps * last_dlefts[1:]\n rights[1:] = last_rights[1:] - eps * last_drights[1:]\n else:\n eps *= .5 / NRfact\n lefts[1:] = last_lefts[1:] + eps * last_deltax[:nhess]\n rights[1:] = last_rights[1:] + eps * last_deltax[nhess:]\n\n #print('lefts:' + estr(lefts))\n #print('rights:' + estr(rights))\n time.sleep(1.0)\n print('press any button to continue')\n plt.waitforbuttonpress()\n print('pxj:' + estr(pxj))\n print('pyj:' + estr(pyj))\n print('thetaj:' + estr(thetaj))\n print('vxj:' + estr(vxj))\n print('vyj:' + estr(vyj))\n print('omegaj:' + estr(omegaj))\n print('lefts:' + estr(lefts))\n print('rights:' + estr(rights))\n '''\n #print('dlefts:' + fstr(dlefts, fmat='18.15g', n_per_line=10))\n #print('drights:' + fstr(drights, fmat='18.15g', n_per_line=10))\n #print('hessian:' + fstr(nr.hess, fmat='18.15g', n_per_line=10))\n\n print('dpxdl:' + gstr(nr.dpxdl, fmat='18.15f', n_per_line=10))\n print('dpxdr:' + gstr(nr.dpxdr, fmat='18.15f', n_per_line=10))\n print('dpydl:' + 
gstr(nr.dpydl, fmat='18.15f', n_per_line=10))\n print('dpydr:' + gstr(nr.dpydr, fmat='18.15f', n_per_line=10))\n print('d2pxdldl' + gstr(nr.d2pxdldl, fmat='18.15f', n_per_line=10))\n print('d2pxdldr' + gstr(nr.d2pxdldr, fmat='18.15f', n_per_line=10))\n print('d2pxdrdr' + gstr(nr.d2pxdrdr, fmat='18.15f', n_per_line=10))\n print('d2pydldl' + gstr(nr.d2pydldl, fmat='18.15f', n_per_line=10))\n print('d2pydldr' + gstr(nr.d2pydldr, fmat='18.15f', n_per_line=10))\n print('d2prdrdr' + gstr(nr.d2pydrdr, fmat='18.15f', n_per_line=10))\n print('sin(theta):' + fstr(np.sin(thetaj),fmat='18.15f', n_per_line=10))\n print('cos(theta):' + fstr(np.cos(thetaj),fmat='18.15f', n_per_line=10))\n print('vxw:' + fstr(vxj * np.cos(thetaj) - vyj * np.sin(thetaj),fmat='18.15f', n_per_line=10))\n print('vyw:' + fstr(vxj * np.sin(thetaj) + vyj * np.cos(thetaj),fmat='18.15f', n_per_line=10))\n '''\n\n # numerical estimate of dlefts\n # get numerical estimate of derivative for single values\n '''\n dlefts_n = [0.0]\n dpxdl_n = [0.0]\n dpydl_n = [0.0]\n dthetadl_n = [0.0]\n dvxdl_n = [0.0]\n dvydl_n = [0.0]\n domegadl_n = [0.0]\n for j in range(1, n):\n dloss = nr.dloss_dleft(j, eps=1.e-5)\n dlefts_n.append(dloss)\n\n drights_n = [0.0]\n dpxdr_n = [0.0]\n dpydr_n = [0.0]\n dthetadr_n = [0.0]\n dvxdr_n = [0.0]\n dvydr_n = [0.0]\n domegadr_n = [0.0]\n for j in range(1, n):\n dloss = nr.dloss_dright(j, eps=1.e-5)\n drights_n.append(dloss)\n\n #print('dpxdl_n:' + estr(dpxdl_n))\n print('dpxdl:' + estr(nr.dpxdl))\n #print('dpydl_n:' + estr(dpydl_n))\n print('dpydl:' + estr(nr.dpydl))\n print('dlefts_n:' + fstr(dlefts_n, fmat='18.15g', n_per_line=10))\n print('dlefts:' + fstr(nr.dlefts, fmat='18.15g', n_per_line=10)) \n print('drights_n:' + fstr(drights_n, fmat='18.15g', n_per_line=10))\n print('drights:' + fstr(nr.drights, fmat='18.15g', n_per_line=10))\n d2loss = []\n for k in range(1, len(lefts)):\n d2loss.append(nr.d2loss_dl_dl(k, eps=1.0e-4))\n \n print('d2lossj:' + fstr(d2loss, fmat='18.15g', n_per_line=10))\n #nr.poses(nr.als, nr.ars)\n #nr.reloss()\n #nr.gradients()\n #(dlefts, drights) = nr.jacobian()\n print('hessian:' + fstr(nr.hess, fmat='18.15g', n_per_line=10))\n #print('d2pxdldl' + gstr(nr.d2pxdldl, fmat='18.15f', n_per_line=10))\n '''\n\n\n# look at 5,5\n'''\neps = 1.0e-3\nbase_als = nr.als.copy()\n\nlefts = base_als.copy()\nlefts[5] += eps\n#dlossp = self.dloss_dleft(j, eps)\nnr.poses(lefts, nr.ars, details=True)\nlossp = nr.reloss(True)\nnr.gradients()\nnr.jacobian()\ndlossp = nr.dlefts[5]\npxp = nr.pxj[-1]\npyp = nr.pyj[-1]\n\nlefts[5] -= eps\nnr.poses(lefts, nr.ars, details=True)\nlossc = nr.reloss(True)\nnr.gradients()\nnr.jacobian()\ndlossc = nr.dlefts[5]\npxc = nr.pxj[-1]\npyc = nr.pyj[-1]\n\nlefts[5] -= eps\nnr.poses(lefts, nr.ars, details=True)\nlossm = nr.reloss(True)\nnr.gradients()\nnr.jacobian()\ndlossm = nr.dlefts[5]\npxm = nr.pxj[-1]\npym = nr.pyj[-1]\nprint(estr({'dlossp': dlossp, 'dlossc': dlossc, 'dlossm': dlossm}))\nprint(estr({'d1': 0.5 * (lossp - lossm)/eps, 'd2': (-lossp + 2*lossc - lossm)/eps**2}))\nprint(estr({'pxloss': .5 *(pxc-.01)**2, 'p1x': 0.5 * (pxp - pxm) / eps, 'p2x': (-pxp + 2*pxc - pxm)/eps**2}))\nprint(estr({'pyloss': 0.5 * pyc**2, 'p1y': 0.5 * (pyp - pym) / eps, 'p2y': (-pyp + 2*pyc - pym)/eps**2}))\n'''"
] |
[
[
"numpy.square",
"numpy.dot",
"numpy.linalg.solve",
"numpy.absolute",
"numpy.power",
"numpy.asarray",
"numpy.cos",
"numpy.cumsum",
"numpy.sin",
"numpy.concatenate",
"matplotlib.pyplot.waitforbuttonpress",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.pause",
"numpy.empty",
"matplotlib.pyplot.figure"
],
[
"numpy.square",
"numpy.dot",
"numpy.amax",
"numpy.linalg.solve",
"numpy.absolute",
"numpy.power",
"numpy.asarray",
"numpy.cos",
"numpy.cumsum",
"numpy.sin",
"numpy.concatenate",
"matplotlib.pyplot.waitforbuttonpress",
"numpy.array",
"numpy.zeros",
"matplotlib.pyplot.pause",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
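Note: the `NewRaph` script in the row above is organized around one numerical idea, a damped Newton-Raphson update over the left/right motor sequences. It assembles the Jacobian and Hessian of the loss analytically, solves `hess . deltax = -jacobian` with `numpy.linalg.solve`, caps the step by a slew limit (`maxSlew`), then bisection-searches the step size `eps` until the loss actually drops. A minimal sketch of that update pattern, assuming generic `loss`, `grad`, and `hess` callables as hypothetical stand-ins for the script's `reloss`, `jacobian`, and `hessian` methods:

import numpy as np

def damped_newton_step(x, loss, grad, hess, max_slew=1.0, tries=6):
    # One Newton-Raphson update with a slew cap and a crude line search.
    # loss/grad/hess are callables of x, hypothetical stand-ins for the
    # reloss/jacobian/hessian methods of the stored NewRaph class.
    b = -grad(x)                          # right-hand side, -dL/dx
    deltax = np.linalg.solve(hess(x), b)  # full Newton direction

    # Cap the largest per-element change, as the script does with maxSlew.
    eps = 1.0
    slew = np.amax(np.absolute(deltax))
    if slew > max_slew:
        eps = 0.5 * max_slew / slew

    # Bisection-style search for a step size that actually reduces the loss.
    best_eps, best_loss = 0.0, loss(x)
    worst_eps = None
    for _ in range(tries):
        trial = loss(x + eps * deltax)
        if trial > best_loss:
            worst_eps = eps
        else:
            best_eps, best_loss = eps, trial
        eps = 2.0 * eps if worst_eps is None else 0.5 * (best_eps + worst_eps)
    return x + min(best_eps, 1.0) * deltax, best_loss

The power-10 terms in the script's loss (np.power(lefts / mmax, 10) for motor saturation, (lefts + rights).clip(max=0.0) raised to the 10th power for backing) are smooth soft constraints, which keeps the loss twice-differentiable everywhere, exactly what this Newton machinery requires.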
Zhangxu0501/models
|
[
"42a3da72313b8814ef0ced8f425af90b57313b9f",
"42a3da72313b8814ef0ced8f425af90b57313b9f"
] |
[
"research/ptn/train_ptn.py",
"research/object_detection/anchor_generators/multiple_grid_anchor_generator_test.py"
] |
[
"# Copyright 2017 The TensorFlow Authors All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Contains training plan for the Im2vox model.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow import app\n\nimport model_ptn\n\nflags = tf.app.flags\nslim = tf.contrib.slim\n\nflags.DEFINE_string('inp_dir',\n '',\n 'Directory path containing the input data (tfrecords).')\nflags.DEFINE_string(\n 'dataset_name', 'shapenet_chair',\n 'Dataset name that is to be used for training and evaluation.')\nflags.DEFINE_integer('z_dim', 512, '')\nflags.DEFINE_integer('f_dim', 64, '')\nflags.DEFINE_integer('fc_dim', 1024, '')\nflags.DEFINE_integer('num_views', 24, 'Num of viewpoints in the input data.')\nflags.DEFINE_integer('image_size', 64,\n 'Input images dimension (pixels) - width & height.')\nflags.DEFINE_integer('vox_size', 32, 'Voxel prediction dimension.')\nflags.DEFINE_integer('step_size', 24, 'Steps to take in rotation to fetch viewpoints.')\nflags.DEFINE_integer('batch_size', 1, 'Batch size while training.')\nflags.DEFINE_float('focal_length', 0.866, 'Focal length parameter used in perspective projection.')\nflags.DEFINE_float('focal_range', 1.732, 'Focal length parameter used in perspective projection.')\nflags.DEFINE_string('encoder_name', 'ptn_encoder',\n 'Name of the encoder network being used.')\nflags.DEFINE_string('decoder_name', 'ptn_vox_decoder',\n 'Name of the decoder network being used.')\nflags.DEFINE_string('projector_name', 'perspective_projector',\n 'Name of the projector network being used.')\n# Save options\nflags.DEFINE_string('checkpoint_dir', '/tmp/ptn_train/',\n 'Directory path for saving trained models and other data.')\nflags.DEFINE_string('model_name', 'ptn_finetune',\n 'Name of the model used in naming the TF job. 
Must be different for each run.')\nflags.DEFINE_string('init_model', None,\n 'Checkpoint path of the model to initialize with.')\nflags.DEFINE_integer('save_every', 1000,\n 'Average period of steps after which we save a model.')\n# Optimization\nflags.DEFINE_float('proj_weight', 10, 'Weighting factor for projection loss.')\nflags.DEFINE_float('volume_weight', 0, 'Weighting factor for volume loss.')\nflags.DEFINE_float('viewpoint_weight', 1, 'Weighting factor for viewpoint loss.')\nflags.DEFINE_float('learning_rate', 0.0001, 'Learning rate.')\nflags.DEFINE_float('weight_decay', 0.001, 'Weight decay parameter while training.')\nflags.DEFINE_float('clip_gradient_norm', 0, 'Gradient clim norm, leave 0 if no gradient clipping.')\nflags.DEFINE_integer('max_number_of_steps', 10000, 'Maximum number of steps for training.')\n# Summary\nflags.DEFINE_integer('save_summaries_secs', 15, 'Seconds interval for dumping TF summaries.')\nflags.DEFINE_integer('save_interval_secs', 60 * 5, 'Seconds interval to save models.')\n\n# Scheduling\nflags.DEFINE_string('master', '', 'The address of the tensorflow master')\nflags.DEFINE_bool('sync_replicas', False, 'Whether to sync gradients between replicas for optimizer.')\nflags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas (train tasks).')\nflags.DEFINE_integer('backup_workers', 0, 'Number of backup workers.')\nflags.DEFINE_integer('ps_tasks', 0, 'Number of ps tasks.')\nflags.DEFINE_integer('task', 0,\n 'Task identifier flag to be set for each task running in distributed manner. Task number 0 '\n 'will be chosen as the chief.')\n\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n train_dir = os.path.join(FLAGS.checkpoint_dir, FLAGS.model_name, 'train')\n save_image_dir = os.path.join(train_dir, 'images')\n if not os.path.exists(train_dir):\n os.makedirs(train_dir)\n if not os.path.exists(save_image_dir):\n os.makedirs(save_image_dir)\n\n g = tf.Graph()\n with g.as_default():\n with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):\n global_step = slim.get_or_create_global_step()\n ###########\n ## model ##\n ###########\n model = model_ptn.model_PTN(FLAGS)\n ##########\n ## data ##\n ##########\n train_data = model.get_inputs(\n FLAGS.inp_dir,\n FLAGS.dataset_name,\n 'train',\n FLAGS.batch_size,\n FLAGS.image_size,\n FLAGS.vox_size,\n is_training=True)\n inputs = model.preprocess(train_data, FLAGS.step_size)\n ##############\n ## model_fn ##\n ##############\n model_fn = model.get_model_fn(\n is_training=True, reuse=False, run_projection=True)\n outputs = model_fn(inputs)\n ##################\n ## train_scopes ##\n ##################\n if FLAGS.init_model:\n train_scopes = ['decoder']\n init_scopes = ['encoder']\n else:\n train_scopes = ['encoder', 'decoder']\n\n ##########\n ## loss ##\n ##########\n task_loss = model.get_loss(inputs, outputs)\n\n regularization_loss = model.get_regularization_loss(train_scopes)\n loss = task_loss + regularization_loss\n ###############\n ## optimizer ##\n ###############\n optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n if FLAGS.sync_replicas:\n optimizer = tf.train.SyncReplicasOptimizer(\n optimizer,\n replicas_to_aggregate=FLAGS.workers_replicas - FLAGS.backup_workers,\n total_num_replicas=FLAGS.worker_replicas)\n\n ##############\n ## train_op ##\n ##############\n train_op = model.get_train_op_for_scope(loss, optimizer, train_scopes)\n ###########\n ## saver ##\n ###########\n saver = tf.train.Saver(max_to_keep=np.minimum(5,\n FLAGS.worker_replicas + 1))\n\n if FLAGS.task == 0:\n params = FLAGS\n 
params.batch_size = params.num_views\n params.step_size = 1\n model.set_params(params)\n val_data = model.get_inputs(\n params.inp_dir,\n params.dataset_name,\n 'val',\n params.batch_size,\n params.image_size,\n params.vox_size,\n is_training=False)\n val_inputs = model.preprocess(val_data, params.step_size)\n # Note: don't compute loss here\n reused_model_fn = model.get_model_fn(is_training=False, reuse=True)\n val_outputs = reused_model_fn(val_inputs)\n\n with tf.device(tf.DeviceSpec(device_type='CPU')):\n vis_input_images = val_inputs['images_1'] * 255.0\n vis_gt_projs = (val_outputs['masks_1'] * (-1) + 1) * 255.0\n vis_pred_projs = (val_outputs['projs_1'] * (-1) + 1) * 255.0\n\n vis_gt_projs = tf.concat([vis_gt_projs] * 3, axis=3)\n vis_pred_projs = tf.concat([vis_pred_projs] * 3, axis=3)\n # rescale\n new_size = [FLAGS.image_size] * 2\n vis_gt_projs = tf.image.resize_nearest_neighbor(\n vis_gt_projs, new_size)\n vis_pred_projs = tf.image.resize_nearest_neighbor(\n vis_pred_projs, new_size)\n # flip\n # vis_gt_projs = utils.image_flipud(vis_gt_projs)\n # vis_pred_projs = utils.image_flipud(vis_pred_projs)\n # vis_gt_projs is of shape [batch, height, width, channels]\n write_disk_op = model.write_disk_grid(\n global_step=global_step,\n log_dir=save_image_dir,\n input_images=vis_input_images,\n gt_projs=vis_gt_projs,\n pred_projs=vis_pred_projs,\n input_voxels=val_inputs['voxels'],\n output_voxels=val_outputs['voxels_1'])\n with tf.control_dependencies([write_disk_op]):\n train_op = tf.identity(train_op)\n\n #############\n ## init_fn ##\n #############\n if FLAGS.init_model:\n init_fn = model.get_init_fn(init_scopes)\n else:\n init_fn = None\n\n ##############\n ## training ##\n ##############\n slim.learning.train(\n train_op=train_op,\n logdir=train_dir,\n init_fn=init_fn,\n master=FLAGS.master,\n is_chief=(FLAGS.task == 0),\n number_of_steps=FLAGS.max_number_of_steps,\n saver=saver,\n save_summaries_secs=FLAGS.save_summaries_secs,\n save_interval_secs=FLAGS.save_interval_secs)\n\n\nif __name__ == '__main__':\n app.run()\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Tests for anchor_generators.multiple_grid_anchor_generator_test.py.\"\"\"\n\nimport numpy as np\n\nimport tensorflow as tf\n\nfrom object_detection.anchor_generators import multiple_grid_anchor_generator as ag\n\n\nclass MultipleGridAnchorGeneratorTest(tf.test.TestCase):\n\n def test_construct_single_anchor_grid(self):\n \"\"\"Builds a 1x1 anchor grid to test the size of the output boxes.\"\"\"\n exp_anchor_corners = [[-121, -35, 135, 29], [-249, -67, 263, 61],\n [-505, -131, 519, 125], [-57, -67, 71, 61],\n [-121, -131, 135, 125], [-249, -259, 263, 253],\n [-25, -131, 39, 125], [-57, -259, 71, 253],\n [-121, -515, 135, 509]]\n\n box_specs_list = [[(.5, .25), (1.0, .25), (2.0, .25),\n (.5, 1.0), (1.0, 1.0), (2.0, 1.0),\n (.5, 4.0), (1.0, 4.0), (2.0, 4.0)]]\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list,\n base_anchor_size=tf.constant([256, 256], dtype=tf.float32),\n anchor_strides=[(16, 16)],\n anchor_offsets=[(7, -3)])\n anchors = anchor_generator.generate(feature_map_shape_list=[(1, 1)])\n anchor_corners = anchors.get()\n with self.test_session():\n anchor_corners_out = anchor_corners.eval()\n self.assertAllClose(anchor_corners_out, exp_anchor_corners)\n\n def test_construct_anchor_grid(self):\n box_specs_list = [[(0.5, 1.0), (1.0, 1.0), (2.0, 1.0)]]\n\n exp_anchor_corners = [[-2.5, -2.5, 2.5, 2.5], [-5., -5., 5., 5.],\n [-10., -10., 10., 10.], [-2.5, 16.5, 2.5, 21.5],\n [-5., 14., 5, 24], [-10., 9., 10, 29],\n [16.5, -2.5, 21.5, 2.5], [14., -5., 24, 5],\n [9., -10., 29, 10], [16.5, 16.5, 21.5, 21.5],\n [14., 14., 24, 24], [9., 9., 29, 29]]\n\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list,\n base_anchor_size=tf.constant([10, 10], dtype=tf.float32),\n anchor_strides=[(19, 19)],\n anchor_offsets=[(0, 0)])\n anchors = anchor_generator.generate(feature_map_shape_list=[(2, 2)])\n anchor_corners = anchors.get()\n\n with self.test_session():\n anchor_corners_out = anchor_corners.eval()\n self.assertAllClose(anchor_corners_out, exp_anchor_corners)\n\n def test_construct_anchor_grid_non_square(self):\n box_specs_list = [[(1.0, 1.0)]]\n\n exp_anchor_corners = [[0., -0.25, 1., 0.75], [0., 0.25, 1., 1.25]]\n\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list, base_anchor_size=tf.constant([1, 1], dtype=tf.float32))\n anchors = anchor_generator.generate(feature_map_shape_list=[(tf.constant(\n 1, dtype=tf.int32), tf.constant(2, dtype=tf.int32))])\n anchor_corners = anchors.get()\n\n with self.test_session():\n anchor_corners_out = anchor_corners.eval()\n self.assertAllClose(anchor_corners_out, exp_anchor_corners)\n\n def test_construct_anchor_grid_normalized(self):\n box_specs_list = [[(1.0, 1.0)]]\n\n exp_anchor_corners = [[0., 0., 1., 0.5], [0., 0.5, 1., 1.]]\n\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list, 
base_anchor_size=tf.constant([1, 1], dtype=tf.float32))\n anchors = anchor_generator.generate(\n feature_map_shape_list=[(tf.constant(1, dtype=tf.int32), tf.constant(\n 2, dtype=tf.int32))],\n im_height=320,\n im_width=640)\n anchor_corners = anchors.get()\n\n with self.test_session():\n anchor_corners_out = anchor_corners.eval()\n self.assertAllClose(anchor_corners_out, exp_anchor_corners)\n\n def test_construct_multiple_grids(self):\n box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],\n [(1.0, 1.0), (1.0, 0.5)]]\n\n # height and width of box with .5 aspect ratio\n h = np.sqrt(2)\n w = 1.0/np.sqrt(2)\n exp_small_grid_corners = [[-.25, -.25, .75, .75],\n [.25-.5*h, .25-.5*w, .25+.5*h, .25+.5*w],\n [-.25, .25, .75, 1.25],\n [.25-.5*h, .75-.5*w, .25+.5*h, .75+.5*w],\n [.25, -.25, 1.25, .75],\n [.75-.5*h, .25-.5*w, .75+.5*h, .25+.5*w],\n [.25, .25, 1.25, 1.25],\n [.75-.5*h, .75-.5*w, .75+.5*h, .75+.5*w]]\n # only test first entry of larger set of anchors\n exp_big_grid_corners = [[.125-.5, .125-.5, .125+.5, .125+.5],\n [.125-1.0, .125-1.0, .125+1.0, .125+1.0],\n [.125-.5*h, .125-.5*w, .125+.5*h, .125+.5*w],]\n\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list,\n base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),\n anchor_strides=[(.25, .25), (.5, .5)],\n anchor_offsets=[(.125, .125), (.25, .25)])\n anchors = anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)])\n anchor_corners = anchors.get()\n\n with self.test_session():\n anchor_corners_out = anchor_corners.eval()\n self.assertEquals(anchor_corners_out.shape, (56, 4))\n big_grid_corners = anchor_corners_out[0:3, :]\n small_grid_corners = anchor_corners_out[48:, :]\n self.assertAllClose(small_grid_corners, exp_small_grid_corners)\n self.assertAllClose(big_grid_corners, exp_big_grid_corners)\n\n def test_construct_multiple_grids_with_clipping(self):\n box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],\n [(1.0, 1.0), (1.0, 0.5)]]\n\n # height and width of box with .5 aspect ratio\n h = np.sqrt(2)\n w = 1.0/np.sqrt(2)\n exp_small_grid_corners = [[0, 0, .75, .75],\n [0, 0, .25+.5*h, .25+.5*w],\n [0, .25, .75, 1],\n [0, .75-.5*w, .25+.5*h, 1],\n [.25, 0, 1, .75],\n [.75-.5*h, 0, 1, .25+.5*w],\n [.25, .25, 1, 1],\n [.75-.5*h, .75-.5*w, 1, 1]]\n\n clip_window = tf.constant([0, 0, 1, 1], dtype=tf.float32)\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list,\n base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),\n clip_window=clip_window)\n anchors = anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)])\n anchor_corners = anchors.get()\n\n with self.test_session():\n anchor_corners_out = anchor_corners.eval()\n small_grid_corners = anchor_corners_out[48:, :]\n self.assertAllClose(small_grid_corners, exp_small_grid_corners)\n\n def test_invalid_box_specs(self):\n # not all box specs are pairs\n box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],\n [(1.0, 1.0), (1.0, 0.5, .3)]]\n with self.assertRaises(ValueError):\n ag.MultipleGridAnchorGenerator(box_specs_list)\n\n # box_specs_list is not a list of lists\n box_specs_list = [(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)]\n with self.assertRaises(ValueError):\n ag.MultipleGridAnchorGenerator(box_specs_list)\n\n def test_invalid_generate_arguments(self):\n box_specs_list = [[(1.0, 1.0), (2.0, 1.0), (1.0, 0.5)],\n [(1.0, 1.0), (1.0, 0.5)]]\n\n # incompatible lengths with box_specs_list\n with self.assertRaises(ValueError):\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list,\n 
base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),\n anchor_strides=[(.25, .25)],\n anchor_offsets=[(.125, .125), (.25, .25)])\n anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)])\n with self.assertRaises(ValueError):\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list,\n base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),\n anchor_strides=[(.25, .25), (.5, .5)],\n anchor_offsets=[(.125, .125), (.25, .25)])\n anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2), (1, 1)])\n with self.assertRaises(ValueError):\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list,\n base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),\n anchor_strides=[(.5, .5)],\n anchor_offsets=[(.25, .25)])\n anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)])\n\n # not pairs\n with self.assertRaises(ValueError):\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list,\n base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),\n anchor_strides=[(.25, .25), (.5, .5)],\n anchor_offsets=[(.125, .125), (.25, .25)])\n anchor_generator.generate(feature_map_shape_list=[(4, 4, 4), (2, 2)])\n with self.assertRaises(ValueError):\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list,\n base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),\n anchor_strides=[(.25, .25, .1), (.5, .5)],\n anchor_offsets=[(.125, .125), (.25, .25)])\n anchor_generator.generate(feature_map_shape_list=[(4, 4), (2, 2)])\n with self.assertRaises(ValueError):\n anchor_generator = ag.MultipleGridAnchorGenerator(\n box_specs_list,\n base_anchor_size=tf.constant([1.0, 1.0], dtype=tf.float32),\n anchor_strides=[(.25, .25), (.5, .5)],\n anchor_offsets=[(.125, .125), (.25, .25)])\n anchor_generator.generate(feature_map_shape_list=[(4), (2, 2)])\n\n\nclass CreateSSDAnchorsTest(tf.test.TestCase):\n\n def test_create_ssd_anchors_returns_correct_shape(self):\n anchor_generator = ag.create_ssd_anchors(\n num_layers=6,\n min_scale=0.2,\n max_scale=0.95,\n aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3),\n reduce_boxes_in_lowest_layer=True)\n\n feature_map_shape_list = [(38, 38), (19, 19), (10, 10),\n (5, 5), (3, 3), (1, 1)]\n anchors = anchor_generator.generate(\n feature_map_shape_list=feature_map_shape_list)\n anchor_corners = anchors.get()\n with self.test_session():\n anchor_corners_out = anchor_corners.eval()\n self.assertEquals(anchor_corners_out.shape, (7308, 4))\n\n anchor_generator = ag.create_ssd_anchors(\n num_layers=6, min_scale=0.2, max_scale=0.95,\n aspect_ratios=(1.0, 2.0, 3.0, 1.0/2, 1.0/3),\n reduce_boxes_in_lowest_layer=False)\n\n feature_map_shape_list = [(38, 38), (19, 19), (10, 10),\n (5, 5), (3, 3), (1, 1)]\n anchors = anchor_generator.generate(\n feature_map_shape_list=feature_map_shape_list)\n anchor_corners = anchors.get()\n with self.test_session():\n anchor_corners_out = anchor_corners.eval()\n self.assertEquals(anchor_corners_out.shape, (11640, 4))\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.Graph",
"numpy.minimum",
"tensorflow.concat",
"tensorflow.image.resize_nearest_neighbor",
"tensorflow.control_dependencies",
"tensorflow.identity",
"tensorflow.train.replica_device_setter",
"tensorflow.train.SyncReplicasOptimizer",
"tensorflow.train.AdamOptimizer",
"tensorflow.DeviceSpec",
"tensorflow.app.run"
],
[
"numpy.sqrt",
"tensorflow.constant",
"tensorflow.test.main"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
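Note: the anchor-generator test in the row above pins down a closed form: each `(scale, aspect_ratio)` box spec becomes a box of height `base * scale / sqrt(aspect)` and width `base * scale * sqrt(aspect)`, centered on a regular grid at `offset + index * stride`. A pure-numpy re-derivation of that arithmetic; the function name and signature here are illustrative, not part of the `object_detection` API:

import numpy as np

def grid_anchor_corners(grid_h, grid_w, box_specs, base=1.0,
                        stride=(1.0, 1.0), offset=(0.0, 0.0)):
    # Corners as [y_min, x_min, y_max, x_max], one row per (cell, spec),
    # iterating cells row-major with all specs per cell, the same layout
    # the expected values in test_construct_anchor_grid follow.
    corners = []
    for i in range(grid_h):
        for j in range(grid_w):
            cy = offset[0] + i * stride[0]
            cx = offset[1] + j * stride[1]
            for scale, aspect in box_specs:
                h = base * scale / np.sqrt(aspect)
                w = base * scale * np.sqrt(aspect)
                corners.append([cy - h / 2, cx - w / 2,
                                cy + h / 2, cx + w / 2])
    return np.array(corners)

# Reproduces the first rows of test_construct_anchor_grid:
# grid_anchor_corners(2, 2, [(0.5, 1.0), (1.0, 1.0), (2.0, 1.0)],
#                     base=10.0, stride=(19, 19))[:3]
# -> [[-2.5, -2.5, 2.5, 2.5], [-5, -5, 5, 5], [-10, -10, 10, 10]]

The clipping variant tested alongside it differs only in clamping these corners into the given window afterwards.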
jaegonlee/fer
|
[
"628651d88568103f5b2b3e081d5c6b5dab39267d"
] |
[
"FER_osc.py"
] |
[
"#!/usr/local/bin/python3\n\nfrom keras.models import model_from_json\nimport numpy as np\nimport cv2\nimport argparse\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\nimport math\n\nfrom pythonosc import udp_client\n\nclass FacialExpressionModel(object):\n EMOTIONS_LIST = [\"Angry\", \"Disgust\", \"Fear\", \"Happy\", \"Sad\", \"Surprise\", \"Neutral\"]\n\n def __init__(self, model_json_file, model_weights_file):\n with open(model_json_file, \"r\") as json_file:\n loaded_model_json = json_file.read()\n self.loaded_model = model_from_json(loaded_model_json)\n\n self.loaded_model.load_weights(model_weights_file)\n # print(\"Model loaded from disk\")\n # self.loaded_model.summary()\n\n def predict_emotion(self, img):\n self.preds = self.loaded_model.predict(img)\n return self.preds#FacialExpressionModel.EMOTIONS_LIST[np.argmax(self.preds)]\n\n#parser = argparse.ArgumentParser()\n#parser.add_argument(\"source\")\n#parser.add_argument(\"fps\")\n#args = parser.parse_args()\ncap = cv2.VideoCapture(0)#os.path.abspath(args.source) if not args.source == 'webcam' else 0)\nfaceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncap.set(cv2.CAP_PROP_FPS, 30)#int(args.fps))\ncap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\nDNN = \"TF\"\nif DNN == \"CAFFE\":\n modelFile = \"res10_300x300_ssd_iter_140000_fp16.caffemodel\"\n configFile = \"deploy.prototxt\"\n net = cv2.dnn.readNetFromCaffe(configFile, modelFile)\nelse:\n modelFile = \"opencv_face_detector_uint8.pb\"\n configFile = \"opencv_face_detector.pbtxt\"\n net = cv2.dnn.readNetFromTensorflow(modelFile, configFile)\n\ndef getdata():\n _, fr = cap.read()\n fr = cv2.flip(fr, 1)\n gray = cv2.cvtColor(fr, cv2.COLOR_BGR2GRAY)\n # faces = faceCascade.detectMultiScale(gray, 1.3, 5)\n frameOpencvDnn = fr.copy()\n frameHeight = frameOpencvDnn.shape[0]\n frameWidth = frameOpencvDnn.shape[1]\n blob = cv2.dnn.blobFromImage(frameOpencvDnn, 1.0, (300, 300), [104, 117, 123], False, False)\n \n net.setInput(blob)\n detections = net.forward()\n bboxes = []\n for i in range(detections.shape[2]):\n confidence = detections[0, 0, i, 2]\n if confidence > 0.7:\n x1 = int(detections[0, 0, i, 3] * frameWidth)\n y1 = int(detections[0, 0, i, 4] * frameHeight)\n x2 = int(detections[0, 0, i, 5] * frameWidth)\n y2 = int(detections[0, 0, i, 6] * frameHeight)\n # print(\"has face!\",x1,y1,x2,y2)\n bboxes.append([x1, y1, x2, y2])\n return bboxes, fr, gray\n\ndef start_app(cnn):\n while cap.isOpened():\n faces, fr, gray_fr = getdata()\n for (x, y, x2, y2) in faces:\n if y<0 or x<0:\n break\n fc = gray_fr[y:y2, x:x2]\n roi = cv2.resize(fc, (48, 48))\n pred = cnn.predict_emotion(roi[np.newaxis, :, :, np.newaxis])\n emotion = FacialExpressionModel.EMOTIONS_LIST[np.argmax(pred)]\n for idx,i in enumerate(FacialExpressionModel.EMOTIONS_LIST):\n color = (211, 211, 211) if pred[0][idx] < 0.01 else (0, 255, 0)\n emotion_score = \"{}: {}\".format(i, \"{:.2f}\".format(pred[0][idx]) if pred[0][idx] > 0.01 else \"\")\n cv2.putText(fr, emotion_score, (x2 + 5, y + 15 + idx*18), font, 0.5, color, 1, cv2.LINE_AA)\n cv2.rectangle(fr, (x, y), (x2, y2), (255, 0, 0), 2)\n\n client.send_message(\"/found\",1)\n client.send_message(\"/face\",[x,y,x2-x,y2-y])\n client.send_message(\"/emotion\", emotion)\n\n if cv2.waitKey(1) == 27:\n break\n cv2.imshow('Facial Emotion Recognition', fr)\n cap.release()\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n ip = \"127.0.0.1\"\n port = 12345\n client = 
udp_client.SimpleUDPClient(ip, port)\n\n model = FacialExpressionModel(\"model.json\", \"weights.h5\")\n start_app(model)\n"
] |
[
[
"numpy.argmax"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
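Note: in the `FER_osc.py` blob above, the least readable part is the detector decode inside `getdata()`: `net.forward()` returns a `(1, 1, N, 7)` array, and each row carries a confidence at index 2 followed by four box coordinates normalized to [0, 1] (the first two entries, batch and class ids under OpenCV's SSD output convention, go unused). The same logic pulled out into a standalone helper with an illustrative name:

def decode_dnn_faces(detections, frame_w, frame_h, conf_thresh=0.7):
    # detections: the (1, 1, N, 7) array produced by net.forward() in the
    # stored script; rows below conf_thresh are dropped, the rest are
    # scaled from normalized [0, 1] coordinates to pixel corners.
    boxes = []
    for i in range(detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence > conf_thresh:
            x1 = int(detections[0, 0, i, 3] * frame_w)
            y1 = int(detections[0, 0, i, 4] * frame_h)
            x2 = int(detections[0, 0, i, 5] * frame_w)
            y2 = int(detections[0, 0, i, 6] * frame_h)
            boxes.append((x1, y1, x2, y2))
    return boxes

Each surviving box is then cropped from the grayscale frame, resized to 48x48, fed to the Keras model as roi[np.newaxis, :, :, np.newaxis], and np.argmax over the prediction (the single API this row records) selects the emotion label sent over OSC.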
harshasunder-1/pyleecan
|
[
"32ae60f98b314848eb9b385e3652d7fc50a77420",
"32ae60f98b314848eb9b385e3652d7fc50a77420",
"32ae60f98b314848eb9b385e3652d7fc50a77420"
] |
[
"Tests/Methods/Machine/test_Magnet_Type_10_meth.py",
"pyleecan/Methods/Slot/HoleM52/comp_surface.py",
"pyleecan/Methods/Slot/SlotW25/build_geometry_wind.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport pytest\n\nfrom pyleecan.Classes.LamSlotMag import LamSlotMag\nfrom pyleecan.Classes.SlotMFlat import SlotMFlat\nfrom pyleecan.Classes.MagnetType10 import MagnetType10\nfrom pyleecan.Classes.Segment import Segment\nfrom pyleecan.Methods.Machine.Magnet.comp_surface import comp_surface\nfrom pyleecan.Methods import ParentMissingError\n\nfrom numpy import exp\n\nMag10_test = list()\n# Internal Slot\nlam = LamSlotMag(is_internal=True, Rext=0.1325)\nlam.slot = SlotMFlat(H0=5e-3, W0=10e-3, Zs=12)\nlam.slot.magnet = [MagnetType10(Hmag=5e-3, Wmag=10e-3)]\nMag10_test.append(\n {\"test_obj\": lam, \"S_exp\": 5e-5, \"Ao\": 0.078449, \"H_exp\": 5e-3, \"Rmec\": 0.1325}\n)\n\n# Outward Slot\nlam = LamSlotMag(is_internal=False, Rint=0.1325)\nlam.slot = SlotMFlat(H0=5e-3, W0=10e-3, Zs=12)\nlam.slot.magnet = [MagnetType10(Hmag=5e-3, Wmag=10e-3)]\nMag10_test.append(\n {\n \"test_obj\": lam,\n \"S_exp\": 5e-5,\n \"Ao\": 0.072745,\n \"H_exp\": 5e-3,\n \"Rmec\": 0.1324056630650208,\n }\n)\n\n# For AlmostEqual\nDELTA = 1e-4\n\n\[email protected]\nclass Test_Magnet_Type_10_meth(object):\n \"\"\"unittest for MagnetType10 methods\"\"\"\n\n @pytest.mark.parametrize(\"test_dict\", Mag10_test)\n def test_comp_surface(self, test_dict):\n \"\"\"Check that the computation of the surface is correct\"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.magnet[0].comp_surface()\n\n a = result\n b = test_dict[\"S_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n assert abs((a - b) / a - 0) < DELTA, msg\n\n # Compare numerical and analytical results\n b = comp_surface(test_obj.slot.magnet[0])\n msg = \"Analytical: \" + str(a) + \" Numerical \" + str(b)\n assert abs((a - b) / a - 0) < DELTA, msg\n\n @pytest.mark.parametrize(\"test_dict\", Mag10_test)\n def test_comp_height(self, test_dict):\n \"\"\"Check that the computation of the height is correct\"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.magnet[0].comp_height()\n\n a = result\n b = test_dict[\"H_exp\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n assert abs((a - b) / a - 0) < DELTA, msg\n\n @pytest.mark.parametrize(\"test_dict\", Mag10_test)\n def test_comp_angle_op(self, test_dict):\n \"\"\"Check that the computation of the opening angle is correct\"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.slot.magnet[0].comp_angle_opening()\n\n a = result\n b = test_dict[\"Ao\"]\n msg = \"Return \" + str(a) + \" expected \" + str(b)\n assert abs((a - b) / a - 0) < DELTA, msg\n\n # Checking the error\n\n magnet = MagnetType10(Hmag=5e-3, Wmag=10e-3)\n with pytest.raises(ParentMissingError) as context:\n magnet.comp_angle_opening()\n\n @pytest.mark.parametrize(\"test_dict\", Mag10_test)\n def test_comp_radius_mec(self, test_dict):\n \"\"\"Check that the computation of the opening angle is correct\"\"\"\n test_obj = test_dict[\"test_obj\"]\n result = test_obj.comp_radius_mec()\n\n assert result == test_dict[\"Rmec\"]\n\n def test_build_geometry_in(self):\n \"\"\"check that curve_list is correct (inwards magnet)\"\"\"\n lam = LamSlotMag(\n Rint=40e-3,\n Rext=1,\n is_internal=True,\n is_stator=False,\n L1=0.45,\n Nrvd=1,\n Wrvd=0.05,\n )\n lam.slot = SlotMFlat(\n Zs=8, W0=0.6, H0=0.2, magnet=[MagnetType10(Wmag=0.6, Hmag=0.2)]\n )\n test_obj = lam.slot.magnet[0]\n alpha = lam.slot.comp_angle_opening_magnet()\n Z1 = 1 * exp(-1j * alpha / 2) - 0.2\n Z2 = 1 * exp(1j * alpha / 2) - 0.2\n Z3 = Z1 + 0.2\n Z4 = Z2 + 0.2\n\n # Creation of curve\n curve_list = list()\n 
curve_list.append(Segment(Z1, Z3))\n curve_list.append(Segment(Z3, Z4))\n curve_list.append(Segment(Z4, Z2))\n curve_list.append(Segment(Z2, Z1))\n\n surface = test_obj.build_geometry()\n result = surface[0].get_lines()\n for i in range(0, len(result)):\n a = result[i].begin\n b = curve_list[i].begin\n assert abs((a - b) / a - 0) < DELTA\n\n a = result[i].end\n b = curve_list[i].end\n assert abs((a - b) / a - 0) < DELTA\n\n # Checking the error\n\n magnet = MagnetType10(Hmag=5e-3, Wmag=10e-3)\n with pytest.raises(ParentMissingError) as context:\n magnet.build_geometry()\n\n def test_build_geometry_out(self):\n \"\"\"check that curve_list is correct (outwards magnet)\"\"\"\n lam = LamSlotMag(\n Rint=1,\n Rext=0.09,\n is_internal=False,\n is_stator=False,\n L1=0.45,\n Nrvd=1,\n Wrvd=0.05,\n )\n lam.slot = SlotMFlat(\n Zs=8, W0=0.6, H0=0.2, magnet=[MagnetType10(Wmag=0.6, Hmag=0.2)]\n )\n test_obj = lam.slot.magnet[0]\n alpha = lam.slot.comp_angle_opening_magnet()\n Z1 = 1 * exp(-1j * alpha / 2) + 0.2\n Z2 = 1 * exp(1j * alpha / 2) + 0.2\n Z3 = Z1 - 0.2\n Z4 = Z2 - 0.2\n\n # Creation of curve\n curve_list = list()\n curve_list.append(Segment(Z1, Z3))\n curve_list.append(Segment(Z3, Z4))\n curve_list.append(Segment(Z4, Z2))\n curve_list.append(Segment(Z2, Z1))\n\n surface = test_obj.build_geometry()\n result = surface[0].get_lines()\n for i in range(0, len(result)):\n a = result[i].begin\n b = curve_list[i].begin\n assert abs((a - b) / a - 0) < DELTA\n\n a = result[i].end\n b = curve_list[i].end\n assert abs((a - b) / a - 0) < DELTA\n\n # Checking the error\n\n magnet = MagnetType10(Hmag=5e-3, Wmag=10e-3)\n with pytest.raises(ParentMissingError) as context:\n magnet.build_geometry()\n\n # Is simplified + W0 > Wmag\n\n lam = LamSlotMag(\n Rint=1,\n Rext=0.09,\n is_internal=False,\n is_stator=False,\n L1=0.45,\n Nrvd=1,\n Wrvd=0.05,\n )\n lam.slot = SlotMFlat(\n Zs=8, W0=0.8, H0=0.2, magnet=[MagnetType10(Wmag=0.6, Hmag=0.2)]\n )\n test_obj = lam.slot.magnet[0]\n surface = test_obj.build_geometry(is_simplified=True)\n\n assert len(surface[0].line_list) == 3\n\n # Is simplified + H0 < Hmag\n\n lam = LamSlotMag(\n Rint=1,\n Rext=0.09,\n is_internal=False,\n is_stator=False,\n L1=0.45,\n Nrvd=1,\n Wrvd=0.05,\n )\n lam.slot = SlotMFlat(\n Zs=8, W0=0.6, H0=0.2, magnet=[MagnetType10(Wmag=0.6, Hmag=0.8)]\n )\n test_obj = lam.slot.magnet[0]\n surface = test_obj.build_geometry(is_simplified=True)\n\n assert len(surface[0].line_list) == 3\n\n # Type Magnet 1\n\n lam = LamSlotMag(\n Rint=1,\n Rext=0.09,\n is_internal=False,\n is_stator=False,\n L1=0.45,\n Nrvd=1,\n Wrvd=0.05,\n )\n lam.slot = SlotMFlat(\n Zs=8, W0=0.6, H0=0.2, magnet=[MagnetType10(Wmag=0.6, Hmag=0.8)]\n )\n lam.slot.magnet[0].type_magnetization = 1\n surface = lam.build_geometry()\n\n assert len(surface) == 9\n\n # Type Magnet 2\n\n lam = LamSlotMag(\n Rint=1,\n Rext=0.09,\n is_internal=False,\n is_stator=False,\n L1=0.45,\n Nrvd=1,\n Wrvd=0.05,\n )\n lam.slot = SlotMFlat(\n Zs=8, W0=0.6, H0=0.2, magnet=[MagnetType10(Wmag=0.6, Hmag=0.8)]\n )\n lam.slot.magnet[0].type_magnetization = 2\n surface = lam.build_geometry()\n\n assert len(surface) == 9\n",
"# -*- coding: utf-8 -*-\n\nfrom numpy import exp, pi, tan\n\n\ndef comp_surface(self):\n \"\"\"Compute the surface of the Hole\n\n Parameters\n ----------\n self : HoleM52\n A HoleM52 object\n\n Returns\n -------\n S: float\n Surface of the Hole. [m**2]\n\n \"\"\"\n\n Rbo = self.get_Rbo()\n\n alpha = self.comp_alpha()\n\n # Angle between (P1,P2) and (0,P0) is slot_pitch /2\n # It is also the angle (P1,P2,S)\n hsp = pi / self.Zh # Half Slot Pitch\n\n # Distance P1,P9\n D19 = ((Rbo - self.H0) * exp(1j * alpha / 2)).imag * 2\n\n # S is the intersectioni between (P1,P9) and the parallel to x passing by P2\n D1S = tan(hsp) * (self.H1 - self.H2)\n\n W1 = D19 / 2.0 - D1S - self.W0 / 2.0\n\n Smag = self.comp_surface_magnets()\n # Two air trapeze area\n Strap = ((D19 / 2.0 - self.W0 / 2.0) + W1) * (self.H1 - self.H2)\n\n return Smag + Strap\n",
"# -*- coding: utf-8 -*-\n\nfrom numpy import angle, exp, linspace, zeros\n\nfrom ....Classes.Arc1 import Arc1\nfrom ....Classes.Segment import Segment\nfrom ....Classes.SurfLine import SurfLine\n\n\ndef build_geometry_wind(self, Nrad, Ntan, is_simplified=False, alpha=0, delta=0):\n \"\"\"Split the slot winding area in several zone\n\n Parameters\n ----------\n self : SlotW25\n A SlotW25 object\n Nrad : int\n Number of radial layer\n Ntan : int\n Number of tangentiel layer\n is_simplified : bool\n boolean to specify if coincident lines are considered as one or different lines (Default value = False)\n alpha : float\n Angle for rotation (Default value = 0) [rad]\n delta : Complex\n complex for translation (Default value = 0)\n\n Returns\n -------\n surf_list: list\n List of surface delimiting the winding zone\n\n \"\"\"\n # get the name of the lamination\n st = self.get_name_lam()\n\n [Z8, Z7, Z6, Z5, Z4, Z3, Z2, Z1] = self._comp_point_coordinate()\n X = linspace(Z3, Z4, Nrad + 1)\n\n # Nrad+1 and Ntan+1 because 3 points => 2 zones\n Z = zeros((Nrad + 1, Ntan + 1), dtype=complex)\n for ii in range(Nrad + 1):\n Z[ii][:] = abs(X[ii]) * exp(\n 1j * linspace(angle(X[ii]), angle(X[ii].conjugate()), Ntan + 1)\n )\n\n if self.is_outwards():\n assert abs(Z3) < abs(Z4)\n assert abs(Z6) < abs(Z5)\n assert abs(Z[0][0] - Z3) < 1e-6\n assert abs(Z[Nrad][0] - Z4) < 1e-6\n assert abs(Z[0][Ntan] - Z6) < 1e-6\n assert abs(Z[Nrad][Ntan] - Z5) < 1e-6\n\n # We go thought the zone by Rad then Tan, starting by (0,0)\n surf_list = list()\n for jj in range(Ntan): # jj from 0 to Ntan-1\n for ii in range(Nrad): # ii from 0 to Nrad-1\n Z1 = Z[ii][jj]\n Z2 = Z[ii][jj + 1]\n Z3 = Z[ii + 1][jj + 1]\n Z4 = Z[ii + 1][jj]\n point_ref = (Z1 + Z2 + Z3 + Z4) / 4 # reference point of the surface\n # With one zone the order would be [Z7,Z4,Z5,Z6]\n if is_simplified:\n curve_list = list()\n if ii == 0:\n curve_list.append(Arc1(Z1, Z2, abs(Z1), is_trigo_direction=True))\n if jj != Ntan - 1:\n curve_list.append(Segment(Z2, Z3))\n if ii != Nrad - 1:\n curve_list.append(Arc1(Z3, Z4, -abs(Z3), is_trigo_direction=False))\n surface = SurfLine(\n line_list=curve_list,\n label=\"Wind_\" + st + \"_R\" + str(ii) + \"_T\" + str(jj) + \"_S0\",\n point_ref=point_ref,\n )\n surf_list.append(surface)\n else:\n curve_list = list()\n curve_list.append(Arc1(Z1, Z2, abs(Z1), is_trigo_direction=True))\n curve_list.append(Segment(Z2, Z3))\n curve_list.append(Arc1(Z3, Z4, -abs(Z3), is_trigo_direction=False))\n curve_list.append(Segment(Z4, Z1))\n surface = SurfLine(\n line_list=curve_list,\n label=\"Wind_\" + st + \"_R\" + str(ii) + \"_T\" + str(jj) + \"_S0\",\n point_ref=point_ref,\n )\n surf_list.append(surface)\n\n for surf in surf_list:\n surf.rotate(alpha)\n surf.translate(delta)\n return surf_list\n"
] |
[
[
"numpy.exp"
],
[
"numpy.tan",
"numpy.exp"
],
[
"numpy.angle",
"numpy.zeros",
"numpy.linspace"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
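Both pyleecan files in the record above rely on the same trick: lamination points are stored as complex numbers, so rotating a point is a multiplication by exp(1j * theta) and chord lengths fall out of the imaginary part. A minimal sketch of the D19 / trapezoid step from comp_surface, assuming made-up geometry values (Rbo, H0, H1, H2, W0, Zh and alpha below are illustrative, not taken from any real machine):

from numpy import exp, pi, tan

# Hypothetical geometry, for illustration only [m, m, m, m, m, -, rad]
Rbo, H0, H1, H2, W0, Zh, alpha = 0.1, 0.01, 0.02, 0.005, 0.01, 8, 0.6

hsp = pi / Zh                                    # half slot pitch [rad]
# rotate the radius (Rbo - H0) by alpha/2; twice the imaginary part is the chord P1-P9
D19 = ((Rbo - H0) * exp(1j * alpha / 2)).imag * 2
D1S = tan(hsp) * (H1 - H2)                       # offset along the parallel to x through P2
W1 = D19 / 2.0 - D1S - W0 / 2.0
Strap = ((D19 / 2.0 - W0 / 2.0) + W1) * (H1 - H2)    # area of the two air trapezoids
print(D19, Strap)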
ajlee21/Batch_effects_simulation
|
[
"d707321346de48de5e63cf251280bdf9372be59c",
"d707321346de48de5e63cf251280bdf9372be59c",
"d707321346de48de5e63cf251280bdf9372be59c"
] |
[
"simulate_expression_compendia_modules/cca_core.py",
"archive/scripts/functions/cca_core.py",
"archive/scripts/functions/vae.py"
] |
[
"# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nThe core code for applying Canonical Correlation Analysis to deep networks.\nThis module contains the core functions to apply canonical correlation analysis\nto deep neural networks. The main function is get_cca_similarity, which takes in\ntwo sets of activations, typically the neurons in two layers and their outputs\non all of the datapoints D = [d_1,...,d_m] that have been passed through.\nInputs have shape (num_neurons1, m), (num_neurons2, m). This can be directly\napplied used on fully connected networks. For convolutional layers, the 3d block\nof neurons can either be flattened entirely, along channels, or alternatively,\nthe dft_ccas (Discrete Fourier Transform) module can be used.\nSee https://arxiv.org/abs/1706.05806 for full details.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\n\nnum_cca_trials = 5\nepsilon = 1e-6\n\n\ndef positivedef_matrix_sqrt(array):\n \"\"\"Stable method for computing matrix square roots, supports complex matrices.\n Args:\n array: A numpy 2d array, can be complex valued that is a positive\n definite symmetric (or hermitian) matrix\n Returns:\n sqrtarray: The matrix square root of array\n \"\"\"\n w, v = np.linalg.eigh(array)\n # A - np.dot(v, np.dot(np.diag(w), v.T))\n wsqrt = np.sqrt(w)\n sqrtarray = np.dot(v, np.dot(np.diag(wsqrt), np.conj(v).T))\n return sqrtarray\n\n\ndef remove_small(sigma_xx, sigma_xy, sigma_yx, sigma_yy, threshold=1e-6):\n \"\"\"Takes covariance between X, Y, and removes values of small magnitude.\n Args:\n sigma_xx: 2d numpy array, variance matrix for x\n sigma_xy: 2d numpy array, crossvariance matrix for x,y\n sigma_yx: 2d numpy array, crossvariance matrixy for x,y,\n (conjugate) transpose of sigma_xy\n sigma_yy: 2d numpy array, variance matrix for y\n threshold: cutoff value for norm below which directions are thrown\n away\n Returns:\n sigma_xx_crop: 2d array with low x norm directions removed\n sigma_xy_crop: 2d array with low x and y norm directions removed\n sigma_yx_crop: 2d array with low x and y norm directiosn removed\n sigma_yy_crop: 2d array with low y norm directions removed\n x_idxs: indexes of sigma_xx that were removed\n y_idxs: indexes of sigma_yy that were removed\n \"\"\"\n\n x_diag = np.abs(np.diagonal(sigma_xx))\n y_diag = np.abs(np.diagonal(sigma_yy))\n x_idxs = x_diag >= threshold\n y_idxs = y_diag >= threshold\n\n sigma_xx_crop = sigma_xx[x_idxs][:, x_idxs]\n sigma_xy_crop = sigma_xy[x_idxs][:, y_idxs]\n sigma_yx_crop = sigma_yx[y_idxs][:, x_idxs]\n sigma_yy_crop = sigma_yy[y_idxs][:, y_idxs]\n\n return (sigma_xx_crop, sigma_xy_crop, sigma_yx_crop, sigma_yy_crop, x_idxs, y_idxs)\n\n\ndef compute_ccas(sigma_xx, sigma_xy, sigma_yx, sigma_yy, verbose=True):\n \"\"\"Main cca computation function, takes in variances and crossvariances.\n This function takes in the covariances and cross covariances of X, Y,\n preprocesses them 
(removing small magnitudes) and outputs the raw results of\n the cca computation, including cca directions in a rotated space, and the\n cca correlation coefficient values.\n Args:\n sigma_xx: 2d numpy array, (num_neurons_x, num_neurons_x)\n variance matrix for x\n sigma_xy: 2d numpy array, (num_neurons_x, num_neurons_y)\n crossvariance matrix for x,y\n sigma_yx: 2d numpy array, (num_neurons_y, num_neurons_x)\n crossvariance matrix for x,y (conj) transpose of sigma_xy\n sigma_yy: 2d numpy array, (num_neurons_y, num_neurons_y)\n variance matrix for y\n verbose: boolean on whether to print intermediate outputs\n Returns:\n [ux, sx, vx]: [numpy 2d array, numpy 1d array, numpy 2d array]\n ux and vx are (conj) transposes of each other, being\n the canonical directions in the X subspace.\n sx is the set of canonical correlation coefficients-\n how well corresponding directions in vx, Vy correlate\n with each other.\n [uy, sy, vy]: Same as above, but for Y space\n invsqrt_xx: Inverse square root of sigma_xx to transform canonical\n directions back to original space\n invsqrt_yy: Same as above but for sigma_yy\n x_idxs: The indexes of the input sigma_xx that were pruned\n by remove_small\n y_idxs: Same as above but for sigma_yy\n \"\"\"\n\n (sigma_xx, sigma_xy, sigma_yx, sigma_yy, x_idxs, y_idxs) = remove_small(\n sigma_xx, sigma_xy, sigma_yx, sigma_yy\n )\n\n numx = sigma_xx.shape[0]\n numy = sigma_yy.shape[0]\n\n if numx == 0 or numy == 0:\n return (\n [0, 0, 0],\n [0, 0, 0],\n np.zeros_like(sigma_xx),\n np.zeros_like(sigma_yy),\n x_idxs,\n y_idxs,\n )\n\n if verbose:\n print(\"adding eps to diagonal and taking inverse\")\n sigma_xx += epsilon * np.eye(numx)\n sigma_yy += epsilon * np.eye(numy)\n inv_xx = np.linalg.pinv(sigma_xx)\n inv_yy = np.linalg.pinv(sigma_yy)\n\n if verbose:\n print(\"taking square root\")\n invsqrt_xx = positivedef_matrix_sqrt(inv_xx)\n invsqrt_yy = positivedef_matrix_sqrt(inv_yy)\n\n if verbose:\n print(\"dot products...\")\n arr_x = np.dot(sigma_yx, invsqrt_xx)\n arr_x = np.dot(inv_yy, arr_x)\n arr_x = np.dot(invsqrt_xx, np.dot(sigma_xy, arr_x))\n arr_y = np.dot(sigma_xy, invsqrt_yy)\n arr_y = np.dot(inv_xx, arr_y)\n arr_y = np.dot(invsqrt_yy, np.dot(sigma_yx, arr_y))\n\n if verbose:\n print(\"trying to take final svd\")\n arr_x_stable = arr_x + epsilon * np.eye(arr_x.shape[0])\n arr_y_stable = arr_y + epsilon * np.eye(arr_y.shape[0])\n try:\n ux, sx, vx = np.linalg.svd(arr_x_stable)\n uy, sy, vy = np.linalg.svd(arr_y_stable)\n except:\n return [0, 0, 0], [0, 0, 0], 0, 0, 0, 0\n sx = np.sqrt(np.abs(sx))\n sy = np.sqrt(np.abs(sy))\n if verbose:\n print(\"computed everything!\")\n\n return [ux, sx, vx], [uy, sy, vy], invsqrt_xx, invsqrt_yy, x_idxs, y_idxs\n\n\ndef sum_threshold(array, threshold):\n \"\"\"Computes threshold index of decreasing nonnegative array by summing.\n This function takes in a decreasing array nonnegative floats, and a\n threshold between 0 and 1. 
It returns the index i at which the sum of the\n array up to i is threshold*total mass of the array.\n Args:\n array: a 1d numpy array of decreasing, nonnegative floats\n threshold: a number between 0 and 1\n Returns:\n i: index at which np.sum(array[:i]) >= threshold\n \"\"\"\n assert (threshold >= 0) and (threshold <= 1), \"print incorrect threshold\"\n\n for i in range(len(array)):\n if np.sum(array[:i]) / np.sum(array) >= threshold:\n return i\n\n\ndef create_zero_dict(compute_dirns, dimension):\n \"\"\"Outputs a zero dict when neuron activation norms too small.\n This function creates a return_dict with appropriately shaped zero entries\n when all neuron activations are very small.\n Args:\n compute_dirns: boolean, whether to have zero vectors for directions\n dimension: int, defines shape of directions\n Returns:\n return_dict: a dict of appropriately shaped zero entries\n \"\"\"\n return_dict = {}\n return_dict[\"mean\"] = (np.asarray(0), np.asarray(0))\n return_dict[\"sum\"] = (np.asarray(0), np.asarray(0))\n return_dict[\"cca_coef1\"] = np.asarray(0)\n return_dict[\"cca_coef2\"] = np.asarray(0)\n return_dict[\"idx1\"] = 0\n return_dict[\"idx2\"] = 0\n\n if compute_dirns:\n return_dict[\"cca_dirns1\"] = np.zeros((1, dimension))\n return_dict[\"cca_dirns2\"] = np.zeros((1, dimension))\n\n return return_dict\n\n\ndef get_cca_similarity(acts1, acts2, threshold=0.98, compute_dirns=True, verbose=True):\n \"\"\"The main function for computing cca similarities.\n This function computes the cca similarity between two sets of activations,\n returning a dict with the cca coefficients, a few statistics of the cca\n coefficients, and (optionally) the actual directions.\n Args:\n acts1: (num_neurons1, data_points) a 2d numpy array of neurons by\n datapoints where entry (i,j) is the output of neuron i on\n datapoint j.\n acts2: (num_neurons2, data_points) same as above, but (potentially)\n for a different set of neurons. Note that acts1 and acts2\n can have different numbers of neurons, but must agree on the\n number of datapoints\n threshold: float between 0, 1 used to get rid of trailing zeros in\n the cca correlation coefficients to output more accurate\n summary statistics of correlations.\n compute_dirns: boolean value determining whether actual cca\n directions are computed. (For very large neurons and\n datasets, may be better to compute these on the fly\n instead of store in memory.)\n verbose: Boolean, whether info about intermediate outputs printed\n Returns:\n return_dict: A dictionary with outputs from the cca computations.\n Contains neuron coefficients (combinations of neurons\n that correspond to cca directions), the cca correlation\n coefficients (how well aligned directions correlate),\n x and y idxs (for computing cca directions on the fly\n if compute_dirns=False), and summary statistics. 
If\n compute_dirns=True, the cca directions are also\n computed.\n \"\"\"\n\n # assert dimensionality equal\n assert acts1.shape[1] == acts2.shape[1], \"dimensions don't match\"\n # check that acts1, acts2 are transposition\n assert acts1.shape[0] < acts1.shape[1], (\n \"input must be number of neurons\" \"by datapoints\"\n )\n return_dict = {}\n\n # compute covariance with numpy function for extra stability\n numx = acts1.shape[0]\n\n covariance = np.cov(acts1, acts2)\n sigmaxx = covariance[:numx, :numx]\n sigmaxy = covariance[:numx, numx:]\n sigmayx = covariance[numx:, :numx]\n sigmayy = covariance[numx:, numx:]\n\n # rescale covariance to make cca computation more stable\n xmax = np.max(np.abs(sigmaxx))\n ymax = np.max(np.abs(sigmayy))\n sigmaxx /= xmax\n sigmayy /= ymax\n sigmaxy /= np.sqrt(xmax * ymax)\n sigmayx /= np.sqrt(xmax * ymax)\n\n ([_, sx, vx], [_, sy, vy], invsqrt_xx, invsqrt_yy, x_idxs, y_idxs) = compute_ccas(\n sigmaxx, sigmaxy, sigmayx, sigmayy, verbose\n )\n\n # if x_idxs or y_idxs is all false, return_dict has zero entries\n if (not np.any(x_idxs)) or (not np.any(y_idxs)):\n return create_zero_dict(compute_dirns, acts1.shape[1])\n\n if compute_dirns:\n # orthonormal directions that are CCA directions\n cca_dirns1 = np.dot(vx, np.dot(invsqrt_xx, acts1[x_idxs]))\n cca_dirns2 = np.dot(vy, np.dot(invsqrt_yy, acts2[y_idxs]))\n\n # get rid of trailing zeros in the cca coefficients\n idx1 = sum_threshold(sx, threshold)\n idx2 = sum_threshold(sy, threshold)\n\n return_dict[\"neuron_coeffs1\"] = np.dot(vx, invsqrt_xx)\n return_dict[\"neuron_coeffs2\"] = np.dot(vy, invsqrt_yy)\n return_dict[\"cca_coef1\"] = sx\n return_dict[\"cca_coef2\"] = sy\n return_dict[\"x_idxs\"] = x_idxs\n return_dict[\"y_idxs\"] = y_idxs\n # summary statistics\n return_dict[\"mean\"] = (np.mean(sx[:idx1]), np.mean(sy[:idx2]))\n return_dict[\"sum\"] = (np.sum(sx), np.sum(sy))\n\n if compute_dirns:\n return_dict[\"cca_dirns1\"] = cca_dirns1\n return_dict[\"cca_dirns2\"] = cca_dirns2\n\n return return_dict\n\n\ndef robust_cca_similarity(\n acts1, acts2, threshold=0.98, compute_dirns=True, verbose=False\n):\n \"\"\"Calls get_cca_similarity multiple times while adding noise.\n This function is very similar to get_cca_similarity, and can be used if\n get_cca_similarity doesn't converge for some pair of inputs. This function\n adds some noise to the activations to help convergence.\n Args:\n acts1: (num_neurons1, data_points) a 2d numpy array of neurons by\n datapoints where entry (i,j) is the output of neuron i on\n datapoint j.\n acts2: (num_neurons2, data_points) same as above, but (potentially)\n for a different set of neurons. Note that acts1 and acts2\n can have different numbers of neurons, but must agree on the\n number of datapoints\n threshold: float between 0, 1 used to get rid of trailing zeros in\n the cca correlation coefficients to output more accurate\n summary statistics of correlations.\n compute_dirns: boolean value determining whether actual cca\n directions are computed. (For very large neurons and\n datasets, may be better to compute these on the fly\n instead of store in memory.)\n Returns:\n return_dict: A dictionary with outputs from the cca computations.\n Contains neuron coefficients (combinations of neurons\n that correspond to cca directions), the cca correlation\n coefficients (how well aligned directions correlate),\n x and y idxs (for computing cca directions on the fly\n if compute_dirns=False), and summary statistics. 
If\n                    compute_dirns=True, the cca directions are also\n                    computed.\n    \"\"\"\n\n    for trial in range(num_cca_trials):\n        try:\n            return_dict = get_cca_similarity(\n                acts1, acts2, threshold, compute_dirns, verbose=verbose\n            )\n        except np.linalg.LinAlgError:\n            acts1 = acts1 * 1e-1 + np.random.normal(size=acts1.shape) * epsilon\n            acts2 = acts2 * 1e-1 + np.random.normal(size=acts2.shape) * epsilon\n            if trial + 1 == num_cca_trials:\n                raise\n\n    return return_dict\n",
"# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"\nThe core code for applying Canonical Correlation Analysis to deep networks.\nThis module contains the core functions to apply canonical correlation analysis\nto deep neural networks. The main function is get_cca_similarity, which takes in\ntwo sets of activations, typically the neurons in two layers and their outputs\non all of the datapoints D = [d_1,...,d_m] that have been passed through.\nInputs have shape (num_neurons1, m), (num_neurons2, m). This can be directly\napplied used on fully connected networks. For convolutional layers, the 3d block\nof neurons can either be flattened entirely, along channels, or alternatively,\nthe dft_ccas (Discrete Fourier Transform) module can be used.\nSee https://arxiv.org/abs/1706.05806 for full details.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\n\nnum_cca_trials = 5\nepsilon = 1e-6\n\n\ndef positivedef_matrix_sqrt(array):\n \"\"\"Stable method for computing matrix square roots, supports complex matrices.\n Args:\n array: A numpy 2d array, can be complex valued that is a positive\n definite symmetric (or hermitian) matrix\n Returns:\n sqrtarray: The matrix square root of array\n \"\"\"\n w, v = np.linalg.eigh(array)\n # A - np.dot(v, np.dot(np.diag(w), v.T))\n wsqrt = np.sqrt(w)\n sqrtarray = np.dot(v, np.dot(np.diag(wsqrt), np.conj(v).T))\n return sqrtarray\n\n\ndef remove_small(sigma_xx, sigma_xy, sigma_yx, sigma_yy, threshold=1e-6):\n \"\"\"Takes covariance between X, Y, and removes values of small magnitude.\n Args:\n sigma_xx: 2d numpy array, variance matrix for x\n sigma_xy: 2d numpy array, crossvariance matrix for x,y\n sigma_yx: 2d numpy array, crossvariance matrixy for x,y,\n (conjugate) transpose of sigma_xy\n sigma_yy: 2d numpy array, variance matrix for y\n threshold: cutoff value for norm below which directions are thrown\n away\n Returns:\n sigma_xx_crop: 2d array with low x norm directions removed\n sigma_xy_crop: 2d array with low x and y norm directions removed\n sigma_yx_crop: 2d array with low x and y norm directiosn removed\n sigma_yy_crop: 2d array with low y norm directions removed\n x_idxs: indexes of sigma_xx that were removed\n y_idxs: indexes of sigma_yy that were removed\n \"\"\"\n\n x_diag = np.abs(np.diagonal(sigma_xx))\n y_diag = np.abs(np.diagonal(sigma_yy))\n x_idxs = (x_diag >= threshold)\n y_idxs = (y_diag >= threshold)\n\n sigma_xx_crop = sigma_xx[x_idxs][:, x_idxs]\n sigma_xy_crop = sigma_xy[x_idxs][:, y_idxs]\n sigma_yx_crop = sigma_yx[y_idxs][:, x_idxs]\n sigma_yy_crop = sigma_yy[y_idxs][:, y_idxs]\n\n return (sigma_xx_crop, sigma_xy_crop, sigma_yx_crop, sigma_yy_crop, x_idxs,\n y_idxs)\n\n\ndef compute_ccas(sigma_xx, sigma_xy, sigma_yx, sigma_yy, verbose=True):\n \"\"\"Main cca computation function, takes in variances and crossvariances.\n This function takes in the covariances and cross covariances of X, Y,\n preprocesses 
them (removing small magnitudes) and outputs the raw results of\n the cca computation, including cca directions in a rotated space, and the\n cca correlation coefficient values.\n Args:\n sigma_xx: 2d numpy array, (num_neurons_x, num_neurons_x)\n variance matrix for x\n sigma_xy: 2d numpy array, (num_neurons_x, num_neurons_y)\n crossvariance matrix for x,y\n sigma_yx: 2d numpy array, (num_neurons_y, num_neurons_x)\n crossvariance matrix for x,y (conj) transpose of sigma_xy\n sigma_yy: 2d numpy array, (num_neurons_y, num_neurons_y)\n variance matrix for y\n verbose: boolean on whether to print intermediate outputs\n Returns:\n [ux, sx, vx]: [numpy 2d array, numpy 1d array, numpy 2d array]\n ux and vx are (conj) transposes of each other, being\n the canonical directions in the X subspace.\n sx is the set of canonical correlation coefficients-\n how well corresponding directions in vx, Vy correlate\n with each other.\n [uy, sy, vy]: Same as above, but for Y space\n invsqrt_xx: Inverse square root of sigma_xx to transform canonical\n directions back to original space\n invsqrt_yy: Same as above but for sigma_yy\n x_idxs: The indexes of the input sigma_xx that were pruned\n by remove_small\n y_idxs: Same as above but for sigma_yy\n \"\"\"\n\n (sigma_xx, sigma_xy, sigma_yx, sigma_yy, x_idxs, y_idxs) = remove_small(\n sigma_xx, sigma_xy, sigma_yx, sigma_yy)\n\n numx = sigma_xx.shape[0]\n numy = sigma_yy.shape[0]\n\n if numx == 0 or numy == 0:\n return ([0, 0, 0], [0, 0, 0], np.zeros_like(sigma_xx),\n np.zeros_like(sigma_yy), x_idxs, y_idxs)\n\n if verbose:\n print(\"adding eps to diagonal and taking inverse\")\n sigma_xx +=epsilon * np.eye(numx)\n sigma_yy +=epsilon * np.eye(numy)\n inv_xx = np.linalg.pinv(sigma_xx)\n inv_yy = np.linalg.pinv(sigma_yy)\n\n if verbose:\n print(\"taking square root\")\n invsqrt_xx = positivedef_matrix_sqrt(inv_xx)\n invsqrt_yy = positivedef_matrix_sqrt(inv_yy)\n\n if verbose:\n print(\"dot products...\")\n arr_x = np.dot(sigma_yx, invsqrt_xx)\n arr_x = np.dot(inv_yy, arr_x)\n arr_x = np.dot(invsqrt_xx, np.dot(sigma_xy, arr_x))\n arr_y = np.dot(sigma_xy, invsqrt_yy)\n arr_y = np.dot(inv_xx, arr_y)\n arr_y = np.dot(invsqrt_yy, np.dot(sigma_yx, arr_y))\n\n if verbose:\n print(\"trying to take final svd\")\n arr_x_stable = arr_x + epsilon * np.eye(arr_x.shape[0])\n arr_y_stable = arr_y + epsilon * np.eye(arr_y.shape[0])\n try:\n ux, sx, vx = np.linalg.svd(arr_x_stable)\n uy, sy, vy = np.linalg.svd(arr_y_stable)\n except:\n return [0, 0, 0], [0, 0, 0], 0, 0, 0, 0\n sx = np.sqrt(np.abs(sx))\n sy = np.sqrt(np.abs(sy))\n if verbose:\n print(\"computed everything!\")\n\n return [ux, sx, vx], [uy, sy, vy], invsqrt_xx, invsqrt_yy, x_idxs, y_idxs\n\n\ndef sum_threshold(array, threshold):\n \"\"\"Computes threshold index of decreasing nonnegative array by summing.\n This function takes in a decreasing array nonnegative floats, and a\n threshold between 0 and 1. 
It returns the index i at which the sum of the\n array up to i is threshold*total mass of the array.\n Args:\n array: a 1d numpy array of decreasing, nonnegative floats\n threshold: a number between 0 and 1\n Returns:\n i: index at which np.sum(array[:i]) >= threshold\n \"\"\"\n assert (threshold >= 0) and (threshold <= 1), \"print incorrect threshold\"\n\n for i in range(len(array)):\n if np.sum(array[:i]) / np.sum(array) >= threshold:\n return i\n\n\ndef create_zero_dict(compute_dirns, dimension):\n \"\"\"Outputs a zero dict when neuron activation norms too small.\n This function creates a return_dict with appropriately shaped zero entries\n when all neuron activations are very small.\n Args:\n compute_dirns: boolean, whether to have zero vectors for directions\n dimension: int, defines shape of directions\n Returns:\n return_dict: a dict of appropriately shaped zero entries\n \"\"\"\n return_dict = {}\n return_dict[\"mean\"] = (np.asarray(0), np.asarray(0))\n return_dict[\"sum\"] = (np.asarray(0), np.asarray(0))\n return_dict[\"cca_coef1\"] = np.asarray(0)\n return_dict[\"cca_coef2\"] = np.asarray(0)\n return_dict[\"idx1\"] = 0\n return_dict[\"idx2\"] = 0\n\n if compute_dirns:\n return_dict[\"cca_dirns1\"] = np.zeros((1, dimension))\n return_dict[\"cca_dirns2\"] = np.zeros((1, dimension))\n\n return return_dict\n\n\ndef get_cca_similarity(acts1, acts2, threshold=0.98, compute_dirns=True,\n verbose=True):\n \"\"\"The main function for computing cca similarities.\n This function computes the cca similarity between two sets of activations,\n returning a dict with the cca coefficients, a few statistics of the cca\n coefficients, and (optionally) the actual directions.\n Args:\n acts1: (num_neurons1, data_points) a 2d numpy array of neurons by\n datapoints where entry (i,j) is the output of neuron i on\n datapoint j.\n acts2: (num_neurons2, data_points) same as above, but (potentially)\n for a different set of neurons. Note that acts1 and acts2\n can have different numbers of neurons, but must agree on the\n number of datapoints\n threshold: float between 0, 1 used to get rid of trailing zeros in\n the cca correlation coefficients to output more accurate\n summary statistics of correlations.\n compute_dirns: boolean value determining whether actual cca\n directions are computed. (For very large neurons and\n datasets, may be better to compute these on the fly\n instead of store in memory.)\n verbose: Boolean, whether info about intermediate outputs printed\n Returns:\n return_dict: A dictionary with outputs from the cca computations.\n Contains neuron coefficients (combinations of neurons\n that correspond to cca directions), the cca correlation\n coefficients (how well aligned directions correlate),\n x and y idxs (for computing cca directions on the fly\n if compute_dirns=False), and summary statistics. 
If\n compute_dirns=True, the cca directions are also\n computed.\n \"\"\"\n\n # assert dimensionality equal\n assert acts1.shape[1] == acts2.shape[1], \"dimensions don't match\"\n # check that acts1, acts2 are transposition\n assert acts1.shape[0] < acts1.shape[1], (\"input must be number of neurons\"\n \"by datapoints\")\n return_dict = {}\n\n # compute covariance with numpy function for extra stability\n numx = acts1.shape[0]\n\n covariance = np.cov(acts1, acts2)\n sigmaxx = covariance[:numx, :numx]\n sigmaxy = covariance[:numx, numx:]\n sigmayx = covariance[numx:, :numx]\n sigmayy = covariance[numx:, numx:]\n\n # rescale covariance to make cca computation more stable\n xmax = np.max(np.abs(sigmaxx))\n ymax = np.max(np.abs(sigmayy))\n sigmaxx /= xmax\n sigmayy /= ymax\n sigmaxy /= np.sqrt(xmax * ymax)\n sigmayx /= np.sqrt(xmax * ymax)\n\n ([_, sx, vx], [_, sy, vy], invsqrt_xx, invsqrt_yy, x_idxs,\n y_idxs) = compute_ccas(sigmaxx, sigmaxy, sigmayx, sigmayy,\n verbose)\n\n # if x_idxs or y_idxs is all false, return_dict has zero entries\n if (not np.any(x_idxs)) or (not np.any(y_idxs)):\n return create_zero_dict(compute_dirns, acts1.shape[1])\n\n if compute_dirns:\n # orthonormal directions that are CCA directions\n cca_dirns1 = np.dot(vx, np.dot(invsqrt_xx, acts1[x_idxs]))\n cca_dirns2 = np.dot(vy, np.dot(invsqrt_yy, acts2[y_idxs]))\n\n # get rid of trailing zeros in the cca coefficients\n idx1 = sum_threshold(sx, threshold)\n idx2 = sum_threshold(sy, threshold)\n\n return_dict[\"neuron_coeffs1\"] = np.dot(vx, invsqrt_xx)\n return_dict[\"neuron_coeffs2\"] = np.dot(vy, invsqrt_yy)\n return_dict[\"cca_coef1\"] = sx\n return_dict[\"cca_coef2\"] = sy\n return_dict[\"x_idxs\"] = x_idxs\n return_dict[\"y_idxs\"] = y_idxs\n # summary statistics\n return_dict[\"mean\"] = (np.mean(sx[:idx1]), np.mean(sy[:idx2]))\n return_dict[\"sum\"] = (np.sum(sx), np.sum(sy))\n\n if compute_dirns:\n return_dict[\"cca_dirns1\"] = cca_dirns1\n return_dict[\"cca_dirns2\"] = cca_dirns2\n\n return return_dict\n\n\ndef robust_cca_similarity(acts1, acts2, threshold=0.98, compute_dirns=True,\n verbose=False):\n \"\"\"Calls get_cca_similarity multiple times while adding noise.\n This function is very similar to get_cca_similarity, and can be used if\n get_cca_similarity doesn't converge for some pair of inputs. This function\n adds some noise to the activations to help convergence.\n Args:\n acts1: (num_neurons1, data_points) a 2d numpy array of neurons by\n datapoints where entry (i,j) is the output of neuron i on\n datapoint j.\n acts2: (num_neurons2, data_points) same as above, but (potentially)\n for a different set of neurons. Note that acts1 and acts2\n can have different numbers of neurons, but must agree on the\n number of datapoints\n threshold: float between 0, 1 used to get rid of trailing zeros in\n the cca correlation coefficients to output more accurate\n summary statistics of correlations.\n compute_dirns: boolean value determining whether actual cca\n directions are computed. (For very large neurons and\n datasets, may be better to compute these on the fly\n instead of store in memory.)\n Returns:\n return_dict: A dictionary with outputs from the cca computations.\n Contains neuron coefficients (combinations of neurons\n that correspond to cca directions), the cca correlation\n coefficients (how well aligned directions correlate),\n x and y idxs (for computing cca directions on the fly\n if compute_dirns=False), and summary statistics. 
If\n                    compute_dirns=True, the cca directions are also\n                    computed.\n    \"\"\"\n\n    for trial in range(num_cca_trials):\n        try:\n            return_dict = get_cca_similarity(acts1, acts2, threshold, compute_dirns,\n                                             verbose=verbose)\n        except np.linalg.LinAlgError:\n            acts1 = acts1 * 1e-1 + np.random.normal(size=acts1.shape) * epsilon\n            acts2 = acts2 * 1e-1 + np.random.normal(size=acts2.shape) * epsilon\n            if trial + 1 == num_cca_trials:\n                raise\n\n    return return_dict",
"# By Alexandra Lee\n# (updated October 2018)\n#\n# Encode gene expression data into low dimensional latent space using\n# Tybalt with 2-hidden layers\n\nimport os\nimport argparse\nimport pandas as pd\nimport tensorflow as tf\n\n# To ensure reproducibility using Keras during development\n# https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development\nimport numpy as np\nimport random as rn\n\nfrom keras.layers import Input, Dense, Lambda, Layer, Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.models import Model, Sequential\nfrom keras import metrics, optimizers\nfrom keras.callbacks import Callback\n\nfrom functions.helper_ae import sampling_maker, CustomVariationalLayer, WarmUpCallback\n\n\ndef tybalt_2layer_model(\n learning_rate,\n batch_size,\n epochs,\n kappa,\n intermediate_dim,\n latent_dim,\n epsilon_std,\n rnaseq,\n base_dir,\n analysis_name):\n \"\"\"\n Train 2-layer Tybalt model using input dataset\n\n Arguments\n ----------\n learning_rate: float\n \tStep size used for gradient descent. In other words, it's how quickly the methods is learning\n \n batch_size: int\n \tTraining is performed in batches. So this determines the number of samples to consider at a given time.\n \n epochs: int\n \tThe number of times to train over the entire input dataset.\n \n kappa: float\n \tHow fast to linearly ramp up KL loss \n \n intermediate_dim: int\n \tSize of the hidden layer\n \n latent_dim: int\n \tSize of the bottleneck layer\n \n epsilon_std: float\n \tStandard deviation of Normal distribution to sample latent space\n \n rnaseq: pandas.dataframe \n \tGene expression data\n\n base_dir: str\n \tParent directory where data/, scripts/, models/ are subdirectories\n \n analysis_name: str\n \tName that will be used to create a subdirectory where results and models will be stored\n\n Returns\n --------\n model_decoder_file, weights_decoder_file: .h5 file\n \tFiles used to generate decoding neural networks to use in downstream analysis\n\n model_encoder_file, weights_encoder_file: .h5 file\n \tFiles used to generate encoding neural networks to use in downstream analysis\n\n encoded_file: .txt file\n \tFile containing input data encoded into latent space using encoder neural network\n \n \"\"\"\n\n # The below is necessary in Python 3.2.3 onwards to\n # have reproducible behavior for certain hash-based operations.\n # See these references for further details:\n # https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED\n # https://github.com/keras-team/keras/issues/2280#issuecomment-306959926\n randomState = 123\n import os\n os.environ['PYTHONHASHSEED'] = '0'\n\n # The below is necessary for starting Numpy generated random numbers\n # in a well-defined initial state.\n\n np.random.seed(42)\n\n # The below is necessary for starting core Python generated random numbers\n # in a well-defined state.\n\n rn.seed(12345)\n\n # Force TensorFlow to use single thread.\n # Multiple threads are a potential source of\n # non-reproducible results.\n # For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res\n\n session_conf = tf.ConfigProto(\n intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)\n\n from keras import backend as K\n\n # The below tf.set_random_seed() will make random number generation\n # in the TensorFlow backend have a well-defined initial state.\n # For further details, see: 
https://www.tensorflow.org/api_docs/python/tf/set_random_seed\n\n tf.set_random_seed(1234)\n\n sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)\n K.set_session(sess)\n\n # Load rnaseq data\n rnaseq = rnaseq\n\n # Initialize hyper parameters\n\n original_dim = rnaseq.shape[1]\n beta = K.variable(0)\n\n stat_file = os.path.join(\n base_dir,\n \"output\",\n \"stats\",\n analysis_name,\n \"tybalt_2layer_{}latent_stats.tsv\".format(latent_dim))\n\n hist_plot_file = os.path.join(\n base_dir,\n \"output\",\n \"viz\",\n analysis_name,\n \"tybalt_2layer_{}latent_hist.png\".format(latent_dim))\n\n encoded_file = os.path.join(\n base_dir,\n \"data\",\n \"encoded\",\n analysis_name,\n \"train_input_2layer_{}latent_encoded.txt\".format(latent_dim))\n\n model_encoder_file = os.path.join(\n base_dir,\n \"models\",\n analysis_name,\n \"tybalt_2layer_{}latent_encoder_model.h5\".format(latent_dim))\n\n weights_encoder_file = os.path.join(\n base_dir,\n \"models\",\n analysis_name,\n \"tybalt_2layer_{}latent_encoder_weights.h5\".format(latent_dim))\n\n model_decoder_file = os.path.join(\n base_dir,\n \"models\",\n analysis_name,\n \"tybalt_2layer_{}latent_decoder_model.h5\".format(latent_dim))\n\n weights_decoder_file = os.path.join(\n base_dir,\n \"models\",\n analysis_name,\n \"tybalt_2layer_{}latent_decoder_weights.h5\".format(latent_dim))\n\n # Data initalizations\n\n # Split 10% test set randomly\n test_set_percent = 0.1\n rnaseq_test_df = rnaseq.sample(\n frac=test_set_percent, random_state=randomState)\n rnaseq_train_df = rnaseq.drop(rnaseq_test_df.index)\n\n # Create a placeholder for an encoded (original-dimensional)\n rnaseq_input = Input(shape=(original_dim, ))\n\n # Architecture of VAE\n\n # ENCODER\n\n # Input layer is compressed into a mean and log variance vector of size\n # `latent_dim`. 
Each layer is initialized with glorot uniform weights and each\n # step (dense connections, batch norm,and relu activation) are funneled\n # separately\n # Each vector of length `latent_dim` are connected to the rnaseq input tensor\n\n # \"z_mean_dense_linear\" is the encoded representation of the input\n # Take as input arrays of shape (*, original dim) and output arrays of shape (*, latent dim)\n # Combine input from previous layer using linear summ\n # Normalize the activations (combined weighted nodes of the previous layer)\n # Transformation that maintains the mean activation close to 0 and the activation standard deviation close to 1.\n # Apply ReLU activation function to combine weighted nodes from previous layer\n # relu = threshold cutoff (cutoff value will be learned)\n # ReLU function filters noise\n\n # X is encoded using Q(z|X) to yield mu(X), sigma(X) that describes latent space distribution\n hidden_dense_linear = Dense(\n intermediate_dim, kernel_initializer='glorot_uniform')(rnaseq_input)\n hidden_dense_batchnorm = BatchNormalization()(hidden_dense_linear)\n hidden_encoded = Activation('relu')(hidden_dense_batchnorm)\n\n # Note:\n # Normalize and relu filter at each layer adds non-linear component (relu is non-linear function)\n # If architecture is layer-layer-normalization-relu then the computation is still linear\n # Add additional layers in triplicate\n z_mean_dense_linear = Dense(\n latent_dim, kernel_initializer='glorot_uniform')(hidden_encoded)\n z_mean_dense_batchnorm = BatchNormalization()(z_mean_dense_linear)\n z_mean_encoded = Activation('relu')(z_mean_dense_batchnorm)\n\n z_log_var_dense_linear = Dense(\n latent_dim, kernel_initializer='glorot_uniform')(rnaseq_input)\n z_log_var_dense_batchnorm = BatchNormalization()(z_log_var_dense_linear)\n z_log_var_encoded = Activation('relu')(z_log_var_dense_batchnorm)\n\n # Customized layer\n # Returns the encoded and randomly sampled z vector\n # Takes two keras layers as input to the custom sampling function layer with a\n # latent_dim` output\n #\n # sampling():\n # randomly sample similar points z from the latent normal distribution that is assumed to generate the data,\n # via z = z_mean + exp(z_log_sigma) * epsilon, where epsilon is a random normal tensor\n # z ~ Q(z|X)\n # Note: there is a trick to reparameterize to standard normal distribution so that the space is differentiable and\n # therefore gradient descent can be used\n #\n # Returns the encoded and randomly sampled z vector\n # Takes two keras layers as input to the custom sampling function layer with a\n # latent_dim` output\n z = Lambda(sampling_maker(epsilon_std),\n output_shape=(latent_dim, ))([z_mean_encoded, z_log_var_encoded])\n\n # DECODER\n\n # The decoding layer is much simpler with a single layer glorot uniform\n # initialized and sigmoid activation\n # Reconstruct P(X|z)\n decoder_model = Sequential()\n decoder_model.add(\n Dense(intermediate_dim, activation='relu', input_dim=latent_dim))\n decoder_model.add(Dense(original_dim, activation='sigmoid'))\n rnaseq_reconstruct = decoder_model(z)\n\n # CONNECTIONS\n # fully-connected network\n adam = optimizers.Adam(lr=learning_rate)\n vae_layer = CustomVariationalLayer(original_dim, z_log_var_encoded, z_mean_encoded, beta)([\n rnaseq_input, rnaseq_reconstruct])\n vae = Model(rnaseq_input, vae_layer)\n vae.compile(optimizer=adam, loss=None, loss_weights=[beta])\n\n # Training\n\n # fit Model\n # hist: record of the training loss at each epoch\n hist = vae.fit(\n np.array(rnaseq_train_df),\n 
shuffle=True,\n epochs=epochs,\n batch_size=batch_size,\n validation_data=(np.array(rnaseq_test_df), None),\n callbacks=[WarmUpCallback(beta, kappa)])\n\n # Use trained model to make predictions\n encoder = Model(rnaseq_input, z_mean_encoded)\n\n encoded_rnaseq_df = encoder.predict_on_batch(rnaseq)\n encoded_rnaseq_df = pd.DataFrame(encoded_rnaseq_df, index=rnaseq.index)\n\n encoded_rnaseq_df.columns.name = 'sample_id'\n encoded_rnaseq_df.columns = encoded_rnaseq_df.columns + 1\n\n # Visualize training performance\n history_df = pd.DataFrame(hist.history)\n ax = history_df.plot()\n ax.set_xlabel('Epochs')\n ax.set_ylabel('VAE Loss')\n fig = ax.get_figure()\n fig.savefig(hist_plot_file)\n\n del ax, fig\n\n # Output\n\n # Save training performance\n history_df = pd.DataFrame(hist.history)\n history_df = history_df.assign(learning_rate=learning_rate)\n history_df = history_df.assign(batch_size=batch_size)\n history_df = history_df.assign(epochs=epochs)\n history_df = history_df.assign(kappa=kappa)\n history_df.to_csv(stat_file, sep='\\t', index=False)\n\n # Save latent space representation\n encoded_rnaseq_df.to_csv(encoded_file, sep='\\t')\n\n # Save models\n # (source) https://machinelearningmastery.com/save-load-keras-deep-learning-models/\n # Save encoder model\n encoder.save(model_encoder_file)\n\n # serialize weights to HDF5\n encoder.save_weights(weights_encoder_file)\n\n # Save decoder model\n # (source) https://github.com/greenelab/tybalt/blob/master/scripts/nbconverted/tybalt_vae.py\n # can generate from any sampled z vector\n decoder_input = Input(shape=(latent_dim, ))\n _x_decoded_mean = decoder_model(decoder_input)\n decoder = Model(decoder_input, _x_decoded_mean)\n\n decoder.save(model_decoder_file)\n\n # serialize weights to HDF5\n decoder.save_weights(weights_decoder_file)\n\n # Save weight matrix: how each gene contribute to each feature\n # build a generator that can sample from the learned distribution\n # can generate from any sampled z vector\n decoder_input = Input(shape=(latent_dim, ))\n x_decoded_mean = decoder_model(decoder_input)\n decoder = Model(decoder_input, x_decoded_mean)\n weights = []\n for layer in decoder.layers:\n weights.append(layer.get_weights())\n"
] |
[
[
"numpy.diag",
"numpy.dot",
"numpy.linalg.svd",
"numpy.sum",
"numpy.sqrt",
"numpy.abs",
"numpy.conj",
"numpy.asarray",
"numpy.eye",
"numpy.linalg.pinv",
"numpy.random.normal",
"numpy.linalg.eigh",
"numpy.cov",
"numpy.mean",
"numpy.zeros_like",
"numpy.any",
"numpy.zeros",
"numpy.diagonal"
],
[
"numpy.diag",
"numpy.dot",
"numpy.linalg.svd",
"numpy.sum",
"numpy.sqrt",
"numpy.abs",
"numpy.conj",
"numpy.asarray",
"numpy.eye",
"numpy.linalg.pinv",
"numpy.random.normal",
"numpy.linalg.eigh",
"numpy.cov",
"numpy.mean",
"numpy.zeros_like",
"numpy.any",
"numpy.zeros",
"numpy.diagonal"
],
[
"numpy.random.seed",
"pandas.DataFrame",
"tensorflow.ConfigProto",
"tensorflow.set_random_seed",
"tensorflow.get_default_graph",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
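The two cca_core.py copies above build everything from a single covariance call: np.cov on the stacked activation matrices, then block-slicing into sigma_xx / sigma_xy / sigma_yx / sigma_yy. A minimal self-contained sketch of that step, with toy shapes rather than real activations:

import numpy as np

acts1 = np.random.randn(4, 50)   # (num_neurons1, datapoints)
acts2 = np.random.randn(3, 50)   # (num_neurons2, datapoints)
numx = acts1.shape[0]

cov = np.cov(acts1, acts2)       # (numx + numy, numx + numy); acts2 rows stacked below acts1
sigma_xx = cov[:numx, :numx]
sigma_xy = cov[:numx, numx:]
sigma_yx = cov[numx:, :numx]
sigma_yy = cov[numx:, numx:]
assert sigma_xx.shape == (4, 4) and sigma_xy.shape == (4, 3)

vae.py's long comment describes the reparameterization trick, z = z_mean + exp(z_log_sigma) * epsilon. A numpy sketch of just that sampling step, assuming the log-variance convention Tybalt uses (standard deviation exp(z_log_var / 2)); the shapes are illustrative:

import numpy as np

def sample_z(z_mean, z_log_var, epsilon_std=1.0):
    # eps is the only source of randomness; the rest stays differentiable
    eps = np.random.normal(0.0, epsilon_std, size=z_mean.shape)
    return z_mean + np.exp(z_log_var / 2.0) * eps

z = sample_z(np.zeros((2, 3)), np.zeros((2, 3)))
print(z.shape)   # (2, 3)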
chenjun2hao/segmentation.pytorch
|
[
"a319d0f006559dd58bd853065e6fe79ae8c23791"
] |
[
"eval_multipro.py"
] |
[
"# System libs\nimport os\nimport argparse\nfrom distutils.version import LooseVersion\nfrom multiprocessing import Queue, Process\n# Numerical libs\nimport numpy as np\nimport math\nimport torch\nimport torch.nn as nn\nfrom scipy.io import loadmat\n# Our libs\nfrom mit_semseg.config import cfg\nfrom mit_semseg.dataset import ValDataset\nfrom mit_semseg.models import ModelBuilder, SegmentationModule\nfrom mit_semseg.utils import AverageMeter, colorEncode, accuracy, intersectionAndUnion, parse_devices, setup_logger\nfrom mit_semseg.lib.nn import user_scattered_collate, async_copy_to\nfrom mit_semseg.lib.utils import as_numpy\nfrom PIL import Image\nfrom tqdm import tqdm\n\ncolors = loadmat('data/color150.mat')['colors']\n\n\ndef visualize_result(data, pred, dir_result):\n (img, seg, info) = data\n\n # segmentation\n seg_color = colorEncode(seg, colors)\n\n # prediction\n pred_color = colorEncode(pred, colors)\n\n # aggregate images and save\n im_vis = np.concatenate((img, seg_color, pred_color),\n axis=1).astype(np.uint8)\n\n img_name = info.split('/')[-1]\n Image.fromarray(im_vis).save(os.path.join(dir_result, img_name.replace('.jpg', '.png')))\n\n\ndef evaluate(segmentation_module, loader, cfg, gpu_id, result_queue):\n segmentation_module.eval()\n\n for batch_data in loader:\n # process data\n batch_data = batch_data[0]\n seg_label = as_numpy(batch_data['seg_label'][0])\n img_resized_list = batch_data['img_data']\n\n with torch.no_grad():\n segSize = (seg_label.shape[0], seg_label.shape[1])\n scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])\n scores = async_copy_to(scores, gpu_id)\n\n for img in img_resized_list:\n feed_dict = batch_data.copy()\n feed_dict['img_data'] = img\n del feed_dict['img_ori']\n del feed_dict['info']\n feed_dict = async_copy_to(feed_dict, gpu_id)\n\n # forward pass\n scores_tmp = segmentation_module(feed_dict, segSize=segSize)\n scores = scores + scores_tmp / len(cfg.DATASET.imgSizes)\n\n _, pred = torch.max(scores, dim=1)\n pred = as_numpy(pred.squeeze(0).cpu())\n\n # calculate accuracy and SEND THEM TO MASTER\n acc, pix = accuracy(pred, seg_label)\n intersection, union = intersectionAndUnion(pred, seg_label, cfg.DATASET.num_class)\n result_queue.put_nowait((acc, pix, intersection, union))\n\n # visualization\n if cfg.VAL.visualize:\n visualize_result(\n (batch_data['img_ori'], seg_label, batch_data['info']),\n pred,\n os.path.join(cfg.DIR, 'result')\n )\n\n\ndef worker(cfg, gpu_id, start_idx, end_idx, result_queue):\n torch.cuda.set_device(gpu_id)\n\n # Dataset and Loader\n dataset_val = ValDataset(\n cfg.DATASET.root_dataset,\n cfg.DATASET.list_val,\n cfg.DATASET,\n start_idx=start_idx, end_idx=end_idx)\n loader_val = torch.utils.data.DataLoader(\n dataset_val,\n batch_size=cfg.VAL.batch_size,\n shuffle=False,\n collate_fn=user_scattered_collate,\n num_workers=2)\n\n # Network Builders\n net_encoder = ModelBuilder.build_encoder(\n arch=cfg.MODEL.arch_encoder.lower(),\n fc_dim=cfg.MODEL.fc_dim,\n weights=cfg.MODEL.weights_encoder)\n net_decoder = ModelBuilder.build_decoder(\n arch=cfg.MODEL.arch_decoder.lower(),\n fc_dim=cfg.MODEL.fc_dim,\n num_class=cfg.DATASET.num_class,\n weights=cfg.MODEL.weights_decoder,\n use_softmax=True)\n\n crit = nn.NLLLoss(ignore_index=-1)\n\n segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)\n\n segmentation_module.cuda()\n\n # Main loop\n evaluate(segmentation_module, loader_val, cfg, gpu_id, result_queue)\n\n\ndef main(cfg, gpus):\n with open(cfg.DATASET.list_val, 'r') as f:\n lines = 
f.readlines()\n num_files = len(lines)\n\n num_files_per_gpu = math.ceil(num_files / len(gpus))\n\n pbar = tqdm(total=num_files)\n\n acc_meter = AverageMeter()\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n\n result_queue = Queue(500)\n procs = []\n for idx, gpu_id in enumerate(gpus):\n start_idx = idx * num_files_per_gpu\n end_idx = min(start_idx + num_files_per_gpu, num_files)\n proc = Process(target=worker, args=(cfg, gpu_id, start_idx, end_idx, result_queue))\n print('gpu:{}, start_idx:{}, end_idx:{}'.format(gpu_id, start_idx, end_idx))\n proc.start()\n procs.append(proc)\n\n # master fetches results\n processed_counter = 0\n while processed_counter < num_files:\n if result_queue.empty():\n continue\n (acc, pix, intersection, union) = result_queue.get()\n acc_meter.update(acc, pix)\n intersection_meter.update(intersection)\n union_meter.update(union)\n processed_counter += 1\n pbar.update(1)\n\n for p in procs:\n p.join()\n\n # summary\n iou = intersection_meter.sum / (union_meter.sum + 1e-10)\n for i, _iou in enumerate(iou):\n print('class [{}], IoU: {:.4f}'.format(i, _iou))\n\n print('[Eval Summary]:')\n print('Mean IoU: {:.4f}, Accuracy: {:.2f}%'\n .format(iou.mean(), acc_meter.average()*100))\n\n print('Evaluation Done!')\n\n\nif __name__ == '__main__':\n assert LooseVersion(torch.__version__) >= LooseVersion('0.4.0'), \\\n 'PyTorch>=0.4.0 is required'\n\n parser = argparse.ArgumentParser(\n description=\"PyTorch Semantic Segmentation Validation\"\n )\n parser.add_argument(\n \"--cfg\",\n default=\"config/ade20k-resnet50dilated-ppm_deepsup.yaml\",\n metavar=\"FILE\",\n help=\"path to config file\",\n type=str,\n )\n parser.add_argument(\n \"--gpus\",\n default=\"0\",\n help=\"gpus to use, e.g. 0-3 or 0,1,2,3\"\n )\n parser.add_argument(\n \"opts\",\n help=\"Modify config options using the command-line\",\n default=None,\n nargs=argparse.REMAINDER,\n )\n args = parser.parse_args()\n\n cfg.merge_from_file(args.cfg)\n cfg.merge_from_list(args.opts)\n # cfg.freeze()\n\n logger = setup_logger(distributed_rank=0) # TODO\n logger.info(\"Loaded configuration file {}\".format(args.cfg))\n logger.info(\"Running with config:\\n{}\".format(cfg))\n\n # absolute paths of model weights\n cfg.MODEL.weights_encoder = os.path.join(\n cfg.DIR, 'encoder_' + cfg.VAL.checkpoint)\n cfg.MODEL.weights_decoder = os.path.join(\n cfg.DIR, 'decoder_' + cfg.VAL.checkpoint)\n assert os.path.exists(cfg.MODEL.weights_encoder) and \\\n os.path.exists(cfg.MODEL.weights_decoder), \"checkpoint does not exitst!\"\n\n if not os.path.isdir(os.path.join(cfg.DIR, \"result\")):\n os.makedirs(os.path.join(cfg.DIR, \"result\"))\n\n # Parse gpu ids\n gpus = parse_devices(args.gpus)\n gpus = [x.replace('gpu', '') for x in gpus]\n gpus = [int(x) for x in gpus]\n\n main(cfg, gpus)\n"
] |
[
[
"torch.nn.NLLLoss",
"torch.max",
"torch.cuda.set_device",
"torch.zeros",
"torch.utils.data.DataLoader",
"scipy.io.loadmat",
"numpy.concatenate",
"torch.no_grad"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
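eval_multipro.py above splits the validation list across GPUs, each worker pushing per-image results into a multiprocessing Queue that the master drains. A stripped-down sketch of that producer/consumer shape, with counters standing in for the real evaluation:

from multiprocessing import Process, Queue

def worker(start_idx, end_idx, result_queue):
    for i in range(start_idx, end_idx):
        result_queue.put((1.0, 1))        # stand-in for (accuracy, pixel count)

if __name__ == '__main__':
    num_files, num_per_worker = 10, 5
    result_queue = Queue(500)
    procs = [Process(target=worker, args=(i * num_per_worker, (i + 1) * num_per_worker, result_queue))
             for i in range(2)]
    for p in procs:
        p.start()
    processed = 0
    while processed < num_files:
        acc, pix = result_queue.get()     # blocking get; no need to poll the queue
        processed += 1
    for p in procs:
        p.join()

A blocking result_queue.get() also sidesteps the busy-wait on result_queue.empty() in the original main() loop.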
rohitmalik10/MOJOwork
|
[
"8767a39b4397032b98d878bbc9aea7642a83f75c"
] |
[
"Assignments 2/question3.py"
] |
[
"import requests\nimport json\n\nurl = \"https://awm16002.srv.wifi.arista.com/new/webservice/login/modScanWifi/86400\"\n\npayload = \"{\\n\\\"type\\\":\\\"apikeycredentials\\\",\\n\\\"keyId\\\":\\\"KEY-ATN565039-674\\\",\\n\\\"keyValue\\\":\\\"16d7b32456a7700568d359fa452818bd\\\"\\n}\"\nheaders1 = {\n 'Content-Type': \"application/json\",\n 'cache-control': \"no-cache\",\n 'Postman-Token': \"305e8595-dcd0-4d7b-82c7-0b27f86ba1fe\"\n }\n\nresponse = requests.request(\"POST\", url, data=payload, headers=headers1)\nprint(response)\n\nurl1 = \"https://awm16002.srv.wifi.arista.com/new/webservice/v2/devices/clients\"\n\nheaders1 = {\n 'User-Agent': \"PostmanRuntime/7.15.2\",\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Postman-Token': \"0bdf46b9-a7fc-40d0-a7be-dafe1405f330,11166b29-8af0-43ff-a155-eb71a74dafb7\",\n 'Host': \"awm16002.srv.wifi.arista.com\",\n 'Cookie': \"JSESSIONID=22E95AE729DED106F391529AFE1855EA\",\n 'Accept-Encoding': \"gzip, deflate\",\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n\nresponse1 = requests.request(\"GET\", url1, headers=headers1)\n\nprint(response1)\ndata=response1.json()\n\nimport pandas as pd \ndf=pd.DataFrame(data)\nprint(df)\ndf.to_csv(\"Visualize.csv\", index = False)"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
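question3.py above logs in with one POST and then re-sends the JSESSIONID cookie by hand in the second request. The same flow is usually written with requests.Session, which carries cookies across calls automatically. A sketch under that assumption; the host, paths and key fields below are placeholders, not the real Arista endpoint or credentials:

import requests
import pandas as pd

BASE = "https://example.invalid"                  # placeholder host
with requests.Session() as s:
    # login; the session keeps any cookie the server sets
    s.post(BASE + "/login", json={"type": "apikeycredentials", "keyId": "KEY-XXX", "keyValue": "..."})
    resp = s.get(BASE + "/devices/clients")       # placeholder path
    df = pd.DataFrame(resp.json())
    df.to_csv("Visualize.csv", index=False)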
fourierer/mmpose
|
[
"1b481a4e46cea7cf47e01669d61ac4670f361e07"
] |
[
"mmpose/models/losses/mse_loss.py"
] |
[
"import torch\nimport torch.nn as nn\n\nfrom ..registry import LOSSES\n\n\[email protected]_module()\nclass JointsMSELoss(nn.Module):\n \"\"\"MSE loss for heatmaps.\n\n Args:\n use_target_weight (bool): Option to use weighted MSE loss.\n Different joint types may have different target weights.\n \"\"\"\n\n def __init__(self, use_target_weight=False):\n super().__init__()\n self.criterion = nn.MSELoss()\n self.use_target_weight = use_target_weight\n\n def forward(self, output, target, target_weight):\n \"\"\"Forward function.\"\"\"\n batch_size = output.size(0)\n num_joints = output.size(1)\n\n heatmaps_pred = output.reshape(\n (batch_size, num_joints, -1)).split(1, 1)\n heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)\n\n loss = 0.\n\n for idx in range(num_joints):\n heatmap_pred = heatmaps_pred[idx].squeeze(1)\n heatmap_gt = heatmaps_gt[idx].squeeze(1)\n if self.use_target_weight:\n loss += self.criterion(\n heatmap_pred.mul(target_weight[:, idx]),\n heatmap_gt.mul(target_weight[:, idx]))\n else:\n loss += self.criterion(heatmap_pred, heatmap_gt)\n\n return loss / num_joints\n\n\[email protected]_module()\nclass CombinedTargetMSELoss(nn.Module):\n \"\"\"MSE loss for combined target.\n CombinedTarget: The combination of classification target\n (response map) and regression target (offset map).\n Paper ref: Huang et al. The Devil is in the Details: Delving into\n Unbiased Data Processing for Human Pose Estimation (CVPR 2020).\n\n Args:\n use_target_weight (bool): Option to use weighted MSE loss.\n Different joint types may have different target weights.\n \"\"\"\n\n def __init__(self, use_target_weight):\n super().__init__()\n self.criterion = nn.MSELoss(reduction='mean')\n self.use_target_weight = use_target_weight\n\n def forward(self, output, target, target_weight):\n batch_size = output.size(0)\n num_channels = output.size(1)\n heatmaps_pred = output.reshape(\n (batch_size, num_channels, -1)).split(1, 1)\n heatmaps_gt = target.reshape(\n (batch_size, num_channels, -1)).split(1, 1)\n loss = 0.\n num_joints = num_channels // 3\n for idx in range(num_joints):\n heatmap_pred = heatmaps_pred[idx * 3].squeeze()\n heatmap_gt = heatmaps_gt[idx * 3].squeeze()\n offset_x_pred = heatmaps_pred[idx * 3 + 1].squeeze()\n offset_x_gt = heatmaps_gt[idx * 3 + 1].squeeze()\n offset_y_pred = heatmaps_pred[idx * 3 + 2].squeeze()\n offset_y_gt = heatmaps_gt[idx * 3 + 2].squeeze()\n if self.use_target_weight:\n heatmap_pred = heatmap_pred.mul(target_weight[:, idx])\n heatmap_gt = heatmap_gt.mul(target_weight[:, idx])\n # classification loss\n loss += 0.5 * self.criterion(heatmap_pred, heatmap_gt)\n # regression loss\n loss += 0.5 * self.criterion(heatmap_gt * offset_x_pred,\n heatmap_gt * offset_x_gt)\n loss += 0.5 * self.criterion(heatmap_gt * offset_y_pred,\n heatmap_gt * offset_y_gt)\n return loss / num_joints\n\n\[email protected]_module()\nclass JointsOHKMMSELoss(nn.Module):\n \"\"\"MSE loss with online hard keypoint mining.\n\n Args:\n use_target_weight (bool): Option to use weighted MSE loss.\n Different joint types may have different target weights.\n topk (int): Only top k joint losses are kept.\n \"\"\"\n\n def __init__(self, use_target_weight=False, topk=8):\n super().__init__()\n assert topk > 0\n self.criterion = nn.MSELoss(reduction='none')\n self.use_target_weight = use_target_weight\n self.topk = topk\n\n def _ohkm(self, loss):\n \"\"\"Online hard keypoint mining.\"\"\"\n ohkm_loss = 0.\n N = len(loss)\n for i in range(N):\n sub_loss = loss[i]\n _, topk_idx = 
torch.topk(\n sub_loss, k=self.topk, dim=0, sorted=False)\n tmp_loss = torch.gather(sub_loss, 0, topk_idx)\n ohkm_loss += torch.sum(tmp_loss) / self.topk\n ohkm_loss /= N\n return ohkm_loss\n\n def forward(self, output, target, target_weight):\n \"\"\"Forward function.\"\"\"\n batch_size = output.size(0)\n num_joints = output.size(1)\n if num_joints < self.topk:\n raise ValueError(f'topk ({self.topk}) should not '\n f'larger than num_joints ({num_joints}).')\n heatmaps_pred = output.reshape(\n (batch_size, num_joints, -1)).split(1, 1)\n heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)\n\n losses = []\n for idx in range(num_joints):\n heatmap_pred = heatmaps_pred[idx].squeeze(1)\n heatmap_gt = heatmaps_gt[idx].squeeze(1)\n if self.use_target_weight:\n losses.append(\n self.criterion(\n heatmap_pred.mul(target_weight[:, idx]),\n heatmap_gt.mul(target_weight[:, idx])))\n else:\n losses.append(self.criterion(heatmap_pred, heatmap_gt))\n\n losses = [loss.mean(dim=1).unsqueeze(dim=1) for loss in losses]\n losses = torch.cat(losses, dim=1)\n\n return self._ohkm(losses)\n\nif __name__=='__main__':\n '''\n 从下面代码的调试结果来看,OHKM实际上是取top-k个关键点的平均损失进行反传\n '''\n def ohkm(loss):\n ohkm_loss = 0.\n N = len(loss) # 64\n for i in range(N):\n sub_loss = loss[i]\n _, topk_idx = torch.topk(\n sub_loss, k=8, dim=0, sorted=False)\n # print(topk_idx) # 损失值最大的k个关键点的索引,如tensor([ 8, 16, 15, 4, 3, 5, 2, 14])\n tmp_loss = torch.gather(sub_loss, 0, topk_idx)\n # print(tmp_loss.size()) # torch.Size([8])\n ohkm_loss += torch.sum(tmp_loss) / 8\n ohkm_loss /= N\n return ohkm_loss\n \n\n criterion = nn.MSELoss(reduction='none')\n output = torch.randn(64,17,48,64)\n target = torch.randn(64,17,48,64)\n batch_size = output.size(0)\n num_joints = output.size(1)\n if num_joints < 8:\n raise ValueError(f'topk ({self.topk}) should not '\n f'larger than num_joints ({num_joints}).')\n heatmaps_pred = output.reshape(\n (batch_size, num_joints, -1)).split(1, 1)\n heatmaps_gt = target.reshape((batch_size, num_joints, -1)).split(1, 1)\n # print(len(heatmaps_pred)) # 17\n # print(heatmaps_pred[0].size()) # torch.Size([64,1,3072])\n losses = []\n for idx in range(num_joints):\n heatmap_pred = heatmaps_pred[idx].squeeze(1)\n # print(heatmap_pred.size()) # torch.Size([64,3072])\n heatmap_gt = heatmaps_gt[idx].squeeze(1)\n losses.append(criterion(heatmap_pred, heatmap_gt))\n # print(len(losses)) # 17\n # print(losses[0].size()) # torch.Size([64,3072])\n losses = [loss.mean(dim=1).unsqueeze(dim=1) for loss in losses]\n losses = torch.cat(losses, dim=1)\n # print(len(losses)) # 64\n # print(losses[0].size()) #torch.Size([17])\n\n final_loss = ohkm(losses)\n"
] |
[
[
"torch.cat",
"torch.randn",
"torch.gather",
"torch.sum",
"torch.topk",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
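The _ohkm loop above asks torch.topk for indices and then gathers the same values back out with torch.gather; since topk already returns the values, the reduction can be written directly on them. A minimal sketch with toy shapes (64 samples, 17 joints, k=8):

import torch

def ohkm(per_joint_loss, k=8):
    # per_joint_loss: (batch, num_joints), one scalar loss per joint and sample
    topk_val, _ = torch.topk(per_joint_loss, k=k, dim=1, sorted=False)
    return topk_val.mean()   # == mean over batch of (top-k sum / k)

loss = ohkm(torch.rand(64, 17))
print(loss)

On a (batch, num_joints) tensor this matches the loop in _ohkm exactly: each row contributes the mean of its k largest per-joint losses, averaged over the batch.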
hunglvfimo/keras-retinanet
|
[
"af5948a1d52edf30f2eb50d7332a6a3a9971ad66"
] |
[
"keras_retinanet/callbacks/eval.py"
] |
[
"\"\"\"\nCopyright 2017-2018 Fizyr (https://fizyr.com)\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport keras\nfrom ..utils.eval import evaluate\n\nimport os\nimport csv\n\nclass Evaluate(keras.callbacks.Callback):\n \"\"\" Evaluation callback for arbitrary datasets.\n \"\"\"\n\n def __init__(\n self,\n generator,\n iou_threshold=0.5,\n score_threshold=0.05,\n max_detections=100,\n save_path=None,\n tensorboard=None,\n csv_logger=None,\n weighted_average=False,\n verbose=1\n ):\n \"\"\" Evaluate a given dataset using a given model at the end of every epoch during training.\n\n # Arguments\n generator : The generator that represents the dataset to evaluate.\n iou_threshold : The threshold used to consider when a detection is positive or negative.\n score_threshold : The score confidence threshold to use for detections.\n max_detections : The maximum number of detections to use per image.\n save_path : The path to save images with visualized detections to.\n tensorboard : Instance of keras.callbacks.TensorBoard used to log the mAP value.\n weighted_average : Compute the mAP using the weighted average of precisions among classes.\n verbose : Set the verbosity level, by default this is set to 1.\n \"\"\"\n self.generator = generator\n self.iou_threshold = iou_threshold\n self.score_threshold = score_threshold\n self.max_detections = max_detections\n self.save_path = save_path\n self.tensorboard = tensorboard\n self.csv_logger = csv_logger\n self.weighted_average = weighted_average\n self.verbose = verbose\n\n super(Evaluate, self).__init__()\n\n def on_epoch_end(self, epoch, logs=None):\n logs = logs or {}\n\n # run evaluation\n average_precisions = evaluate(\n self.generator,\n self.model,\n iou_threshold=self.iou_threshold,\n score_threshold=self.score_threshold,\n max_detections=self.max_detections,\n save_path=self.save_path\n )\n\n # compute per class average precision\n total_instances = []\n precisions = []\n label_names = []\n for label, (average_precision, num_annotations ) in average_precisions.items():\n label_names.append(self.generator.label_to_name(label))\n if self.verbose == 1:\n print('{:.0f} instances of class'.format(num_annotations),\n self.generator.label_to_name(label), 'with average precision: {:.4f}'.format(average_precision))\n total_instances.append(num_annotations)\n precisions.append(average_precision)\n if self.weighted_average:\n self.mean_ap = sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)\n else:\n self.mean_ap = sum(precisions) / sum(x > 0 for x in total_instances)\n\n if self.tensorboard is not None and self.tensorboard.writer is not None:\n import tensorflow as tf\n summary = tf.Summary()\n summary_value = summary.value.add()\n summary_value.simple_value = self.mean_ap\n summary_value.tag = \"mAP\"\n self.tensorboard.writer.add_summary(summary, epoch)\n\n if self.csv_logger is not None:\n if os.path.isfile (self.csv_logger):\n with open(self.csv_logger, mode='a', newline='') as csv_f:\n writer = csv.writer(csv_f)\n 
writer.writerow([epoch, self.mean_ap] + precisions)\n else:\n with open(self.csv_logger, mode='w', newline='') as csv_f:\n writer = csv.writer(csv_f)\n # write header\n writer.writerow([\"epoch\", \"mAP\"] + label_names)\n writer.writerow([epoch, self.mean_ap] + precisions)\n\n logs['mAP'] = self.mean_ap\n\n if self.verbose == 1:\n print('mAP: {:.4f}'.format(self.mean_ap))\n"
] |
[
[
"tensorflow.Summary"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
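The record above archives a keras-retinanet evaluation callback whose only flagged API is `tensorflow.Summary`. As a quick reference, here is a minimal sketch of that TensorBoard logging pattern in isolation. It assumes TensorFlow 1.x, where `tf.Summary` and `tf.summary.FileWriter` exist (TF 2.x keeps the same protobuf under `tf.compat.v1`); the log directory and values are hypothetical, not taken from the record.

```python
# Minimal sketch, assuming TensorFlow 1.x (tf.Summary was removed from the
# top-level namespace in TF 2; use tf.compat.v1.Summary there).
import tensorflow as tf

def log_scalar(writer, tag, value, step):
    """Write one scalar to TensorBoard, as the Evaluate callback does for mAP."""
    summary = tf.Summary()
    summary_value = summary.value.add()
    summary_value.simple_value = value   # e.g. the epoch's mAP
    summary_value.tag = tag              # e.g. "mAP"
    writer.add_summary(summary, step)
    writer.flush()

# Hypothetical usage: 'logs/' is a placeholder log directory.
writer = tf.summary.FileWriter('logs/')
log_scalar(writer, 'mAP', 0.73, step=5)
writer.close()
```

The callback in the record reuses the `writer` owned by a `keras.callbacks.TensorBoard` instance instead of creating its own, which keeps all training scalars in one event file.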
shuvrag/breathe-easy
|
[
"0d20e5f7a81669e1e2f298fb8999837310c0593c"
] |
[
"breathe-easy.py"
] |
[
"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport datetime\nfrom datetime import timedelta\nimport plotly.graph_objects as go\n\ntimechoice = [' ', '00:00:00', '01:00:00', '02:00:00', '03:00:00',\n '04:00:00', '05:00:00', '06:00:00', '07:00:00',\n'08:00:00', '09:00:00', '10:00:00', '11:00:00',\n'12:00:00', '13:00:00', '14:00:00', '15:00:00',\n'16:00:00', '17:00:00', '18:00:00', '19:00:00',\n'20:00:00', '21:00:00', '22:00:00', '23:00:00']\n\n\nt1 = '10:00:00'\nt1 = pd.to_datetime(t1)\ntime=[]\ntime.append(t1)\nfor i in range(1,25):\n time.append(t1 + timedelta(hours=i))\n\n#st.write(time)\n\n#time = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]\n\ntime = ['10:00:00', '11:00:00',\n'12:00:00', '13:00:00', '14:00:00', '15:00:00',\n'16:00:00', '17:00:00', '18:00:00', '19:00:00',\n'20:00:00', '21:00:00', '22:00:00', '23:00:00', \n'00:00:00', '01:00:00', '02:00:00', '03:00:00',\n '04:00:00', '05:00:00', '06:00:00', '07:00:00',\n'08:00:00', '09:00:00']\n\n#pm = [167, 191, 229, 249, 172, 171, 174, 105, 86, 67, 53, 56, 63, 61, 88, 139, 124, 98, 93, 111, 134, 97, 111, 101, 130]\npm = [166, 140, 113, 90, 87, 93, 90, 77, 76, 87, 116, 135, 144, 132, 105, 103, 143, 131, 154, 182, 184, 187, 157, 129, 118]\n\n#st.header(\"Beta Distribution Tutorial\")\n\nst.title('Breathe-Easy PM\\u2082\\u22C5\\u2085 Forecast')\n\n#input_city = st.selectbox(\"What city are you in?\", [\"Kolkata\"])\n\n#input_date = st.text_input('What is the date and time you are thinking of going out?', '2020-01-11 15:00:00')\n\ninput_date = '2020-02-18 15:00:00'\n\ninput_date = pd.to_datetime(input_date)\n\n#input_day = st.date_input('Choose the date when you want to go outside:', input_date)\ninput_day = st.date_input('Please choose the date when you want to go outside:')\n\n#st.write(input_day)\n\n#input_time = st.text_input('Choose the time when you want to go outside?', '10:00:00')\ninput_time = st.selectbox('Please choose the time when you want to go outside:', timechoice)\n\n#in_date = pd.to_datetime(prediction, format = '%j')\n\n#in_date = in_date.replace(year = 2020)\n\n#input_datetime = pd.to_datetime(input_date)\n#st.write(input_datetime)\n\ninput_date_time = str(input_day) + ' ' + input_time\n\n#st.write('The particulate matter and weather forecast in Kolkata at', input_date_time, 'is:')\n\ninput_date_time = pd.to_datetime(input_date_time)\n\nif input_time != ' ':\n \n st.write('The particulate matter forecast in Kolkata at', input_date_time, 'is:', pm[time.index(input_time)])\n\n# st.write('The particulate matter and weather forecast in Kolkata for the next 24 hours is as follows:')\n# st.write('The best particulate matter forecast in Kolkata is at:') \n\n fig1 = go.Figure()\n\n # Add scatter trace for line\n fig1.add_trace(go.Scatter(\n x = time,\n y = pm,\n mode=\"lines\",\n name=\"pollutant concentration\",\nhovertext=[\"Temp 82, Hmdty 82, PM2.5 166\", \"Temp 81, Hmdty 81, PM2.5 140\", \"Temp 79, Hmdty 79, PM2.5 113\",\n \"Temp 77, Hmdty 77, PM2.5 90\", \"Temp 73, Hmdty 73, PM2.5 87\", \"Temp 72, Hmdty 72, PM2.5 93\",\n \"Temp 70, Hmdty 70, PM2.5 90\", \"Temp 66, Hmdty 72, PM2.5 77\", \"Temp 64, Hmdty 70, PM2.5 76\",\n \"Temp 63, Hmdty 78, PM2.5 87\", \"Temp 63, Hmdty 83, PM2.5 116\", \"Temp 61, Hmdty 88, PM2.5 135\",\n \"Temp 61, Hmdty 82, PM2.5 144\",\n \"Temp 63, Hmdty 82, PM2.5 132\", \"Temp 68, Hmdty 88, PM2.5 105\", \"Temp 72, Hmdty 88, PM2.5 103\",\n \"Temp 77, Hmdty 82, PM2.5 143\", \"Temp 81, Hmdty 73, PM2.5 131\", \"Temp 82, Hmdty 64, PM2.5 
154\",\n \"Temp 84, Hmdty 57, PM2.5 182\", \"Temp 84, Hmdty 51, PM2.5 184\", \"Temp 84, Hmdty 48, PM2.5 187\",\n \"Temp 82, Hmdty 42, PM2.5 157\", \"Temp 81, Hmdty 48, PM2.5 129\", \"Temp 77, Hmdty 42, PM2.5 118\"\n ],\n\n hoverinfo=\"text\",\n marker=dict(color=\"green\"),\n showlegend=False\n ))\n\n fig1.update_layout(\n title=\"Pollution for the next 24 hours\",\n xaxis_title=\"Time\",\n yaxis_title=\"Conc. of PM 2.5 in micrograms/m^3\",\n font=dict(\n family=\"Gravitas One, monospace\",\n size=18,\n color=\"#7f7f7f\"\n ),\n shapes=[\n go.layout.Shape(\n type=\"rect\",\n # x-reference is assigned to the x-values\n xref=\"paper\",\n # y-reference is assigned to the plot paper [0,1]\n yref=\"y\",\n x0=0,\n y0=0,\n x1=1,\n y1=50,\n fillcolor=\"Green\",\n opacity=0.4,\n layer=\"below\",\n line_width=0,\n ),\n go.layout.Shape(\n type=\"rect\",\n xref=\"paper\",\n yref=\"y\",\n x0=0,\n y0=50,\n x1=1,\n y1=100,\n fillcolor=\"Yellow\",\n opacity=0.4,\n layer=\"below\",\n line_width=0,\n ),\n go.layout.Shape(\n type=\"rect\",\n xref=\"paper\",\n yref=\"y\",\n x0=0,\n y0=100,\n x1=1,\n y1=150,\n fillcolor=\"Orange\",\n opacity=0.4,\n layer=\"below\",\n line_width=0,\n ),\n go.layout.Shape(\n type=\"rect\",\n xref=\"paper\",\n yref=\"y\",\n x0=0,\n y0=150,\n x1=1,\n y1=200,\n fillcolor=\"Red\",\n opacity=0.4,\n layer=\"below\",\n line_width=0,\n ),\n go.layout.Shape(\n type=\"rect\",\n xref=\"paper\",\n yref=\"y\",\n x0=0,\n y0=201,\n x1=1,\n y1=300,\n fillcolor=\"Purple\",\n opacity=0.4,\n layer=\"below\",\n line_width=0,\n ),\n go.layout.Shape(\n type=\"rect\",\n xref=\"paper\",\n yref=\"y\",\n x0=0,\n y0=300,\n x1=1,\n y1=500,\n fillcolor=\"Purple\",\n opacity=0.4,\n layer=\"below\",\n line_width=0,\n ),\n # dict(\n go.layout.Shape(\n type=\"rect\",\n xref=\"x\",\n yref=\"y\",\n x0=7,\n y0=0,\n x1=8,\n y1=pm[7],\n fillcolor=\"Blue\",\n opacity=0.5,\n layer=\"below\",\n line_width=0,\n )\n ]\n )\n\n st.write(fig1)\n #fig.show()\n\n fig2 = go.Figure()\n\n # Add scatter trace for line\n fig2.add_trace(go.Scatter(\n #x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],\n x = time,\n# y=[77, 81, 82, 82, 84, 82, 82, 79, 73, 75, 72, 72, 72, 68, 66, 64, 64, 64, 64, 63, 64, 63, 61, 61, 61],\n y = [82, 81, 79, 77, 73, 72, 70, 72, 70, 66, 64, 63, 63, 61, 61, 63, 68, 72, 77, 81, 82, 84, 84, 84, 82],\n mode=\"lines\",\n name=\"temperature\"\n ))\n\n fig2.update_layout(\n title=\"Temperature for the next 24 hours\",\n xaxis_title=\"Time\",\n yaxis_title=\"Temperature (in F)\",\n font=dict(\n family=\"Gravitas One, monospace\",\n size=18,\n color=\"#7f7f7f\"\n )\n )\n\n st.write(fig2)\n\n fig3 = go.Figure()\n\n # Add scatter trace for line\n fig3.add_trace(go.Scatter(\n# x=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24],\nx = time,\n # y=[57, 51, 48, 45, 42, 42, 39, 44, 50, 57, 57, 60, 60, 68, 73, 77, 73, 73, 77, 77, 77, 82, 88, 88, 94],\n y = [82, 81, 79, 77, 73, 72, 70, 72, 70, 78, 83, 88, 82, 82, 88, 88, 82, 73, 64, 57, 51, 48, 42, 48, 42],\n mode=\"lines\",\n name=\"humidity\"\n ))\n\n fig3.update_layout(\n title=\"Humidity for the next 24 hours\",\n xaxis_title=\"Time\",\n yaxis_title=\"Humidity %\",\n font=dict(\n family=\"Gravitas One, monospace\",\n size=18,\n color=\"#7f7f7f\"\n )\n )\n\n st.write(fig3)\n #fig.show()\n"
] |
[
[
"pandas.to_datetime"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
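The breathe-easy record flags a single API, `pandas.to_datetime`. Below is a minimal sketch of how the app uses it: a date string and an 'HH:MM:SS' choice are concatenated and parsed into one `Timestamp`, which can then be shifted with `timedelta` to build a 24-hour axis. The values are illustrative stand-ins for the Streamlit inputs.

```python
# Minimal sketch of the pandas.to_datetime usage flagged above.
import pandas as pd
from datetime import timedelta

input_day = '2020-02-18'    # stand-in for what st.date_input returns
input_time = '15:00:00'     # one entry from the app's timechoice list

ts = pd.to_datetime(input_day + ' ' + input_time)
hours = [ts + timedelta(hours=i) for i in range(24)]

print(ts)        # 2020-02-18 15:00:00
print(hours[1])  # 2020-02-18 16:00:00
```

This parse works unchanged across the wide pandas version range listed in the record (0.19 through 2.0), which is consistent with the extractor finding so many possible versions.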
wunlung/tensorflow
|
[
"e2b05608125d66716ada95433850b3ce8405c1e4"
] |
[
"tensorflow/contrib/tensor_forest/client/random_forest.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A tf.learn implementation of tensor_forest (extremely random forests).\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib import framework as contrib_framework\n\nfrom tensorflow.contrib.learn.python.learn.estimators import estimator\nfrom tensorflow.contrib.learn.python.learn.estimators import head as head_lib\nfrom tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib\n\nfrom tensorflow.contrib.tensor_forest.client import eval_metrics\nfrom tensorflow.contrib.tensor_forest.python import tensor_forest\n\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import resource_variable_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import session_run_hook\n\n\nKEYS_NAME = 'keys'\nLOSS_NAME = 'rf_training_loss'\nTREE_PATHS_PREDICTION_KEY = 'tree_paths'\n\nEPSILON = 0.000001\n\n\ndef _assert_float32(tensors):\n \"\"\"Assert all tensors are float32.\n\n Args:\n tensors: `Tensor` or `dict` of `Tensor` objects.\n\n Raises:\n TypeError: if any tensor is not float32.\n \"\"\"\n if not isinstance(tensors, dict):\n tensors = [tensors]\n else:\n tensors = tensors.values()\n for tensor in tensors:\n if tensor.dtype.base_dtype != dtypes.float32:\n raise TypeError('Expected dtype=float32, %s.' 
% tensor)\n\n\nclass TensorForestRunOpAtEndHook(session_run_hook.SessionRunHook):\n\n def __init__(self, op_dict):\n \"\"\"Ops is a dict of {name: op} to run before the session is destroyed.\"\"\"\n self._ops = op_dict\n\n def end(self, session):\n for name in sorted(self._ops.keys()):\n logging.info('{0}: {1}'.format(name, session.run(self._ops[name])))\n\n\nclass TensorForestLossHook(session_run_hook.SessionRunHook):\n \"\"\"Monitor to request stop when loss stops decreasing.\"\"\"\n\n def __init__(self,\n early_stopping_rounds,\n early_stopping_loss_threshold=None,\n loss_op=None):\n self.early_stopping_rounds = early_stopping_rounds\n self.early_stopping_loss_threshold = early_stopping_loss_threshold\n self.loss_op = loss_op\n self.min_loss = None\n self.last_step = -1\n # self.steps records the number of steps for which the loss has been\n # non-decreasing\n self.steps = 0\n\n def before_run(self, run_context):\n loss = (self.loss_op if self.loss_op is not None else\n run_context.session.graph.get_operation_by_name(\n LOSS_NAME).outputs[0])\n return session_run_hook.SessionRunArgs(\n {'global_step': contrib_framework.get_global_step(),\n 'current_loss': loss})\n\n def after_run(self, run_context, run_values):\n current_loss = run_values.results['current_loss']\n current_step = run_values.results['global_step']\n self.steps += 1\n # Guard against the global step going backwards, which might happen\n # if we recover from something.\n if self.last_step == -1 or self.last_step > current_step:\n logging.info('TensorForestLossHook resetting last_step.')\n self.last_step = current_step\n self.steps = 0\n self.min_loss = None\n return\n\n self.last_step = current_step\n if (self.min_loss is None or current_loss <\n (self.min_loss - self.min_loss * self.early_stopping_loss_threshold)):\n self.min_loss = current_loss\n self.steps = 0\n if self.steps > self.early_stopping_rounds:\n logging.info('TensorForestLossHook requesting stop.')\n run_context.request_stop()\n\n\nclass EveryCheckpointPreSaveListener(\n basic_session_run_hooks.CheckpointSaverListener):\n \"\"\"Runs a given op before each checkpoint save.\"\"\"\n\n def __init__(self, op):\n \"\"\"Initializes the object.\n\n Args:\n op: An op to run before each checkpoint save.\n \"\"\"\n self._op = op\n\n def before_save(self, session, global_step_value):\n session.run(self._op)\n\n\ndef get_default_head(params, weights_name, name=None):\n if params.regression:\n return head_lib.regression_head(\n weight_column_name=weights_name,\n label_dimension=params.num_outputs,\n enable_centered_bias=False,\n head_name=name)\n else:\n return head_lib.multi_class_head(\n params.num_classes,\n weight_column_name=weights_name,\n enable_centered_bias=False,\n head_name=name)\n\n\ndef get_model_fn(params,\n graph_builder_class,\n device_assigner,\n weights_name=None,\n model_head=None,\n keys_name=None,\n early_stopping_rounds=100,\n early_stopping_loss_threshold=0.001,\n num_trainers=1,\n trainer_id=0,\n report_feature_importances=False,\n local_eval=False,\n head_scope=None):\n \"\"\"Return a model function given a way to construct a graph builder.\"\"\"\n if model_head is None:\n model_head = get_default_head(params, weights_name)\n\n def _model_fn(features, labels, mode):\n \"\"\"Function that returns predictions, training loss, and training op.\"\"\"\n if (isinstance(features, ops.Tensor) or\n isinstance(features, sparse_tensor.SparseTensor)):\n features = {'features': features}\n weights = None\n if weights_name and weights_name in features:\n weights 
= features.pop(weights_name)\n\n keys = None\n if keys_name and keys_name in features:\n keys = features.pop(keys_name)\n\n # If we're doing eval, optionally ignore device_assigner.\n # Also ignore device assigner if we're exporting (mode == INFER)\n dev_assn = device_assigner\n if (mode == model_fn_lib.ModeKeys.INFER or\n (local_eval and mode == model_fn_lib.ModeKeys.EVAL)):\n dev_assn = None\n\n graph_builder = graph_builder_class(params,\n device_assigner=dev_assn)\n\n logits, tree_paths = graph_builder.inference_graph(features)\n\n summary.scalar('average_tree_size', graph_builder.average_size())\n # For binary classification problems, convert probabilities to logits.\n # Includes hack to get around the fact that a probability might be 0 or 1.\n if not params.regression and params.num_classes == 2:\n class_1_probs = array_ops.slice(logits, [0, 1], [-1, 1])\n logits = math_ops.log(\n math_ops.maximum(class_1_probs / math_ops.maximum(\n 1.0 - class_1_probs, EPSILON), EPSILON))\n\n # labels might be None if we're doing prediction (which brings up the\n # question of why we force everything to adhere to a single model_fn).\n training_graph = None\n training_hooks = []\n if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:\n with ops.control_dependencies([logits.op]):\n training_graph = control_flow_ops.group(\n graph_builder.training_graph(\n features, labels, input_weights=weights,\n num_trainers=num_trainers,\n trainer_id=trainer_id),\n state_ops.assign_add(contrib_framework.get_global_step(), 1))\n\n # Put weights back in\n if weights is not None:\n features[weights_name] = weights\n\n # TensorForest's training graph isn't calculated directly from the loss\n # like many other models.\n def _train_fn(unused_loss):\n return training_graph\n\n model_ops = model_head.create_model_fn_ops(\n features=features,\n labels=labels,\n mode=mode,\n train_op_fn=_train_fn,\n logits=logits,\n scope=head_scope)\n\n # Ops are run in lexigraphical order of their keys. 
Run the resource\n # clean-up op last.\n all_handles = graph_builder.get_all_resource_handles()\n ops_at_end = {\n '9: clean up resources': control_flow_ops.group(\n *[resource_variable_ops.destroy_resource_op(handle)\n for handle in all_handles])}\n\n if report_feature_importances:\n ops_at_end['1: feature_importances'] = (\n graph_builder.feature_importances())\n\n training_hooks.append(TensorForestRunOpAtEndHook(ops_at_end))\n\n if early_stopping_rounds:\n training_hooks.append(\n TensorForestLossHook(\n early_stopping_rounds,\n early_stopping_loss_threshold=early_stopping_loss_threshold,\n loss_op=model_ops.loss))\n\n model_ops.training_hooks.extend(training_hooks)\n\n if keys is not None:\n model_ops.predictions[keys_name] = keys\n\n if params.inference_tree_paths:\n model_ops.predictions[TREE_PATHS_PREDICTION_KEY] = tree_paths\n\n return model_ops\n\n return _model_fn\n\n\nclass TensorForestEstimator(estimator.Estimator):\n \"\"\"An estimator that can train and evaluate a random forest.\n\n Example:\n\n ```python\n params = tf.contrib.tensor_forest.python.tensor_forest.ForestHParams(\n num_classes=2, num_features=40, num_trees=10, max_nodes=1000)\n\n # Estimator using the default graph builder.\n estimator = TensorForestEstimator(params, model_dir=model_dir)\n\n # Or estimator using TrainingLossForest as the graph builder.\n estimator = TensorForestEstimator(\n params, graph_builder_class=tensor_forest.TrainingLossForest,\n model_dir=model_dir)\n\n # Input builders\n def input_fn_train: # returns x, y\n ...\n def input_fn_eval: # returns x, y\n ...\n estimator.fit(input_fn=input_fn_train)\n estimator.evaluate(input_fn=input_fn_eval)\n\n # Predict returns an iterable of dicts.\n results = list(estimator.predict(x=x))\n prob0 = results[0][eval_metrics.INFERENCE_PROB_NAME]\n prediction0 = results[0][eval_metrics.INFERENCE_PRED_NAME]\n ```\n \"\"\"\n\n def __init__(self,\n params,\n device_assigner=None,\n model_dir=None,\n graph_builder_class=tensor_forest.RandomForestGraphs,\n config=None,\n weights_name=None,\n keys_name=None,\n feature_engineering_fn=None,\n early_stopping_rounds=100,\n early_stopping_loss_threshold=0.001,\n num_trainers=1,\n trainer_id=0,\n report_feature_importances=False,\n local_eval=False,\n version=None,\n head=None):\n \"\"\"Initializes a TensorForestEstimator instance.\n\n Args:\n params: ForestHParams object that holds random forest hyperparameters.\n These parameters will be passed into `model_fn`.\n device_assigner: An `object` instance that controls how trees get\n assigned to devices. If `None`, will use\n `tensor_forest.RandomForestDeviceAssigner`.\n model_dir: Directory to save model parameters, graph, etc. To continue\n training a previously saved model, load checkpoints saved to this\n directory into an estimator.\n graph_builder_class: An `object` instance that defines how TF graphs for\n random forest training and inference are built. By default will use\n `tensor_forest.RandomForestGraphs`. Can be overridden by version\n kwarg.\n config: `RunConfig` object to configure the runtime settings.\n weights_name: A string defining feature column name representing\n weights. Will be multiplied by the loss of the example. Used to\n downweight or boost examples during training.\n keys_name: A string naming one of the features to strip out and\n pass through into the inference/eval results dict. Useful for\n associating specific examples with their prediction.\n feature_engineering_fn: Feature engineering function. 
Takes features and\n labels which are the output of `input_fn` and returns features and\n labels which will be fed into the model.\n early_stopping_rounds: Allows training to terminate early if the forest is\n no longer growing. 100 by default. Set to a Falsy value to disable\n the default training hook.\n early_stopping_loss_threshold: Percentage (as fraction) that loss must\n improve by within early_stopping_rounds steps, otherwise training will\n terminate.\n num_trainers: Number of training jobs, which will partition trees\n among them.\n trainer_id: Which trainer this instance is.\n report_feature_importances: If True, print out feature importances\n during evaluation.\n local_eval: If True, don't use a device assigner for eval. This is to\n support some common setups where eval is done on a single machine, even\n though training might be distributed.\n version: Unused.\n head: A heads_lib.Head object that calculates losses and such. If None,\n one will be automatically created based on params.\n\n Returns:\n A `TensorForestEstimator` instance.\n \"\"\"\n super(TensorForestEstimator, self).__init__(\n model_fn=get_model_fn(\n params.fill(),\n graph_builder_class,\n device_assigner,\n model_head=head,\n weights_name=weights_name,\n keys_name=keys_name,\n early_stopping_rounds=early_stopping_rounds,\n early_stopping_loss_threshold=early_stopping_loss_threshold,\n num_trainers=num_trainers,\n trainer_id=trainer_id,\n report_feature_importances=report_feature_importances,\n local_eval=local_eval),\n model_dir=model_dir,\n config=config,\n feature_engineering_fn=feature_engineering_fn)\n\n\ndef get_combined_model_fn(model_fns):\n \"\"\"Get a combined model function given a list of other model fns.\n\n The model function returned will call the individual model functions and\n combine them appropriately. For:\n\n training ops: tf.group them.\n loss: average them.\n predictions: concat probabilities such that predictions[*][0-C1] are the\n probablities for output 1 (where C1 is the number of classes in output 1),\n predictions[*][C1-(C1+C2)] are the probabilities for output 2 (where C2\n is the number of classes in output 2), etc. 
Also stack predictions such\n that predictions[i][j] is the class prediction for example i and output j.\n\n This assumes that labels are 2-dimensional, with labels[i][j] being the\n label for example i and output j, where forest j is trained using only\n output j.\n\n Args:\n model_fns: A list of model functions obtained from get_model_fn.\n\n Returns:\n A ModelFnOps instance.\n \"\"\"\n def _model_fn(features, labels, mode):\n \"\"\"Function that returns predictions, training loss, and training op.\"\"\"\n model_fn_ops = []\n for i in range(len(model_fns)):\n with variable_scope.variable_scope('label_{0}'.format(i)):\n sliced_labels = array_ops.slice(labels, [0, i], [-1, 1])\n model_fn_ops.append(\n model_fns[i](features, sliced_labels, mode))\n training_hooks = []\n for mops in model_fn_ops:\n training_hooks += mops.training_hooks\n predictions = {}\n if (mode == model_fn_lib.ModeKeys.EVAL or\n mode == model_fn_lib.ModeKeys.INFER):\n # Flatten the probabilities into one dimension.\n predictions[eval_metrics.INFERENCE_PROB_NAME] = array_ops.concat(\n [mops.predictions[eval_metrics.INFERENCE_PROB_NAME]\n for mops in model_fn_ops], axis=1)\n predictions[eval_metrics.INFERENCE_PRED_NAME] = array_ops.stack(\n [mops.predictions[eval_metrics.INFERENCE_PRED_NAME]\n for mops in model_fn_ops], axis=1)\n loss = None\n if (mode == model_fn_lib.ModeKeys.EVAL or\n mode == model_fn_lib.ModeKeys.TRAIN):\n loss = math_ops.reduce_sum(\n array_ops.stack(\n [mops.loss for mops in model_fn_ops])) / len(model_fn_ops)\n\n train_op = None\n if mode == model_fn_lib.ModeKeys.TRAIN:\n train_op = control_flow_ops.group(\n *[mops.train_op for mops in model_fn_ops])\n return model_fn_lib.ModelFnOps(\n mode=mode,\n predictions=predictions,\n loss=loss,\n train_op=train_op,\n training_hooks=training_hooks,\n scaffold=None,\n output_alternatives=None)\n\n return _model_fn\n\n\nclass MultiForestMultiHeadEstimator(estimator.Estimator):\n \"\"\"An estimator that can train a forest for a multi-headed problems.\n\n This class essentially trains separate forests (each with their own\n ForestHParams) for each output.\n\n For multi-headed regression, a single-headed TensorForestEstimator can\n be used to train a single model that predicts all outputs. This class can\n be used to train separate forests for each output.\n \"\"\"\n\n def __init__(self, params_list, device_assigner=None, model_dir=None,\n graph_builder_class=tensor_forest.RandomForestGraphs,\n config=None, weights_name=None, keys_name=None,\n feature_engineering_fn=None,\n early_stopping_rounds=100,\n num_trainers=1, trainer_id=0,\n report_feature_importances=False,\n local_eval=False):\n \"\"\"Initializes a TensorForestEstimator instance.\n\n Args:\n params_list: A list of ForestHParams objects for each head, given in order\n of outputs in the label tensor to be trained on.\n device_assigner: An `object` instance that controls how trees get\n assigned to devices. If `None`, will use\n `tensor_forest.RandomForestDeviceAssigner`.\n model_dir: Directory to save model parameters, graph, etc. To continue\n training a previously saved model, load checkpoints saved to this\n directory into an estimator.\n graph_builder_class: An `object` instance that defines how TF graphs for\n random forest training and inference are built. By default will use\n `tensor_forest.RandomForestGraphs`.\n config: `RunConfig` object to configure the runtime settings.\n weights_name: A string defining feature column name representing\n weights. Will be multiplied by the loss of the example. 
Used to\n downweight or boost examples during training.\n keys_name: A string naming one of the features to strip out and\n pass through into the inference/eval results dict. Useful for\n associating specific examples with their prediction.\n feature_engineering_fn: Feature engineering function. Takes features and\n labels which are the output of `input_fn` and returns features and\n labels which will be fed into the model.\n early_stopping_rounds: Allows training to terminate early if the forest is\n no longer growing. 100 by default. Set to a Falsy value to disable\n the default training hook.\n num_trainers: Number of training jobs, which will partition trees\n among them.\n trainer_id: Which trainer this instance is.\n report_feature_importances: If True, print out feature importances\n during evaluation.\n local_eval: If True, don't use a device assigner for eval. This is to\n support some common setups where eval is done on a single machine, even\n though training might be distributed.\n\n Returns:\n A `TensorForestEstimator` instance.\n \"\"\"\n model_fns = []\n for i in range(len(params_list)):\n params = params_list[i].fill()\n model_fns.append(\n get_model_fn(\n params,\n graph_builder_class,\n device_assigner,\n model_head=get_default_head(\n params, weights_name, name='head{0}'.format(i)),\n weights_name=weights_name,\n keys_name=keys_name,\n early_stopping_rounds=early_stopping_rounds,\n num_trainers=num_trainers,\n trainer_id=trainer_id,\n report_feature_importances=report_feature_importances,\n local_eval=local_eval,\n head_scope='output{0}'.format(i)))\n\n super(MultiForestMultiHeadEstimator, self).__init__(\n model_fn=get_combined_model_fn(model_fns),\n model_dir=model_dir,\n config=config,\n feature_engineering_fn=feature_engineering_fn)\n"
] |
[
[
"tensorflow.contrib.learn.python.learn.estimators.head.regression_head",
"tensorflow.contrib.learn.python.learn.estimators.model_fn.ModelFnOps",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.ops.array_ops.slice",
"tensorflow.python.ops.control_flow_ops.group",
"tensorflow.python.ops.resource_variable_ops.destroy_resource_op",
"tensorflow.contrib.framework.get_global_step",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.contrib.learn.python.learn.estimators.head.multi_class_head",
"tensorflow.python.framework.ops.control_dependencies",
"tensorflow.python.ops.math_ops.maximum",
"tensorflow.python.ops.array_ops.stack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
}
] |
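Among the APIs flagged in the tensor_forest record, the most self-contained technique is the binary-classification probability-to-logit conversion in `get_model_fn`, which clamps with `EPSILON` so probabilities of exactly 0 or 1 do not produce infinities. Here is a NumPy transcription of that arithmetic (the archived file does it with `math_ops.log` and `math_ops.maximum` on graph tensors); it is a sketch of the formula only, not the TF graph code.

```python
# NumPy sketch of the clamped logit conversion used in get_model_fn.
import numpy as np

EPSILON = 0.000001  # same constant the module defines

def probs_to_logits(class_1_probs):
    """logit = log(p / (1 - p)), with both p and 1 - p clamped away from 0."""
    p = np.asarray(class_1_probs, dtype=np.float64)
    ratio = np.maximum(p / np.maximum(1.0 - p, EPSILON), EPSILON)
    return np.log(ratio)

print(probs_to_logits([0.0, 0.5, 1.0]))  # finite values even at the endpoints
```

At p = 0 the ratio is clamped up to EPSILON and at p = 1 the denominator is clamped, so the logits saturate at roughly ±13.8 instead of diverging.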
golf-canada/airbyte
|
[
"a81b183a6b62d6bb4256347aaf39a3ada061aabe"
] |
[
"airbyte-integrations/connectors/source-file/integration_tests/conftest.py"
] |
[
"\"\"\"\nMIT License\n\nCopyright (c) 2020 Airbyte\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\"\"\"\n\n\nimport json\nimport os\nimport socket\nimport tempfile\nimport uuid\nfrom pathlib import Path\nfrom typing import Mapping\n\nimport boto3\nimport pandas\nimport pytest\nfrom botocore.errorfactory import ClientError\nfrom google.api_core.exceptions import Conflict\nfrom google.cloud import storage\nfrom paramiko.client import AutoAddPolicy, SSHClient\nfrom paramiko.ssh_exception import SSHException\n\nHERE = Path(__file__).parent.absolute()\n\n\[email protected](scope=\"session\")\ndef docker_compose_file() -> Path:\n return HERE / \"docker-compose.yml\"\n\n\[email protected](scope=\"session\")\ndef google_cloud_service_file() -> Path:\n return HERE.parent / \"secrets/gcs.json\"\n\n\[email protected](scope=\"session\")\ndef google_cloud_service_credentials(google_cloud_service_file) -> Mapping:\n with open(google_cloud_service_file) as json_file:\n return json.load(json_file)\n\n\[email protected](scope=\"session\")\ndef aws_credentials() -> Mapping:\n filename = HERE.parent / \"secrets/aws.json\"\n with open(filename) as json_file:\n return json.load(json_file)\n\n\[email protected](scope=\"session\")\ndef cloud_bucket_name():\n return \"airbytetestbucket\"\n\n\ndef is_ssh_ready(ip, port):\n try:\n with SSHClient() as ssh:\n ssh.set_missing_host_key_policy(AutoAddPolicy)\n ssh.connect(\n ip,\n port=port,\n username=\"user1\",\n password=\"pass1\",\n )\n return True\n except (SSHException, socket.error):\n return False\n\n\[email protected](scope=\"session\")\ndef ssh_service(docker_ip, docker_services):\n \"\"\"Ensure that SSH service is up and responsive.\"\"\"\n\n # `port_for` takes a container port and returns the corresponding host port\n port = docker_services.port_for(\"ssh\", 22)\n docker_services.wait_until_responsive(timeout=30.0, pause=0.1, check=lambda: is_ssh_ready(docker_ip, port))\n return docker_ip\n\n\[email protected]\ndef provider_config(ssh_service):\n def lookup(name):\n providers = {\n \"ssh\": dict(storage=\"SSH\", host=ssh_service, user=\"user1\", password=\"pass1\", port=2222),\n \"scp\": dict(storage=\"SCP\", host=ssh_service, user=\"user1\", password=\"pass1\", port=2222),\n \"sftp\": dict(storage=\"SFTP\", host=ssh_service, user=\"user1\", password=\"pass1\", port=100),\n \"gcs\": dict(storage=\"GCS\"),\n \"s3\": dict(storage=\"S3\"),\n }\n return providers[name]\n\n return lookup\n\n\ndef create_unique_gcs_bucket(storage_client, name: str) -> str:\n \"\"\"\n Make a unique 
bucket to which we'll upload the file.\n (GCS buckets are part of a single global namespace.)\n \"\"\"\n for i in range(0, 5):\n bucket_name = f\"{name}-{uuid.uuid1()}\"\n try:\n bucket = storage_client.bucket(bucket_name)\n bucket.storage_class = \"STANDARD\"\n # fixed locations are cheaper...\n storage_client.create_bucket(bucket, location=\"us-east1\")\n print(f\"\\nNew GCS bucket created {bucket_name}\")\n return bucket_name\n except Conflict:\n print(f\"\\nError: {bucket_name} already exists!\")\n\n\[email protected](scope=\"session\")\ndef download_gcs_public_data():\n print(\"\\nDownload public dataset from gcs to local /tmp\")\n df = pandas.read_csv(\"https://storage.googleapis.com/covid19-open-data/v2/latest/epidemiology.csv\")\n tmp_file = tempfile.NamedTemporaryFile(delete=False)\n df.to_csv(tmp_file.name, index=False)\n\n yield tmp_file.name\n\n os.remove(tmp_file.name)\n print(f\"\\nLocal File {tmp_file.name} is now deleted\")\n\n\[email protected](scope=\"session\")\ndef private_google_cloud_file(google_cloud_service_file, cloud_bucket_name, download_gcs_public_data):\n storage_client = storage.Client.from_service_account_json(str(google_cloud_service_file))\n bucket_name = create_unique_gcs_bucket(storage_client, cloud_bucket_name)\n print(f\"\\nUpload dataset to private gcs bucket {bucket_name}\")\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(\"myfile.csv\")\n blob.upload_from_filename(download_gcs_public_data)\n\n yield f\"{bucket_name}/myfile.csv\"\n\n bucket.delete(force=True)\n print(f\"\\nGCS Bucket {bucket_name} is now deleted\")\n\n\[email protected](scope=\"session\")\ndef private_aws_file(aws_credentials, cloud_bucket_name, download_gcs_public_data):\n region = \"eu-west-3\"\n location = {\"LocationConstraint\": region}\n s3_client = boto3.client(\n \"s3\",\n aws_access_key_id=aws_credentials[\"aws_access_key_id\"],\n aws_secret_access_key=aws_credentials[\"aws_secret_access_key\"],\n region_name=region,\n )\n bucket_name = cloud_bucket_name\n print(f\"\\nUpload dataset to private aws bucket {bucket_name}\")\n try:\n s3_client.head_bucket(Bucket=bucket_name)\n except ClientError:\n s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=location)\n s3_client.upload_file(download_gcs_public_data, bucket_name, \"myfile.csv\")\n\n yield f\"{bucket_name}/myfile.csv\"\n\n s3 = boto3.resource(\n \"s3\", aws_access_key_id=aws_credentials[\"aws_access_key_id\"], aws_secret_access_key=aws_credentials[\"aws_secret_access_key\"]\n )\n bucket = s3.Bucket(bucket_name)\n bucket.objects.all().delete()\n print(f\"\\nS3 Bucket {bucket_name} is now deleted\")\n"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
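The airbyte record flags `pandas.read_csv` inside a session-scoped pytest fixture that downloads a public CSV, writes it to a `NamedTemporaryFile`, yields the path, and removes the file after the session. The sketch below mirrors that yield/cleanup shape with a locally built DataFrame standing in for the downloaded epidemiology CSV, so it runs offline; the fixture and column names are illustrative, not from the record.

```python
# Minimal sketch of the temp-file fixture pattern used in the record,
# with a local DataFrame replacing the network download.
import os
import tempfile

import pandas
import pytest


@pytest.fixture(scope="session")
def local_csv_file():
    df = pandas.DataFrame({"key": ["US", "FR"], "new_confirmed": [10, 5]})
    tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".csv")
    df.to_csv(tmp_file.name, index=False)
    yield tmp_file.name            # tests receive the path
    os.remove(tmp_file.name)       # cleanup after the session


def test_roundtrip(local_csv_file):
    df = pandas.read_csv(local_csv_file)  # the API flagged in this record
    assert list(df.columns) == ["key", "new_confirmed"]
```

`delete=False` matters here: the fixture wants the file to outlive the handle so `read_csv` can reopen it by path, with cleanup handled explicitly after the yield.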
entn-at/cyclevae-vc-neuralvoco
|
[
"e0188457fe6d16e53b91066600dfe6fd5a9f8c23"
] |
[
"src/bin/calc_rec-cycrec-gv_gru-cycle-mcepvae-laplace_noar.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Copyright 2020 Patrick Lumban Tobing (Nagoya University)\n# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)\n\nfrom __future__ import division\n\nimport argparse\nimport logging\nimport math\nimport os\nimport sys\n\nimport numpy as np\nimport torch\nimport torch.multiprocessing as mp\n\nfrom vcneuvoco import GRU_VAE_ENCODER, GRU_SPEC_DECODER\nfrom utils import find_files, read_hdf5, read_txt, write_hdf5, check_hdf5\n\nfrom dtw_c import dtw_c as dtw\n\nimport torch.nn.functional as F\nimport h5py\n\n#np.set_printoptions(threshold=np.inf)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n # decode setting\n parser.add_argument(\"--feats\", required=True,\n type=str, help=\"list or directory of source eval feat files\")\n parser.add_argument(\"--spk\", required=True,\n type=str, help=\"speaker name to be reconstructed\")\n parser.add_argument(\"--model\", required=True,\n type=str, help=\"model file\")\n parser.add_argument(\"--config\", required=True,\n type=str, help=\"configure file\")\n parser.add_argument(\"--n_gpus\", default=1,\n type=int, help=\"number of gpus\")\n parser.add_argument(\"--outdir\", required=True,\n type=str, help=\"directory to save log\")\n parser.add_argument(\"--string_path\", required=True,\n type=str, help=\"path of h5 generated feature\")\n # other setting\n parser.add_argument(\"--GPU_device\", default=None,\n type=int, help=\"selection of GPU device\")\n parser.add_argument(\"--GPU_device_str\", default=None,\n type=str, help=\"selection of GPU device\")\n parser.add_argument(\"--verbose\", default=1,\n type=int, help=\"log level\")\n\n args = parser.parse_args()\n\n if args.GPU_device is not None or args.GPU_device_str is not None:\n os.environ[\"CUDA_DEVICE_ORDER\"]\t\t= \"PCI_BUS_ID\"\n if args.GPU_device_str is None:\n os.environ[\"CUDA_VISIBLE_DEVICES\"]\t= str(args.GPU_device)\n else:\n os.environ[\"CUDA_VISIBLE_DEVICES\"]\t= args.GPU_device_str\n\n # check directory existence\n if not os.path.exists(args.outdir):\n os.makedirs(args.outdir)\n\n # set log level\n if args.verbose > 0:\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S', filemode='w',\n filename=args.outdir + \"/decode.log\")\n logging.getLogger().addHandler(logging.StreamHandler())\n elif args.verbose > 1:\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S', filemode='w',\n filename=args.outdir + \"/decode.log\")\n logging.getLogger().addHandler(logging.StreamHandler())\n else:\n logging.basicConfig(level=logging.WARN,\n format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S', filemode='w',\n filename=args.outdir + \"/decode.log\")\n logging.getLogger().addHandler(logging.StreamHandler())\n logging.warn(\"logging is disabled.\")\n\n # load config\n config = torch.load(args.config)\n\n # get source feat list\n if os.path.isdir(args.feats):\n feat_list = sorted(find_files(args.feats, \"*.h5\"))\n elif os.path.isfile(args.feats):\n feat_list = read_txt(args.feats)\n else:\n logging.error(\"--feats should be directory or list.\")\n sys.exit(1)\n\n # prepare the file list for parallel decoding\n feat_lists = np.array_split(feat_list, args.n_gpus)\n feat_lists = [f_list.tolist() for f_list in feat_lists]\n for i in range(args.n_gpus):\n logging.info('%d: %d' % (i+1, len(feat_lists[i])))\n\n 
spk_list = config.spk_list.split('@')\n n_spk = len(spk_list)\n spk_idx = spk_list.index(args.spk)\n\n stats_list = config.stats_list.split('@')\n assert(n_spk == len(stats_list))\n\n spk_stat = stats_list[spk_idx]\n gv_mean = read_hdf5(spk_stat, \"/gv_range_mean\")[1:]\n\n model_epoch = os.path.basename(args.model).split('.')[0].split('-')[1]\n logging.info('epoch: '+model_epoch)\n\n model_name = os.path.basename(os.path.dirname(args.model)).split('_')[1]\n logging.info('mdl_name: '+model_name)\n\n logging.info(config)\n # define gpu decode function\n def gpu_decode(feat_list, gpu, cvlist=None, mcdpow_cvlist=None, mcdpowstd_cvlist=None, mcd_cvlist=None, \\\n mcdstd_cvlist=None, cvlist_dv=None, mcdpow_cvlist_dv=None, mcdpowstd_cvlist_dv=None, \\\n mcd_cvlist_dv=None, mcdstd_cvlist_dv=None, \\\n cvlist_cyc=None, mcdpow_cvlist_cyc=None, mcdpowstd_cvlist_cyc=None, mcd_cvlist_cyc=None, \\\n mcdstd_cvlist_cyc=None, cvlist_cyc_dv=None, mcdpow_cvlist_cyc_dv=None, mcdpowstd_cvlist_cyc_dv=None, \\\n mcd_cvlist_cyc_dv=None, mcdstd_cvlist_cyc_dv=None):\n with torch.cuda.device(gpu):\n # define model and load parameters\n with torch.no_grad():\n model_encoder = GRU_VAE_ENCODER(\n in_dim=config.mcep_dim+config.excit_dim,\n n_spk=n_spk,\n lat_dim=config.lat_dim,\n hidden_layers=config.hidden_layers_enc,\n hidden_units=config.hidden_units_enc,\n kernel_size=config.kernel_size_enc,\n dilation_size=config.dilation_size_enc,\n causal_conv=config.causal_conv_enc,\n bi=False,\n ar=False,\n pad_first=True,\n right_size=config.right_size_enc)\n logging.info(model_encoder)\n model_decoder = GRU_SPEC_DECODER(\n feat_dim=config.lat_dim,\n out_dim=config.mcep_dim,\n n_spk=n_spk,\n hidden_layers=config.hidden_layers_dec,\n hidden_units=config.hidden_units_dec,\n kernel_size=config.kernel_size_dec,\n dilation_size=config.dilation_size_dec,\n causal_conv=config.causal_conv_dec,\n bi=False,\n ar=False,\n pad_first=True,\n right_size=config.right_size_dec)\n logging.info(model_decoder)\n model_encoder.load_state_dict(torch.load(args.model)[\"model_encoder\"])\n model_decoder.load_state_dict(torch.load(args.model)[\"model_decoder\"])\n model_encoder.remove_weight_norm()\n model_decoder.remove_weight_norm()\n model_encoder.cuda()\n model_decoder.cuda()\n model_encoder.eval()\n model_decoder.eval()\n for param in model_encoder.parameters():\n param.requires_grad = False\n for param in model_decoder.parameters():\n param.requires_grad = False\n count = 0\n pad_left = (model_encoder.pad_left + model_decoder.pad_left)*2\n pad_right = (model_encoder.pad_right + model_decoder.pad_right)*2\n outpad_lefts = [None]*3\n outpad_rights = [None]*3\n outpad_lefts[0] = pad_left-model_encoder.pad_left\n outpad_rights[0] = pad_right-model_encoder.pad_right\n outpad_lefts[1] = outpad_lefts[0]-model_decoder.pad_left\n outpad_rights[1] = outpad_rights[0]-model_decoder.pad_right\n outpad_lefts[2] = outpad_lefts[1]-model_encoder.pad_left\n outpad_rights[2] = outpad_rights[1]-model_encoder.pad_right\n for feat_file in feat_list:\n # convert mcep\n logging.info(\"recmcep \" + feat_file)\n\n feat_org = read_hdf5(feat_file, \"/feat_mceplf0cap\")\n logging.info(feat_org.shape)\n mcep = np.array(feat_org[:,-config.mcep_dim:])\n\n with torch.no_grad():\n feat = F.pad(torch.FloatTensor(feat_org).cuda().unsqueeze(0).transpose(1,2), (pad_left,pad_right), \"replicate\").transpose(1,2)\n feat_excit = torch.FloatTensor(feat_org[:,:config.excit_dim]).cuda().unsqueeze(0)\n\n spk_logits, _, lat_src, _ = model_encoder(feat, sampling=False)\n 
logging.info('input spkpost')\n if outpad_rights[0] > 0:\n logging.info(torch.mean(F.softmax(spk_logits[:,outpad_lefts[0]:-outpad_rights[0]], dim=-1), 1))\n else:\n logging.info(torch.mean(F.softmax(spk_logits[:,outpad_lefts[0]:], dim=-1), 1))\n\n cvmcep_src, _ = model_decoder((torch.ones((1, lat_src.shape[1]))*spk_idx).cuda().long(), lat_src)\n spk_logits, _, lat_rec, _ = model_encoder(torch.cat((F.pad(feat_excit.transpose(1,2), \\\n (outpad_lefts[1],outpad_rights[1]), \"replicate\").transpose(1,2), cvmcep_src), 2), \n sampling=False)\n logging.info('rec spkpost')\n if outpad_rights[2] > 0:\n logging.info(torch.mean(F.softmax(spk_logits[:,outpad_lefts[2]:-outpad_rights[2]], dim=-1), 1))\n else:\n logging.info(torch.mean(F.softmax(spk_logits[:,outpad_lefts[2]:], dim=-1), 1))\n\n cvmcep_cyc, _ = model_decoder((torch.ones((1, lat_rec.shape[1]))*spk_idx).cuda().long(), lat_rec)\n\n if outpad_rights[1] > 0:\n feat_rec = torch.cat((feat_excit, cvmcep_src[:,outpad_lefts[1]:-outpad_rights[1]]), 2)[0].cpu().data.numpy()\n else:\n feat_rec = torch.cat((feat_excit, cvmcep_src[:,outpad_lefts[1]:]), 2)[0].cpu().data.numpy()\n feat_cyc = torch.cat((feat_excit, cvmcep_cyc), 2)[0].cpu().data.numpy()\n\n cvmcep_src = np.array(cvmcep_src[0].cpu().data.numpy(), dtype=np.float64)\n cvmcep_cyc = np.array(cvmcep_cyc[0].cpu().data.numpy(), dtype=np.float64)\n\n logging.info(cvmcep_src.shape)\n logging.info(cvmcep_cyc.shape)\n \n spcidx = read_hdf5(feat_file, \"/spcidx_range\")[0]\n\n _, _, _, mcdpow_arr = dtw.dtw_org_to_trg(np.array(cvmcep_src[np.array(spcidx),:], \\\n dtype=np.float64), np.array(mcep[np.array(spcidx),:], dtype=np.float64))\n _, _, _, mcd_arr = dtw.dtw_org_to_trg(np.array(cvmcep_src[np.array(spcidx),1:], \\\n dtype=np.float64), np.array(mcep[np.array(spcidx),1:], dtype=np.float64))\n mcdpow_mean = np.mean(mcdpow_arr)\n mcdpow_std = np.std(mcdpow_arr)\n mcd_mean = np.mean(mcd_arr)\n mcd_std = np.std(mcd_arr)\n logging.info(\"mcdpow_rec: %.6f dB +- %.6f\" % (mcdpow_mean, mcdpow_std))\n logging.info(\"mcd_rec: %.6f dB +- %.6f\" % (mcd_mean, mcd_std))\n\n _, _, _, mcdpow_arr = dtw.dtw_org_to_trg(np.array(cvmcep_cyc[np.array(spcidx),:], \\\n dtype=np.float64), np.array(mcep[np.array(spcidx),:], dtype=np.float64))\n _, _, _, mcd_arr = dtw.dtw_org_to_trg(np.array(cvmcep_cyc[np.array(spcidx),1:], \\\n dtype=np.float64), np.array(mcep[np.array(spcidx),1:], dtype=np.float64))\n mcdpow_mean_cyc = np.mean(mcdpow_arr)\n mcdpow_std_cyc = np.std(mcdpow_arr)\n mcd_mean_cyc = np.mean(mcd_arr)\n mcd_std_cyc = np.std(mcd_arr)\n logging.info(\"mcdpow_cyc: %.6f dB +- %.6f\" % (mcdpow_mean_cyc, mcdpow_std_cyc))\n logging.info(\"mcd_cyc: %.6f dB +- %.6f\" % (mcd_mean_cyc, mcd_std_cyc))\n \n dataset = feat_file.split('/')[1].split('_')[0]\n if 'tr' in dataset:\n logging.info('trn')\n mcdpow_cvlist.append(mcdpow_mean)\n mcdpowstd_cvlist.append(mcdpow_std)\n mcd_cvlist.append(mcd_mean)\n mcdstd_cvlist.append(mcd_std)\n cvlist.append(np.var(cvmcep_src[:,1:], axis=0))\n logging.info(len(cvlist))\n mcdpow_cvlist_cyc.append(mcdpow_mean_cyc)\n mcdpowstd_cvlist_cyc.append(mcdpow_std_cyc)\n mcd_cvlist_cyc.append(mcd_mean_cyc)\n mcdstd_cvlist_cyc.append(mcd_std_cyc)\n cvlist_cyc.append(np.var(cvmcep_cyc[:,1:], axis=0))\n elif 'dv' in dataset:\n logging.info('dev')\n mcdpow_cvlist_dv.append(mcdpow_mean)\n mcdpowstd_cvlist_dv.append(mcdpow_std)\n mcd_cvlist_dv.append(mcd_mean)\n mcdstd_cvlist_dv.append(mcd_std)\n cvlist_dv.append(np.var(cvmcep_src[:,1:], axis=0))\n logging.info(len(cvlist_dv))\n 
mcdpow_cvlist_cyc_dv.append(mcdpow_mean_cyc)\n mcdpowstd_cvlist_cyc_dv.append(mcdpow_std_cyc)\n mcd_cvlist_cyc_dv.append(mcd_mean_cyc)\n mcdstd_cvlist_cyc_dv.append(mcd_std_cyc)\n cvlist_cyc_dv.append(np.var(cvmcep_cyc[:,1:], axis=0))\n\n logging.info('write rec to h5')\n outh5dir = os.path.join(os.path.dirname(os.path.dirname(feat_file)), args.spk+\"-\"+args.spk)\n if not os.path.exists(outh5dir):\n os.makedirs(outh5dir)\n feat_file = os.path.join(outh5dir, os.path.basename(feat_file))\n logging.info(feat_file + ' ' + args.string_path)\n logging.info(feat_rec.shape)\n write_hdf5(feat_file, args.string_path, feat_rec)\n\n logging.info('write cyc to h5')\n outh5dir = os.path.join(os.path.dirname(os.path.dirname(feat_file)), args.spk+\"-\"+args.spk+\"-\"+args.spk)\n if not os.path.exists(outh5dir):\n os.makedirs(outh5dir)\n feat_file = os.path.join(outh5dir, os.path.basename(feat_file))\n logging.info(feat_file + ' ' + args.string_path)\n logging.info(feat_cyc.shape)\n write_hdf5(feat_file, args.string_path, feat_cyc)\n\n count += 1\n #if count >= 5:\n #if count >= 3:\n # break\n\n\n # parallel decode training\n with mp.Manager() as manager:\n gpu = 0\n processes = []\n cvlist = manager.list()\n mcd_cvlist = manager.list()\n mcdstd_cvlist = manager.list()\n mcdpow_cvlist = manager.list()\n mcdpowstd_cvlist = manager.list()\n cvlist_dv = manager.list()\n mcd_cvlist_dv = manager.list()\n mcdstd_cvlist_dv = manager.list()\n mcdpow_cvlist_dv = manager.list()\n mcdpowstd_cvlist_dv = manager.list()\n cvlist_cyc = manager.list()\n mcd_cvlist_cyc = manager.list()\n mcdstd_cvlist_cyc = manager.list()\n mcdpow_cvlist_cyc = manager.list()\n mcdpowstd_cvlist_cyc = manager.list()\n cvlist_cyc_dv = manager.list()\n mcd_cvlist_cyc_dv = manager.list()\n mcdstd_cvlist_cyc_dv = manager.list()\n mcdpow_cvlist_cyc_dv = manager.list()\n mcdpowstd_cvlist_cyc_dv = manager.list()\n for i, feat_list in enumerate(feat_lists):\n logging.info(i)\n p = mp.Process(target=gpu_decode, args=(feat_list, gpu, cvlist, mcdpow_cvlist, mcdpowstd_cvlist, \\\n mcd_cvlist, mcdstd_cvlist, cvlist_dv, mcdpow_cvlist_dv, \\\n mcdpowstd_cvlist_dv, mcd_cvlist_dv, mcdstd_cvlist_dv,\\\n cvlist_cyc, mcdpow_cvlist_cyc, mcdpowstd_cvlist_cyc, \\\n mcd_cvlist_cyc, mcdstd_cvlist_cyc, cvlist_cyc_dv, mcdpow_cvlist_cyc_dv, \\\n mcdpowstd_cvlist_cyc_dv, mcd_cvlist_cyc_dv, mcdstd_cvlist_cyc_dv,))\n p.start()\n processes.append(p)\n gpu += 1\n if (i + 1) % args.n_gpus == 0:\n gpu = 0\n # wait for all process\n for p in processes:\n p.join()\n\n # calculate cv_gv statistics\n if len(mcdpow_cvlist) > 0:\n logging.info(\"mcdpow_rec: %.6f dB (+- %.6f) +- %.6f (+- %.6f)\" % (np.mean(np.array(mcdpow_cvlist)), \\\n np.std(np.array(mcdpow_cvlist)),np.mean(np.array(mcdpowstd_cvlist)),\\\n np.std(np.array(mcdpowstd_cvlist))))\n logging.info(\"mcd_rec: %.6f dB (+- %.6f) +- %.6f (+- %.6f)\" % (np.mean(np.array(mcd_cvlist)), \\\n np.std(np.array(mcd_cvlist)),np.mean(np.array(mcdstd_cvlist)),\\\n np.std(np.array(mcdstd_cvlist))))\n cvgv_mean = np.mean(np.array(cvlist), axis=0)\n cvgv_var = np.var(np.array(cvlist), axis=0)\n logging.info(\"%lf +- %lf\" % (np.mean(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean)))), \\\n np.std(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean))))))\n logging.info(\"mcdpow_cyc: %.6f dB (+- %.6f) +- %.6f (+- %.6f)\" % (np.mean(np.array(mcdpow_cvlist_cyc)), \\\n np.std(np.array(mcdpow_cvlist_cyc)),np.mean(np.array(mcdpowstd_cvlist_cyc)),\\\n np.std(np.array(mcdpowstd_cvlist_cyc))))\n logging.info(\"mcd_cyc: %.6f dB (+- %.6f) +- %.6f 
(+- %.6f)\" % (np.mean(np.array(mcd_cvlist_cyc)), \\\n np.std(np.array(mcd_cvlist_cyc)),np.mean(np.array(mcdstd_cvlist_cyc)),\\\n np.std(np.array(mcdstd_cvlist_cyc))))\n cvgv_mean = np.mean(np.array(cvlist_cyc), axis=0)\n cvgv_var = np.var(np.array(cvlist_cyc), axis=0)\n logging.info(\"%lf +- %lf\" % (np.mean(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean)))), \\\n np.std(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean))))))\n\n cvgv_mean = np.mean(np.array(np.r_[cvlist,cvlist_cyc]), axis=0)\n cvgv_var = np.var(np.array(np.r_[cvlist,cvlist_cyc]), axis=0)\n logging.info(\"%lf +- %lf\" % (np.mean(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean)))), \\\n np.std(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean))))))\n\n string_path = model_name+\"-\"+str(config.n_half_cyc)+\"-\"+str(config.lat_dim)+\"-\"+model_epoch\n logging.info(string_path)\n\n string_mean = \"/recgv_mean_\"+string_path\n string_var = \"/recgv_var_\"+string_path\n write_hdf5(spk_stat, string_mean, cvgv_mean)\n write_hdf5(spk_stat, string_var, cvgv_var)\n\n if len(mcdpow_cvlist_dv) > 0:\n logging.info(\"mcdpow_rec_dv: %.6f dB (+- %.6f) +- %.6f (+- %.6f)\" % (np.mean(np.array(mcdpow_cvlist_dv)), \\\n np.std(np.array(mcdpow_cvlist_dv)),np.mean(np.array(mcdpowstd_cvlist_dv)),\\\n np.std(np.array(mcdpowstd_cvlist_dv))))\n logging.info(\"mcd_rec_dv: %.6f dB (+- %.6f) +- %.6f (+- %.6f)\" % (np.mean(np.array(mcd_cvlist_dv)), \\\n np.std(np.array(mcd_cvlist_dv)),np.mean(np.array(mcdstd_cvlist_dv)),\\\n np.std(np.array(mcdstd_cvlist_dv))))\n cvgv_mean = np.mean(np.array(cvlist_dv), axis=0)\n cvgv_var = np.var(np.array(cvlist_dv), axis=0)\n logging.info(\"%lf +- %lf\" % (np.mean(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean)))), \\\n np.std(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean))))))\n logging.info(\"mcdpow_cyc_dv: %.6f dB (+- %.6f) +- %.6f (+- %.6f)\" % (np.mean(np.array(mcdpow_cvlist_cyc_dv)), \\\n np.std(np.array(mcdpow_cvlist_cyc_dv)),np.mean(np.array(mcdpowstd_cvlist_cyc_dv)),\\\n np.std(np.array(mcdpowstd_cvlist_cyc_dv))))\n logging.info(\"mcd_cyc_dv: %.6f dB (+- %.6f) +- %.6f (+- %.6f)\" % (np.mean(np.array(mcd_cvlist_cyc_dv)), \\\n np.std(np.array(mcd_cvlist_cyc_dv)),np.mean(np.array(mcdstd_cvlist_cyc_dv)),\\\n np.std(np.array(mcdstd_cvlist_cyc_dv))))\n cvgv_mean = np.mean(np.array(cvlist_cyc_dv), axis=0)\n cvgv_var = np.var(np.array(cvlist_cyc_dv), axis=0)\n logging.info(\"%lf +- %lf\" % (np.mean(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean)))), \\\n np.std(np.sqrt(np.square(np.log(cvgv_mean)-np.log(gv_mean))))))\n\n\nif __name__ == \"__main__\":\n main()\n"
] |
[
[
"torch.nn.functional.softmax",
"numpy.log",
"torch.ones",
"torch.multiprocessing.Manager",
"torch.load",
"torch.cat",
"numpy.var",
"numpy.std",
"torch.no_grad",
"numpy.mean",
"torch.FloatTensor",
"torch.cuda.device",
"numpy.array",
"numpy.array_split",
"torch.multiprocessing.Process"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
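The cyclevae record's decode script combines two of its flagged APIs into one scaffolding pattern: `numpy.array_split` partitions the feature-file list into one chunk per GPU, and `torch.multiprocessing` runs a worker per chunk while `manager.list()` accumulators collect results across processes. Here is a minimal sketch of that scaffolding with a trivial worker and hypothetical file names in place of the real decode step.

```python
# Minimal sketch of the parallel-decode scaffolding in the record.
import numpy as np
import torch.multiprocessing as mp


def worker(feat_list, gpu, results):
    for feat_file in feat_list:
        results.append((gpu, feat_file))  # stand-in for the real decode step


if __name__ == "__main__":
    n_gpus = 2
    feat_list = [f"data/utt{i:02d}.h5" for i in range(5)]  # hypothetical paths
    # array_split tolerates uneven division: here chunks of 3 and 2.
    feat_lists = [chunk.tolist() for chunk in np.array_split(feat_list, n_gpus)]

    with mp.Manager() as manager:
        results = manager.list()   # shared across worker processes
        processes = []
        for gpu, chunk in enumerate(feat_lists):
            p = mp.Process(target=worker, args=(chunk, gpu, results))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        print(list(results))
```

`torch.multiprocessing` is a drop-in re-export of the standard `multiprocessing` module, so `Manager` and `Process` behave as usual; the script in the record uses it because the workers hold CUDA models.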
hyx100e/athena-1
|
[
"2ff02d5f54070563c6a600199ae9e8d3ca3c66dd"
] |
[
"athena/models/speech_transformer.py"
] |
[
"# coding=utf-8\n# Copyright (C) 2019 ATHENA AUTHORS; Xiangang Li; Dongwei Jiang; Xiaoning Lei\n# All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n# Only support eager mode\n# pylint: disable=no-member, invalid-name, relative-beyond-top-level\n# pylint: disable=too-many-locals, too-many-statements, too-many-arguments, too-many-instance-attributes\n\n\"\"\" speech transformer implementation\"\"\"\n\nfrom absl import logging\nimport tensorflow as tf\nfrom .base import BaseModel\nfrom ..loss import Seq2SeqSparseCategoricalCrossentropy\nfrom ..metrics import Seq2SeqSparseCategoricalAccuracy\nfrom ..utils.misc import generate_square_subsequent_mask, insert_sos_in_labels\nfrom ..layers.commons import PositionalEncoding\nfrom ..layers.transformer import Transformer\nfrom ..utils.hparam import register_and_parse_hparams\nfrom ..tools.beam_search import BeamSearchDecoder\nfrom ..tools.lm_scorer import NGramScorer, RNNScorer\n\n\nclass SpeechTransformer(BaseModel):\n \"\"\" Standard implementation of a SpeechTransformer. Model mainly consists of three parts:\n the x_net for input preparation, the y_net for output preparation and the transformer itself\n \"\"\"\n default_config = {\n \"return_encoder_output\": False,\n \"num_filters\": 512,\n \"d_model\": 512,\n \"num_heads\": 8,\n \"num_encoder_layers\": 12,\n \"num_decoder_layers\": 6,\n \"dff\": 1280,\n \"rate\": 0.1,\n \"schedual_sampling_rate\": 0.9,\n \"label_smoothing_rate\": 0.0\n }\n\n def __init__(self, data_descriptions, config=None):\n super().__init__()\n self.hparams = register_and_parse_hparams(self.default_config, config, cls=self.__class__)\n\n self.num_class = data_descriptions.num_class + 1\n self.sos = self.num_class - 1\n self.eos = self.num_class - 1\n ls_rate = self.hparams.label_smoothing_rate\n self.loss_function = Seq2SeqSparseCategoricalCrossentropy(\n num_classes=self.num_class, eos=self.eos, label_smoothing=ls_rate\n )\n self.metric = Seq2SeqSparseCategoricalAccuracy(eos=self.eos, name=\"Accuracy\")\n\n # for the x_net\n num_filters = self.hparams.num_filters\n d_model = self.hparams.d_model\n layers = tf.keras.layers\n input_features = layers.Input(shape=data_descriptions.sample_shape[\"input\"], dtype=tf.float32)\n inner = layers.Conv2D(\n filters=num_filters,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding=\"same\",\n use_bias=False,\n data_format=\"channels_last\",\n )(input_features)\n inner = layers.BatchNormalization()(inner)\n inner = tf.nn.relu6(inner)\n inner = layers.Conv2D(\n filters=num_filters,\n kernel_size=(3, 3),\n strides=(2, 2),\n padding=\"same\",\n use_bias=False,\n data_format=\"channels_last\",\n )(inner)\n inner = layers.BatchNormalization()(inner)\n\n inner = tf.nn.relu6(inner)\n _, _, dim, channels = inner.get_shape().as_list()\n output_dim = dim * channels\n inner = layers.Reshape((-1, output_dim))(inner)\n\n inner = layers.Dense(d_model, activation=tf.nn.relu6)(inner)\n inner = 
PositionalEncoding(d_model, scale=False)(inner)\n inner = layers.Dropout(self.hparams.rate)(inner) # self.hparams.rate\n self.x_net = tf.keras.Model(inputs=input_features, outputs=inner, name=\"x_net\")\n print(self.x_net.summary())\n\n # y_net for target\n input_labels = layers.Input(shape=data_descriptions.sample_shape[\"output\"], dtype=tf.int32)\n inner = layers.Embedding(self.num_class, d_model)(input_labels)\n inner = PositionalEncoding(d_model, scale=True)(inner)\n inner = layers.Dropout(self.hparams.rate)(inner)\n self.y_net = tf.keras.Model(inputs=input_labels, outputs=inner, name=\"y_net\")\n print(self.y_net.summary())\n\n # transformer layer\n self.transformer = Transformer(\n self.hparams.d_model,\n self.hparams.num_heads,\n self.hparams.num_encoder_layers,\n self.hparams.num_decoder_layers,\n self.hparams.dff,\n self.hparams.rate,\n )\n\n # last layer for output\n self.final_layer = layers.Dense(self.num_class, input_shape=(d_model,))\n\n # some temp function\n self.random_num = tf.random_uniform_initializer(0, 1)\n\n def call(self, samples, training: bool = None):\n x0 = samples[\"input\"]\n y0 = insert_sos_in_labels(samples[\"output\"], self.sos)\n x = self.x_net(x0, training=training)\n y = self.y_net(y0, training=training)\n input_length = self.compute_logit_length(samples)\n input_mask, output_mask = self._create_masks(x, input_length, y0)\n y, encoder_output = self.transformer(\n x,\n y,\n input_mask,\n output_mask,\n input_mask,\n training=training,\n return_encoder_output=True,\n )\n y = self.final_layer(y)\n if self.hparams.return_encoder_output:\n return y, encoder_output\n return y\n\n @staticmethod\n def _create_masks(x, input_length, y):\n r\"\"\" Generate a square mask for the sequence. The masked positions are\n filled with float(1.0). 
Unmasked positions are filled with float(0.0).\n \"\"\"\n input_mask, output_mask = None, None\n if x is not None:\n input_mask = 1.0 - tf.sequence_mask(\n input_length, tf.shape(x)[1], dtype=tf.float32\n )\n input_mask = input_mask[:, tf.newaxis, tf.newaxis, :]\n input_mask.set_shape([None, None, None, None])\n if y is not None:\n output_mask = tf.cast(tf.math.equal(y, 0), tf.float32)\n output_mask = output_mask[:, tf.newaxis, tf.newaxis, :]\n look_ahead_mask = generate_square_subsequent_mask(tf.shape(y)[1])\n output_mask = tf.maximum(output_mask, look_ahead_mask)\n output_mask.set_shape([None, None, None, None])\n return input_mask, output_mask\n\n def compute_logit_length(self, samples):\n \"\"\" used for get logit length \"\"\"\n input_length = tf.cast(samples[\"input_length\"], tf.float32)\n logit_length = tf.math.ceil(input_length / 2)\n logit_length = tf.math.ceil(logit_length / 2)\n logit_length = tf.cast(logit_length, tf.int32)\n return logit_length\n\n def time_propagate(self, history_logits, history_predictions, step, enc_outputs):\n \"\"\" TODO: doctring\n last_predictions: the predictions of last time_step, [beam_size]\n history_predictions: the predictions of history from 0 to time_step,\n [beam_size, time_steps]\n states: (step)\n \"\"\"\n # merge\n (encoder_output, memory_mask) = enc_outputs\n step = step + 1\n output_mask = generate_square_subsequent_mask(step)\n # propagate 1 step\n logits = self.y_net(tf.transpose(history_predictions.stack()), training=False)\n logits = self.transformer.decoder(\n logits,\n encoder_output,\n tgt_mask=output_mask,\n memory_mask=memory_mask,\n training=False,\n )\n logits = self.final_layer(logits)\n logits = logits[:, -1, :]\n history_logits = history_logits.write(step - 1, logits)\n return logits, history_logits, step\n\n def decode(self, samples, hparams, lm_model=None, return_encoder=False):\n \"\"\" beam search decoding \"\"\"\n x0 = samples[\"input\"]\n batch = tf.shape(x0)[0]\n x = self.x_net(x0, training=False)\n input_length = self.compute_logit_length(samples)\n input_mask, _ = self._create_masks(x, input_length, None)\n encoder_output = self.transformer.encoder(x, input_mask, training=False)\n if return_encoder:\n return encoder_output, input_mask\n # init op\n last_predictions = tf.ones([batch], dtype=tf.int32) * self.sos\n history_predictions = tf.TensorArray(\n tf.int32, size=1, dynamic_size=True, clear_after_read=False\n )\n step = 0\n history_predictions.write(0, last_predictions)\n history_predictions = history_predictions.stack()\n init_cand_states = [history_predictions]\n\n beam_size = 1 if not hparams.beam_search else hparams.beam_size\n beam_search_decoder = BeamSearchDecoder(\n self.num_class, self.sos, self.eos, beam_size=beam_size\n )\n beam_search_decoder.build(self.time_propagate)\n if hparams.lm_weight != 0:\n if hparams.lm_path is None:\n raise ValueError(\"lm path should not be none\")\n if hparams.lm_type == \"ngram\":\n lm_scorer = NGramScorer(\n hparams.lm_path,\n self.sos,\n self.eos,\n self.num_class,\n lm_weight=hparams.lm_weight,\n )\n elif hparams.lm_type == \"rnn\":\n lm_scorer = RNNScorer(\n lm_model,\n lm_weight=hparams.lm_weight)\n beam_search_decoder.add_scorer(lm_scorer)\n predictions = beam_search_decoder(\n history_predictions, init_cand_states, step, (encoder_output, input_mask)\n )\n return predictions\n\n def restore_from_pretrained_model(self, pretrained_model, model_type=\"\"):\n if model_type == \"\":\n return\n if model_type == \"mpc\":\n logging.info(\"loading from pretrained mpc 
model\")\n self.x_net = pretrained_model.x_net\n self.transformer.encoder = pretrained_model.encoder\n elif model_type == \"SpeechTransformer\":\n logging.info(\"loading from pretrained SpeechTransformer model\")\n self.x_net = pretrained_model.x_net\n self.y_net = pretrained_model.y_net\n self.transformer = pretrained_model.transformer\n self.final_layer = pretrained_model.final_layer\n else:\n raise ValueError(\"NOT SUPPORTED\")\n\n\nclass SpeechTransformer2(SpeechTransformer):\n \"\"\" Decoder for SpeechTransformer2 works for two pass schedual sampling\"\"\"\n\n def call(self, samples, training: bool = None):\n x0 = samples[\"input\"]\n y0 = insert_sos_in_labels(samples[\"output\"], self.sos)\n x = self.x_net(x0, training=training)\n y = self.y_net(y0, training=training)\n input_length = self.compute_logit_length(samples)\n input_mask, output_mask = self._create_masks(x, input_length, y0)\n # first pass\n y, encoder_output = self.transformer(\n x,\n y,\n input_mask,\n output_mask,\n input_mask,\n training=training,\n return_encoder_output=True,\n )\n y_pre = self.final_layer(y)\n # second pass\n y = self.mix_target_sequence(y0, y_pre, training)\n y, encoder_output = self.transformer(\n x,\n y,\n input_mask,\n output_mask,\n input_mask,\n training=training,\n return_encoder_output=True,\n )\n y = self.final_layer(y)\n if self.hparams.return_encoder_output:\n return y, encoder_output\n return y\n\n def mix_target_sequence(self, gold_token, predicted_token, training, top_k=5):\n \"\"\" to mix gold token and prediction\n param gold_token: true labels\n param predicted_token: predictions by first pass\n return: mix of the gold_token and predicted_token\n \"\"\"\n mix_result = tf.TensorArray(\n tf.float32, size=1, dynamic_size=True, clear_after_read=False\n )\n for i in tf.range(tf.shape(gold_token)[-1]):\n if self.random_num([1]) > self.hparams.schedual_sampling_rate:# do schedual sampling\n selected_input = predicted_token[:, i, :]\n selected_idx = tf.nn.top_k(selected_input, top_k).indices\n embedding_input = self.y_net.layers[1](selected_idx, training=training)\n embedding_input = tf.reduce_mean(embedding_input, axis=1)\n mix_result = mix_result.write(i, embedding_input)\n else:\n selected_input = tf.reshape(gold_token[:, i], [-1, 1])\n embedding_input = self.y_net.layers[1](selected_input, training=training)\n mix_result = mix_result.write(i, embedding_input[:, 0, :])\n final_input = self.y_net.layers[2](tf.transpose(mix_result.stack(), [1, 0, 2]),\n training=training)\n final_input = self.y_net.layers[3](final_input, training=training)\n return final_input\n"
] |
[
[
"tensorflow.nn.relu6",
"tensorflow.random_uniform_initializer",
"tensorflow.TensorArray",
"tensorflow.maximum",
"tensorflow.shape",
"tensorflow.cast",
"tensorflow.reduce_mean",
"tensorflow.ones",
"tensorflow.keras.Model",
"tensorflow.reshape",
"tensorflow.math.equal",
"tensorflow.nn.top_k",
"tensorflow.math.ceil"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
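Editor's note: the following standalone sketch is illustrative only and is not part of the dataset row above. It reproduces, in plain TensorFlow 2.x eager code, the observable behavior of two helpers in the stored file: the combined padding/look-ahead output mask built in `_create_masks`, and the `compute_logit_length` downsampling. `square_subsequent_mask` is an assumed stand-in for athena's `generate_square_subsequent_mask`, which the stored code documents as marking masked positions with 1.0.

import tensorflow as tf

def square_subsequent_mask(size):
    # Strictly upper-triangular ones: 1.0 marks masked (future) positions,
    # matching the convention documented in _create_masks.
    return 1.0 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)

# Padding mask for a toy batch of label ids (0 = padding).
y = tf.constant([[5, 3, 0, 0],
                 [7, 2, 9, 0]])
pad_mask = tf.cast(tf.math.equal(y, 0), tf.float32)[:, tf.newaxis, tf.newaxis, :]
look_ahead = square_subsequent_mask(tf.shape(y)[1])
output_mask = tf.maximum(pad_mask, look_ahead)  # broadcasts to (2, 1, 4, 4)

# Logit length after the two stride-2 convolutions: ceil(ceil(T / 2) / 2).
input_length = tf.constant([100.0, 37.0])
logit_length = tf.cast(tf.math.ceil(tf.math.ceil(input_length / 2) / 2), tf.int32)
print(logit_length.numpy())  # [25 10]

Taking the elementwise maximum unions the two masks, so a position is masked if it is either padding or lies in the future relative to the query position.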
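As a second hedged sketch, here is a minimal stand-in for the `hparams` argument that `decode` expects, inferred purely from the attribute reads in the stored file (`beam_search`, `beam_size`, `lm_weight`, `lm_path`, `lm_type`); the class name and values are illustrative placeholders, not part of ATHENA's API.

# Illustrative only: matches the attributes SpeechTransformer.decode() reads.
class DecodeHParams:
    beam_search = True   # False would fall back to beam_size = 1 (greedy)
    beam_size = 4
    lm_weight = 0.0      # nonzero enables LM fusion; lm_path/lm_type are then used
    lm_path = None
    lm_type = "ngram"    # or "rnn" (requires passing lm_model to decode)

# predictions = model.decode(samples, DecodeHParams())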