Columns: repo_name (string, lengths 6–130), hexsha (list), file_path (list), code (list), apis (list), possible_versions (list)
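Each row below pairs a repository name with commit hashes, file paths, the raw source of each file, the API calls detected in that source, and the candidate library versions inferred for it. As a minimal sketch of working with records in this shape, assuming (hypothetically) that they are stored one JSON object per line with the column names above as keys, and with iter_records and apis_by_repo as illustrative helper names:

import json

def iter_records(path):
    # Yield one record dict per line of a JSON-lines file (assumed layout).
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            line = line.strip()
            if line:
                yield json.loads(line)

def apis_by_repo(path):
    # Map repo_name -> sorted, de-duplicated API calls across a repo's files.
    result = {}
    for record in iter_records(path):
        calls = {api for file_apis in record["apis"] for api in file_apis}
        result[record["repo_name"]] = sorted(calls)
    return result

Under that assumption, apis_by_repo("records.jsonl")["mattn/xgboost"] would return ["numpy.random.rand", "numpy.random.randn"] for the corresponding row below; the file name records.jsonl is likewise hypothetical.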
tirkarthi/estimator
[ "5d962124f1c2ad5b2886ada53d5c604257b660b6" ]
[ "tensorflow_estimator/python/estimator/canned/timeseries/model_utils.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Helper functions for training and constructing time series Models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import nn_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow_estimator.python.estimator.canned.timeseries import feature_keys\n\n\n# TODO(agarwal): Remove and replace with functionality from tf.slim\ndef fully_connected(inp,\n inp_size,\n layer_size,\n name,\n activation=tf.nn.relu,\n dtype=tf.dtypes.float32):\n \"\"\"Helper method to create a fully connected hidden layer.\"\"\"\n wt = tf.compat.v1.get_variable(\n name=\"{}_weight\".format(name), shape=[inp_size, layer_size], dtype=dtype)\n bias = tf.compat.v1.get_variable(\n name=\"{}_bias\".format(name),\n shape=[layer_size],\n initializer=tf.compat.v1.initializers.zeros())\n output = tf.compat.v1.nn.xw_plus_b(inp, wt, bias)\n if activation is not None:\n assert callable(activation)\n output = activation(output)\n return output\n\n\ndef canonicalize_times_or_steps_from_output(times, steps,\n previous_model_output):\n \"\"\"Canonicalizes either relative or absolute times, with error checking.\"\"\"\n if steps is not None and times is not None:\n raise ValueError(\"Only one of `steps` and `times` may be specified.\")\n if steps is None and times is None:\n raise ValueError(\"One of `steps` and `times` must be specified.\")\n if times is not None:\n times = numpy.array(times)\n if len(times.shape) != 2:\n times = times[None, ...]\n if (previous_model_output[feature_keys.FilteringResults.TIMES].shape[0] !=\n times.shape[0]):\n raise ValueError(\n (\"`times` must have a batch dimension matching\"\n \" the previous model output (got a batch dimension of {} for `times`\"\n \" and {} for the previous model output).\").format(\n times.shape[0], previous_model_output[\n feature_keys.FilteringResults.TIMES].shape[0]))\n if not (previous_model_output[feature_keys.FilteringResults.TIMES][:, -1] <\n times[:, 0]).all():\n raise ValueError(\"Prediction times must be after the corresponding \"\n \"previous model output.\")\n if steps is not None:\n predict_times = (\n previous_model_output[feature_keys.FilteringResults.TIMES][:, -1:] + 1 +\n numpy.arange(steps)[None, ...])\n else:\n predict_times = times\n return predict_times\n" ]
[ [ "numpy.arange", "tensorflow.compat.v1.initializers.zeros", "numpy.array", "tensorflow.compat.v1.nn.xw_plus_b" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
BGerwe/impedance.py
[ "f727318327bf96280691c51ac5ad00c552fa49d8" ]
[ "impedance/tests/test_fitting.py" ]
[ "from impedance.models.circuits.fitting import buildCircuit, rmse, \\\n extract_circuit_elements\nimport numpy as np\n\n# def test_residuals():\n# pass\n#\n#\n# def test_valid():\n# pass\n#\n#\n\n\ndef test_buildCircuit():\n\n # Test simple Randles circuit with CPE\n circuit = 'R0-p(R1-Wo1,CPE1)'\n params = [.1, .01, 1, 1000, 15, .9]\n frequencies = [1000.0, 5.0, 0.01]\n\n assert buildCircuit(circuit, frequencies, *params,\n constants={})[0].replace(' ', '') == \\\n 's([R([0.1],[1000.0,5.0,0.01]),' + \\\n 'p([s([R([0.01],[1000.0,5.0,0.01]),' + \\\n 'Wo([1.0,1000.0],[1000.0,5.0,0.01])]),' + \\\n 'CPE([15.0,0.9],[1000.0,5.0,0.01])])])'\n\n # Test multiple parallel elements\n circuit = 'R0-p(C1,R1,R2)'\n params = [.1, .01, .2, .3]\n frequencies = [1000.0, 5.0, 0.01]\n\n assert buildCircuit(circuit, frequencies, *params,\n constants={})[0].replace(' ', '') == \\\n 's([R([0.1],[1000.0,5.0,0.01]),' + \\\n 'p([C([0.01],[1000.0,5.0,0.01]),' + \\\n 'R([0.2],[1000.0,5.0,0.01]),' + \\\n 'R([0.3],[1000.0,5.0,0.01])])])'\n\n # Test nested parallel groups\n circuit = 'R0-p(p(R1, C1)-R2, C2)'\n params = [1, 2, 3, 4, 5]\n frequencies = [1000.0, 5.0, 0.01]\n\n assert buildCircuit(circuit, frequencies, *params,\n constants={})[0].replace(' ', '') == \\\n 's([R([1],[1000.0,5.0,0.01]),' + \\\n 'p([s([p([R([2],[1000.0,5.0,0.01]),' + \\\n 'C([3],[1000.0,5.0,0.01])]),' + \\\n 'R([4],[1000.0,5.0,0.01])]),' + \\\n 'C([5],[1000.0,5.0,0.01])])])'\n\n # Test parallel elements at beginning and end\n circuit = 'p(C1,R1)-p(C2,R2)'\n params = [.1, .01, .2, .3]\n frequencies = [1000.0, 5.0, 0.01]\n\n assert buildCircuit(circuit, frequencies, *params,\n constants={})[0].replace(' ', '') == \\\n 's([p([C([0.1],[1000.0,5.0,0.01]),' + \\\n 'R([0.01],[1000.0,5.0,0.01])]),' + \\\n 'p([C([0.2],[1000.0,5.0,0.01]),' + \\\n 'R([0.3],[1000.0,5.0,0.01])])])'\n\n # Test single element circuit\n circuit = 'R1'\n params = [100]\n frequencies = [1000.0, 5.0, 0.01]\n\n assert buildCircuit(circuit, frequencies, *params,\n constants={})[0].replace(' ', '') == \\\n '([R([100],[1000.0,5.0,0.01])])'\n\n\ndef test_RMSE():\n a = np.array([2 + 4*1j, 3 + 2*1j])\n b = np.array([2 + 4*1j, 3 + 2*1j])\n\n assert rmse(a, b) == 0.0\n\n c = np.array([2 + 4*1j, 1 + 4*1j])\n d = np.array([4 + 2*1j, 3 + 2*1j])\n assert np.isclose(rmse(c, d), 2*np.sqrt(2))\n\n\ndef test_element_extraction():\n circuit = 'R0-p(RR0,C1)-p(R1,C2032478)-W1'\n extracted_elements = extract_circuit_elements(circuit)\n assert extracted_elements == ['R0', 'RR0', 'C1', 'R1', 'C2032478', 'W1']\n" ]
[ [ "numpy.array", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mattn/xgboost
[ "e526871f0acb124a1b5701653a48662da63fd0be" ]
[ "tests/python/test_with_dask.py" ]
[ "import testing as tm\nimport pytest\nimport xgboost as xgb\nimport sys\nimport numpy as np\n\nif sys.platform.startswith(\"win\"):\n pytest.skip(\"Skipping dask tests on Windows\", allow_module_level=True)\n\npytestmark = pytest.mark.skipif(**tm.no_dask())\n\ntry:\n from distributed.utils_test import client, loop, cluster_fixture\n import dask.dataframe as dd\n import dask.array as da\n from xgboost.dask import DaskDMatrix\nexcept ImportError:\n client = None\n loop = None\n cluster_fixture = None\n pass\n\nkRows = 1000\nkCols = 10\n\n\ndef generate_array():\n partition_size = 20\n X = da.random.random((kRows, kCols), partition_size)\n y = da.random.random(kRows, partition_size)\n return X, y\n\n\ndef test_from_dask_dataframe(client):\n X, y = generate_array()\n\n X = dd.from_dask_array(X)\n y = dd.from_dask_array(y)\n\n dtrain = DaskDMatrix(client, X, y)\n booster = xgb.dask.train(\n client, {}, dtrain, num_boost_round=2)['booster']\n\n prediction = xgb.dask.predict(client, model=booster, data=dtrain)\n\n assert prediction.ndim == 1\n assert isinstance(prediction, da.Array)\n assert prediction.shape[0] == kRows\n\n with pytest.raises(ValueError):\n # evals_result is not supported in dask interface.\n xgb.dask.train(\n client, {}, dtrain, num_boost_round=2, evals_result={})\n\n prediction = prediction.compute() # force prediction to be computed\n\n\ndef test_from_dask_array(client):\n X, y = generate_array()\n dtrain = DaskDMatrix(client, X, y)\n # results is {'booster': Booster, 'history': {...}}\n result = xgb.dask.train(client, {}, dtrain)\n\n prediction = xgb.dask.predict(client, result, dtrain)\n assert prediction.shape[0] == kRows\n\n assert isinstance(prediction, da.Array)\n\n prediction = prediction.compute() # force prediction to be computed\n\n\ndef test_regressor(client):\n X, y = generate_array()\n regressor = xgb.dask.DaskXGBRegressor(verbosity=1, n_estimators=2)\n regressor.set_params(tree_method='hist')\n regressor.client = client\n regressor.fit(X, y, eval_set=[(X, y)])\n prediction = regressor.predict(X)\n\n assert prediction.ndim == 1\n assert prediction.shape[0] == kRows\n\n history = regressor.evals_result()\n\n assert isinstance(prediction, da.Array)\n assert isinstance(history, dict)\n\n assert list(history['validation_0'].keys())[0] == 'rmse'\n assert len(history['validation_0']['rmse']) == 2\n\n\ndef test_classifier(client):\n X, y = generate_array()\n y = (y * 10).astype(np.int32)\n classifier = xgb.dask.DaskXGBClassifier(verbosity=1, n_estimators=2)\n classifier.client = client\n classifier.fit(X, y, eval_set=[(X, y)])\n prediction = classifier.predict(X)\n\n assert prediction.ndim == 1\n assert prediction.shape[0] == kRows\n\n history = classifier.evals_result()\n\n assert isinstance(prediction, da.Array)\n assert isinstance(history, dict)\n\n assert list(history.keys())[0] == 'validation_0'\n assert list(history['validation_0'].keys())[0] == 'merror'\n assert len(list(history['validation_0'])) == 1\n assert len(history['validation_0']['merror']) == 2\n\n assert classifier.n_classes_ == 10\n\n # Test with dataframe.\n X_d = dd.from_dask_array(X)\n y_d = dd.from_dask_array(y)\n classifier.fit(X_d, y_d)\n\n assert classifier.n_classes_ == 10\n prediction = classifier.predict(X_d)\n\n assert prediction.ndim == 1\n assert prediction.shape[0] == kRows\n\n\ndef run_empty_dmatrix(client, parameters):\n\n def _check_outputs(out, predictions):\n assert isinstance(out['booster'], xgb.dask.Booster)\n assert len(out['history']['validation']['rmse']) == 2\n assert 
isinstance(predictions, np.ndarray)\n assert predictions.shape[0] == 1\n\n kRows, kCols = 1, 97\n X = dd.from_array(np.random.randn(kRows, kCols))\n y = dd.from_array(np.random.rand(kRows))\n dtrain = xgb.dask.DaskDMatrix(client, X, y)\n\n out = xgb.dask.train(client, parameters,\n dtrain=dtrain,\n evals=[(dtrain, 'validation')],\n num_boost_round=2)\n predictions = xgb.dask.predict(client=client, model=out,\n data=dtrain).compute()\n _check_outputs(out, predictions)\n\n # train has more rows than evals\n valid = dtrain\n kRows += 1\n X = dd.from_array(np.random.randn(kRows, kCols))\n y = dd.from_array(np.random.rand(kRows))\n dtrain = xgb.dask.DaskDMatrix(client, X, y)\n\n out = xgb.dask.train(client, parameters,\n dtrain=dtrain,\n evals=[(valid, 'validation')],\n num_boost_round=2)\n predictions = xgb.dask.predict(client=client, model=out,\n data=valid).compute()\n _check_outputs(out, predictions)\n\n\n# No test for Exact, as empty DMatrix handling are mostly for distributed\n# environment and Exact doesn't support it.\n\ndef test_empty_dmatrix_hist(client):\n parameters = {'tree_method': 'hist'}\n run_empty_dmatrix(client, parameters)\n\n\ndef test_empty_dmatrix_approx(client):\n parameters = {'tree_method': 'approx'}\n run_empty_dmatrix(client, parameters)\n" ]
[ [ "numpy.random.randn", "numpy.random.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
frostburn/image-source-reverb
[ "84c467249fb5fb1e978009510889a0447a625cda" ]
[ "demos/triangular_drum.py" ]
[ "from pylab import *\nfrom matplotlib.animation import FuncAnimation\nimport matplotlib.pyplot as plt\nfrom image_source_reverb.shapes import equilateral_triangle\n\n\n# TODO: Improve\ndef gaussian_point_source_2D(t, r, tightness=50):\n \"\"\"\n A somewhat close approximation to the 2D Wave Equation with gaussian initial conditions\n \"\"\"\n u = exp(-(tightness * (t-r))**2)\n u -= exp(-(tightness * (t-r)-0.5)**2)*0.66\n return u * (r + 0.5)**-0.5\n\nx = linspace(-2.2, 2.2, 256)\nx, y = meshgrid(x, x)\n\ninside = vectorize(lambda x, y: equilateral_triangle.contains([x, y]))(x, y)\n\n\nsource_x = 1.0\nsource_y = 0.2\n\nreflections = equilateral_triangle.reflections([source_x, source_y], 8)\n\nprint(\"Calculating using {} reflected images...\".format(len(reflections)))\n\nu = 0*x\nfig, ax = plt.subplots()\nplots = [ax.imshow(u * inside, vmin=-0.5, vmax=0.5)]\ndef update(t):\n u = 0*x\n for image_source, sign in reflections:\n if t - 2 < norm(image_source) < t + 2:\n r = sqrt((x-image_source[0])**2 + (y-image_source[1])**2)\n u += gaussian_point_source_2D(t, r) * sign\n plots[0].set_data(u * inside)\n return plots\n\nani = FuncAnimation(fig, update, frames=np.linspace(0, 10, 256),\n init_func=lambda: plots, blit=True, repeat=False, interval=50)\nplt.show()\n" ]
[ [ "matplotlib.pyplot.show", "matplotlib.pyplot.subplots" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
SohSalari/Stone-Soup
[ "39c7f02ce11e10c9b3c612ad359f6d8bca495266", "e073d4145df18facad43e4ab7b0a34e8d65f67ee" ]
[ "stonesoup/sensor/tests/test_passive.py", "stonesoup/platform/tests/test_platform_simple.py" ]
[ "# -*- coding: utf-8 -*-\nimport datetime\n\nimport numpy as np\n\nfrom ...functions import cart2angles, rotx, roty, rotz\nfrom ...types.array import StateVector, CovarianceMatrix\nfrom ...types.state import State\nfrom ..passive import PassiveElevationBearing\n\n\ndef test_passive_sensor():\n # Input arguments\n # TODO: pytest parametarization\n noise_covar = CovarianceMatrix([[np.deg2rad(0.015), 0],\n [0, np.deg2rad(0.1)]])\n detector_position = StateVector([1, 1, 0])\n detector_orientation = StateVector([0, 0, 0])\n target_state = State(detector_position +\n np.array([[1], [1], [0]]),\n timestamp=datetime.datetime.now())\n measurement_mapping = np.array([0, 1, 2])\n\n # Create a radar object\n detector = PassiveElevationBearing(\n position=detector_position,\n orientation=detector_orientation,\n ndim_state=3,\n mapping=measurement_mapping,\n noise_covar=noise_covar)\n\n # Assert that the object has been correctly initialised\n assert (np.equal(detector.position, detector_position).all())\n\n # Generate a noiseless measurement for the given target\n measurement = detector.measure(target_state, noise=False)\n\n # Account\n xyz = target_state.state_vector - detector_position\n\n # Calculate rotation matrix\n theta_x = -detector_orientation[0, 0]\n theta_y = -detector_orientation[1, 0]\n theta_z = -detector_orientation[2, 0]\n rot_mat = rotz(theta_z) @ roty(theta_y) @ rotx(theta_x)\n\n # Rotate coordinates\n xyz_rot = rot_mat @ xyz\n\n # Convert to Angles\n phi, theta = cart2angles(*xyz_rot)\n\n # Assert correction of generated measurement\n assert (measurement.timestamp == target_state.timestamp)\n assert (np.equal(measurement.state_vector,\n StateVector([theta, phi])).all())\n", "# coding: utf-8\nimport datetime\n\nimport numpy as np\nimport pytest\n\nfrom ..tests import test_platform_base\nfrom ...types.state import State\nfrom ..base import MovingPlatform, FixedPlatform\nfrom ...models.transition.linear import (\n ConstantVelocity, CombinedLinearGaussianTransitionModel)\nfrom ...sensor.radar.radar import RadarRangeBearing\nfrom ...types.array import StateVector, CovarianceMatrix\n\n\ndef get_3d_expected(i):\n if i == 0:\n # static platform or X velocity\n return np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [-1, 0, 0],\n [0, -1, 0], [0, 0, 1], [0, 0, -1]])\n elif i == 1:\n # y-axis motion\n return np.array([[0, 0, 0], [0, 1, 0], [-1, 0, 0], [0, -1, 0],\n [1, 0, 0], [0, 0, 1], [0, 0, -1]])\n elif i == 2:\n # negative x-axis motion\n return np.array([[0, 0, 0], [-1, 0, 0], [0, -1, 0], [1, 0, 0],\n [0, 1, 0], [0, 0, 1], [0, 0, -1]])\n elif i == 3:\n # negative y-axis motion\n return np.array([[0, 0, 0], [0, -1, 0], [1, 0, 0], [0, 1, 0],\n [-1, 0, 0], [0, 0, 1], [0, 0, -1]])\n elif i == 4:\n # x-y motion\n return np.array([[0, 0, 0], [1/np.sqrt(2), 1/np.sqrt(2), 0],\n [-1/np.sqrt(2), 1/np.sqrt(2), 0],\n [-1/np.sqrt(2), -1/np.sqrt(2), 0],\n [1/np.sqrt(2), -1/np.sqrt(2), 0], [0, 0, 1],\n [0, 0, -1]])\n elif i == 5:\n # neg x- neg y motion\n return np.array([[0, 0, 0], [-1/np.sqrt(2), -1/np.sqrt(2), 0],\n [1/np.sqrt(2), -1/np.sqrt(2), 0],\n [1/np.sqrt(2), 1/np.sqrt(2), 0],\n [-1/np.sqrt(2), 1/np.sqrt(2), 0], [0, 0, 1],\n [0, 0, -1]])\n elif i == 6:\n # pos x- neg y motion\n return np.array([[0, 0, 0], [1/np.sqrt(2), -1/np.sqrt(2), 0],\n [1/np.sqrt(2), 1/np.sqrt(2), 0],\n [-1/np.sqrt(2), 1/np.sqrt(2), 0],\n [-1/np.sqrt(2), -1/np.sqrt(2), 0], [0, 0, 1],\n [0, 0, -1]])\n elif i == 7:\n # neg x- pos y motion\n return np.array([[0, 0, 0], [-1/np.sqrt(2), 1/np.sqrt(2), 0],\n [-1/np.sqrt(2), 
-1/np.sqrt(2), 0],\n [1/np.sqrt(2), -1/np.sqrt(2), 0],\n [1/np.sqrt(2), 1/np.sqrt(2), 0], [0, 0, 1],\n [0, 0, -1]])\n elif i == 8:\n # \"z vel\"\n return np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, -1],\n [0, -1, 0], [-1, 0, 0], [1, 0, 0]])\n elif i == 9:\n # \"-z vel\"\n return np.array([[0, 0, 0], [0, 0, -1], [0, 1, 0], [0, 0, 1],\n [0, -1, 0], [1, 0, 0], [-1, 0, 0]])\n elif i == 10:\n # \"y.z vel\"\n return np.array([[0, 0, 0], [0, 1/np.sqrt(2), 1/np.sqrt(2)],\n [-1, 0, 0], [0, -1/np.sqrt(2), -1/np.sqrt(2)],\n [1, 0, 0], [0, -1/np.sqrt(2), 1/np.sqrt(2)],\n [0, 1/np.sqrt(2), -1/np.sqrt(2)]])\n elif i == 11:\n # \"y.-z vel\"\n return np.array([[0, 0, 0], [0, 1/np.sqrt(2), -1/np.sqrt(2)],\n [-1, 0, 0], [0, -1/np.sqrt(2), 1/np.sqrt(2)],\n [1, 0, 0], [0, 1/np.sqrt(2), 1/np.sqrt(2)],\n [0, -1/np.sqrt(2), -1/np.sqrt(2)]])\n elif i == 12:\n # \"-y.z vel\"\n return np.array([[0, 0, 0], [0, -1/np.sqrt(2), 1/np.sqrt(2)],\n [1, 0, 0], [0, 1/np.sqrt(2), -1/np.sqrt(2)],\n [-1, 0, 0], [0, 1/np.sqrt(2), 1/np.sqrt(2)],\n [0, -1/np.sqrt(2), -1/np.sqrt(2)]])\n elif i == 13:\n # \"-y.-z vel\"\n return np.array([[0, 0, 0], [0, -1/np.sqrt(2), -1/np.sqrt(2)],\n [1, 0, 0], [0, 1/np.sqrt(2), 1/np.sqrt(2)],\n [-1, 0, 0], [0, -1/np.sqrt(2), 1/np.sqrt(2)],\n [0, 1/np.sqrt(2), -1/np.sqrt(2)]])\n elif i == 14:\n # x.z vel\n return np.array([[0, 0, 0], [1/np.sqrt(2), 0, 1/np.sqrt(2)],\n [0, 1, 0], [-1/np.sqrt(2), 0, -1/np.sqrt(2)],\n [0, -1, 0], [-1/np.sqrt(2), 0, 1/np.sqrt(2)],\n [1/np.sqrt(2), 0, -1/np.sqrt(2)]])\n elif i == 15:\n # -x.z vel\n return np.array([[0, 0, 0], [-1/np.sqrt(2), 0, 1/np.sqrt(2)],\n [0, -1, 0], [1/np.sqrt(2), 0, -1/np.sqrt(2)],\n [0, 1, 0], [1/np.sqrt(2), 0, 1/np.sqrt(2)],\n [-1/np.sqrt(2), 0, -1/np.sqrt(2)]])\n elif i == 16:\n # x.-z vel\n return np.array([[0, 0, 0], [1/np.sqrt(2), 0, -1/np.sqrt(2)],\n [0, 1, 0], [-1/np.sqrt(2), 0, 1/np.sqrt(2)],\n [0, -1, 0], [1/np.sqrt(2), 0, 1/np.sqrt(2)],\n [-1/np.sqrt(2), 0, -1/np.sqrt(2)]])\n elif i == 17:\n # -x,-z vel\n return np.array([[0, 0, 0], [-1/np.sqrt(2), 0, -1/np.sqrt(2)],\n [0, -1, 0], [1/np.sqrt(2), 0, 1/np.sqrt(2)],\n [0, 1, 0], [-1/np.sqrt(2), 0, 1/np.sqrt(2)],\n [1/np.sqrt(2), 0, -1/np.sqrt(2)]])\n elif i == 18:\n # x.y.z vel\n a = np.cos(np.arctan2(1, np.sqrt(2)) * -1)\n b = np.sin(np.arctan2(1, np.sqrt(2)) * -1) / np.sqrt(2)\n return np.array([[0, 0, 0], [1/np.sqrt(3), 1/np.sqrt(3), 1/np.sqrt(3)],\n [-1/np.sqrt(2), 1/np.sqrt(2), 0],\n [-1/np.sqrt(3), -1/np.sqrt(3), -1/np.sqrt(3)],\n [1/np.sqrt(2), -1/np.sqrt(2), 0],\n [b, b, a], [-b, -b, -a]])\n elif i == 19:\n # -x.-y.-z vel\n a = np.cos(np.arctan2(-1, np.sqrt(2)) * -1)\n b = np.sin(np.arctan2(-1, np.sqrt(2)) * -1) / np.sqrt(2)\n return np.array([[0, 0, 0],\n [-1/np.sqrt(3), -1/np.sqrt(3), -1/np.sqrt(3)],\n [1/np.sqrt(2), -1/np.sqrt(2), 0],\n [1/np.sqrt(3), 1/np.sqrt(3), 1/np.sqrt(3)],\n [-1/np.sqrt(2), 1/np.sqrt(2), 0],\n [-b, -b, a], [b, b, -a]])\n\n\[email protected]\ndef radars_2d():\n # Generate 5 radar models for testing purposes\n noise_covar = CovarianceMatrix(np.array([[0.015, 0],\n [0, 0.1]]))\n\n measurement_mapping = np.array([0, 2])\n\n # Create 5 simple radar sensor objects\n radar1 = RadarRangeBearing(\n ndim_state=4,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar,\n )\n\n radar2 = RadarRangeBearing(\n ndim_state=4,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar\n )\n radar3 = RadarRangeBearing(\n ndim_state=4,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar\n )\n\n radar4 = RadarRangeBearing(\n 
ndim_state=4,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar\n )\n radar5 = RadarRangeBearing(\n ndim_state=4,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar\n )\n\n return [radar1, radar2, radar3, radar4, radar5]\n\n\[email protected]\ndef radars_3d():\n # Generate 7 radar models for testing purposes\n noise_covar = CovarianceMatrix(np.array([[0.015, 0],\n [0, 0.1]]))\n\n measurement_mapping = np.array([0, 2, 4])\n\n # Create 5 simple radar sensor objects\n radar1 = RadarRangeBearing(\n ndim_state=6,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar\n )\n\n radar2 = RadarRangeBearing(\n ndim_state=6,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar\n )\n radar3 = RadarRangeBearing(\n ndim_state=6,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar\n )\n\n radar4 = RadarRangeBearing(\n ndim_state=6,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar\n )\n radar5 = RadarRangeBearing(\n ndim_state=6,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar\n )\n radar6 = RadarRangeBearing(\n ndim_state=6,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar\n )\n radar7 = RadarRangeBearing(\n ndim_state=6,\n position_mapping=measurement_mapping,\n noise_covar=noise_covar\n )\n return [radar1, radar2, radar3, radar4, radar5, radar6, radar7]\n\n\[email protected](scope='session')\ndef mounting_offsets_2d():\n # Generate sensor mounting offsets for testing purposes\n offsets = [[0, 0],\n [1, 0],\n [0, 1],\n [-1, 0],\n [0, -1]]\n return [StateVector(offset) for offset in offsets]\n\n\[email protected](scope='session')\ndef mounting_offsets_3d():\n # Generate sensor mounting offsets for testing purposes\n offsets = [[0, 0, 0],\n [1, 0, 0],\n [0, 1, 0],\n [-1, 0, 0],\n [0, -1, 0],\n [0, 0, 1],\n [0, 0, -1]]\n return [StateVector(offset) for offset in offsets]\n\n\[email protected](params=[MovingPlatform, FixedPlatform],\n ids=['MovingPlatform', 'FixedPlatform'])\ndef platform_type(request):\n return request.param\n\n\[email protected](params=[True, False], ids=[\"Moving\", \"Static\"])\ndef move(request):\n return request.param\n\n\[email protected](params=[True, False], ids=[\"Add\", \"Initialise\"])\ndef add_sensor(request):\n return request.param\n\n\ntestdata_2d = [\n StateVector([0, 0, 0, 0]),\n StateVector([10, 0, 0, 0]),\n StateVector([0, 1, 0, 0]),\n StateVector([0, 0, 0, 1]),\n StateVector([0, -1, 0, 0]),\n StateVector([0, 0, 0, -1]),\n StateVector([0, 1, 0, 1]),\n StateVector([0, -1, 0, -1]),\n StateVector([0, 1, 0, -1]),\n StateVector([0, -1, 0, 1])\n]\n\nexpected_2d = [\n # static platform or X velocity\n np.array([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]]),\n # static platform or X velocity\n np.array([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]]),\n # static platform or X velocity\n np.array([[0, 0], [1, 0], [0, 1], [-1, 0], [0, -1]]),\n # y-axis motion\n np.array([[0, 0], [0, 1], [-1, 0], [0, -1], [1, 0]]),\n # negative x-axis motion\n np.array([[0, 0], [-1, 0], [0, -1], [1, 0], [0, 1]]),\n # negative y-axis motion\n np.array([[0, 0], [0, -1], [1, 0], [0, 1], [-1, 0]]),\n # x-y motion\n np.array([[0, 0], [1/np.sqrt(2), 1/np.sqrt(2)],\n [-1/np.sqrt(2), 1/np.sqrt(2)], [-1/np.sqrt(2), -1/np.sqrt(2)],\n [1/np.sqrt(2), -1/np.sqrt(2)]]),\n # neg x- neg y motion\n np.array([[0, 0], [-1/np.sqrt(2), -1/np.sqrt(2)],\n [1/np.sqrt(2), -1/np.sqrt(2)], [1/np.sqrt(2), 1/np.sqrt(2)],\n [-1/np.sqrt(2), 1/np.sqrt(2)]]),\n # pos x- neg y motion\n np.array([[0, 0], [1/np.sqrt(2), 
-1/np.sqrt(2)],\n [1/np.sqrt(2), 1/np.sqrt(2)], [-1/np.sqrt(2), 1/np.sqrt(2)],\n [-1/np.sqrt(2), -1/np.sqrt(2)]]),\n # neg x- pos y motion\n np.array([[0, 0], [-1/np.sqrt(2), 1/np.sqrt(2)],\n [-1/np.sqrt(2), -1/np.sqrt(2)], [1/np.sqrt(2), -1/np.sqrt(2)],\n [1/np.sqrt(2), 1/np.sqrt(2)]])\n]\n\n\[email protected](\n 'state, expected', zip(testdata_2d, expected_2d),\n ids=[\"Static\", \"pos offset\", \"x vel\", \"y vel\", \"-x vel\", \"-y vel\",\n \"x,y vel\", \"-x,-y vel\", \"x,-y vel\", \"-x,y vel\"])\ndef test_2d_platform(state, expected, move, radars_2d,\n mounting_offsets_2d, add_sensor):\n # Define time related variables\n timestamp = datetime.datetime.now()\n # Define transition model and position for platform\n model_1d = ConstantVelocity(0.0) # zero noise so pure movement\n trans_model = CombinedLinearGaussianTransitionModel(\n [model_1d] * (radars_2d[0].ndim_state // 2))\n platform_state = State(state, timestamp)\n\n # This defines the position_mapping to the platforms state vector (i.e. x and y)\n mounting_mapping = np.array([0, 2])\n # create a platform with the simple radar mounted\n if add_sensor:\n platform = MovingPlatform(\n states=platform_state,\n transition_model=trans_model,\n sensors=[],\n mounting_offsets=[],\n position_mapping=mounting_mapping\n )\n for sensor, offset in zip(radars_2d, mounting_offsets_2d):\n platform.add_sensor(sensor, offset)\n else:\n platform = MovingPlatform(\n states=platform_state,\n transition_model=trans_model,\n sensors=radars_2d,\n mounting_offsets=mounting_offsets_2d,\n position_mapping=mounting_mapping\n )\n if move:\n # Move the platform\n platform.move(timestamp + datetime.timedelta(seconds=2))\n sensor_positions_test(expected, platform)\n\n\ntestdata_3d = [\n (StateVector([0, 0, 0, 0, 0, 0]), get_3d_expected(0)),\n (StateVector([10, 0, 0, 0, 0, 0]), get_3d_expected(0)),\n (StateVector([0, 1, 0, 0, 0, 0]), get_3d_expected(0)),\n (StateVector([0, 0, 0, 1, 0, 0]), get_3d_expected(1)),\n (StateVector([0, -1, 0, 0, 0, 0]), get_3d_expected(2)),\n (StateVector([0, 0, 0, -1, 0, 0]), get_3d_expected(3)),\n (StateVector([0, 1, 0, 1, 0, 0]), get_3d_expected(4)),\n (StateVector([0, -1, 0, -1, 0, 0]), get_3d_expected(5)),\n (StateVector([0, 1, 0, -1, 0, 0]), get_3d_expected(6)),\n (StateVector([0, -1, 0, 1, 0, 0]), get_3d_expected(7)),\n (StateVector([0, 0, 0, 0, 0, 1]), get_3d_expected(8)),\n (StateVector([0, 0, 0, 0, 0, -1]), get_3d_expected(9)),\n (StateVector([0, 0, 0, 1, 0, 1]), get_3d_expected(10)),\n (StateVector([0, 0, 0, 1, 0, -1]), get_3d_expected(11)),\n (StateVector([0, 0, 0, -1, 0, 1]), get_3d_expected(12)),\n (StateVector([0, 0, 0, -1, 0, -1]), get_3d_expected(13)),\n (StateVector([0, 1, 0, 0, 0, 1]), get_3d_expected(14)),\n (StateVector([0, -1, 0, 0, 0, 1]), get_3d_expected(15)),\n (StateVector([0, 1, 0, 0, 0, -1]), get_3d_expected(16)),\n (StateVector([0, -1, 0, 0, 0, -1]), get_3d_expected(17)),\n (StateVector([0, 1, 0, 1, 0, 1]), get_3d_expected(18)),\n (StateVector([0, -1, 0, -1, 0, -1]), get_3d_expected(19))\n]\n\n\[email protected]('state, expected', testdata_3d, ids=[\n \"Static\", \"pos offset\", \"x vel\", \"y vel\", \"-x vel\", \"-y vel\", \"x,y vel\",\n \"-x,-y vel\", \"x,-y vel\", \"-x,y vel\", \"z vel\", \"-z vel\", \"y.z vel\",\n \"y.-z vel\", \"-y.z vel\", \"-y.-z vel\", \"x.z vel\", \"-x.z vel\", \"x.-z vel\",\n \"-x,-z vel\", \"x,y,z vel\", \"-x,-y,-z vel\"\n])\ndef test_3d_platform(state, expected, move, radars_3d, mounting_offsets_3d,\n add_sensor):\n # Define time related variables\n timestamp = 
datetime.datetime.now()\n # Define transition model and position for platform\n model_1d = ConstantVelocity(0.0) # zero noise so pure movement\n trans_model = CombinedLinearGaussianTransitionModel(\n [model_1d] * (radars_3d[0].ndim_state // 2))\n platform_state = State(state, timestamp)\n\n # This defines the position_mapping to the platforms state vector (i.e. x and y)\n mounting_mapping = np.array([0, 2, 4])\n # create a platform with the simple radar mounted\n if add_sensor:\n platform = MovingPlatform(\n states=platform_state,\n transition_model=trans_model,\n sensors=[],\n mounting_offsets=[],\n position_mapping=mounting_mapping\n )\n for sensor, offset in zip(radars_3d, mounting_offsets_3d):\n platform.add_sensor(sensor, offset)\n else:\n platform = MovingPlatform(\n states=platform_state,\n transition_model=trans_model,\n sensors=radars_3d,\n mounting_offsets=mounting_offsets_3d,\n position_mapping=mounting_mapping\n )\n if move:\n # Move the platform\n platform.move(timestamp + datetime.timedelta(seconds=2))\n sensor_positions_test(expected, platform)\n\n\[email protected](scope='session')\ndef rotation_offsets_2d():\n # Generate sensor mounting offsets for testing purposes\n offsets = [[0, 0, 0],\n [0, 0, np.pi / 4],\n [0, 0, -np.pi / 4],\n [0, 0, np.pi / 2],\n [0, 0, -np.pi / 2]]\n return [StateVector(offset) for offset in offsets]\n\n\[email protected](scope='session')\ndef rotation_offsets_3d():\n # Generate sensor rotation offsets for testing purposes\n offsets = [[0, 0, 0],\n [np.pi / 4, 0, 0],\n [0, np.pi / 4, 0],\n [-np.pi / 4, 0, 0],\n [0, -np.pi / 4, 0],\n [0, 0, np.pi / 4],\n [0, 0, -np.pi / 4]]\n return [StateVector(offset) for offset in offsets]\n\n\ndef expected_orientations_3d():\n pi = np.pi\n offset_3d_movement = np.arctan(1/np.sqrt(2))\n\n return [\n np.array([[0., 0., 0.], [pi/4, 0., 0.], [0., pi/4, 0.], [-pi/4, 0., 0.],\n [0., -pi/4, 0.], [0., 0., pi/4], [0., 0., -pi/4]]),\n np.array([[0., 0., pi/2], [pi/4, 0., pi/2], [0., pi/4, pi/2], [-pi/4, 0., pi/2],\n [0., -pi/4, pi/2], [0., 0., 3 * pi/4], [0., 0., pi/4]]),\n np.array([[0., pi/2, 0.],\n [pi/4, pi/2, 0.], [0., 3 * pi/4, 0.], [-pi/4, pi/2, 0.],\n [0., pi/4, 0.], [0., pi/2, pi/4], [0., pi/2, -pi/4]]),\n np.array([[0., 0., 0.], [pi/4, 0., 0.], [0., pi/4, 0.], [-pi/4, 0., 0.],\n [0., -pi/4, 0.], [0., 0., pi/4], [0., 0., -pi/4]]),\n np.array([[0., 0., pi/2], [pi/4, 0., pi/2], [0., pi/4, pi/2], [-pi/4, 0., pi/2],\n [0., -pi/4, pi/2], [0., 0., 3 * pi/4], [0., 0., pi/4]]),\n np.array([[0., pi/2, 0.], [pi/4, pi/2, 0.], [0., 3 * pi/4, 0.], [-pi/4, pi/2, 0.],\n [0., pi/4, 0.], [0., pi/2, pi/4], [0., pi/2, -pi/4]]),\n np.array([[0., 0., pi/4], [pi/4, 0., pi/4], [0., pi/4, pi/4], [-pi/4, 0., pi/4],\n [0., -pi/4, pi/4], [0., 0., pi/2], [0., 0., 0.]]),\n np.array([[0., pi/2, pi/4], [pi/4, pi/2, pi/4], [0., 3 * pi/4, pi/4], [-pi/4, pi/2, pi/4],\n [0., pi/4, pi/4], [0., pi/2, pi/2], [0., pi/2, 0.]]),\n np.array([[0., pi/4, offset_3d_movement], [pi/4, pi/4, offset_3d_movement],\n [0., pi/2, offset_3d_movement], [-pi/4, pi/4, offset_3d_movement],\n [0., 0., offset_3d_movement], [0., pi/4, pi/4 + offset_3d_movement],\n [0., pi/4, -pi/4 + offset_3d_movement]]),\n np.array([[0., pi, 0.], [pi/4, pi, 0.], [0., 5 * pi/4, 0.], [-pi/4, pi, 0.],\n [0., 3 * pi/4, 0.], [0., pi, pi/4], [0., pi, -pi/4]]),\n np.array([[0., -pi/2, 0.], [pi/4, -pi/2, 0.], [0., -pi/4, 0.], [-pi/4, -pi/2, 0.],\n [0., -3 * pi/4, 0.], [0., -pi/2, pi/4], [0., -pi/2, -pi/4]]),\n np.array([[0., 0., -pi/2], [pi/4, 0., -pi/2], [0., pi/4, -pi/2], [-pi/4, 0., -pi/2],\n 
[0., -pi/4, -pi/2], [0., 0., -pi/4], [0., 0., -3 * pi/4]]),\n np.array([[0., pi, 0.], [pi/4, pi, 0.], [0., 5 * pi/4, 0.], [-pi/4, pi, 0.],\n [0., 3 * pi/4, 0.], [0., pi, pi/4], [0., pi, -pi/4]]),\n np.array([[0., -pi/2, 0.], [pi/4, -pi/2, 0.], [0., -pi/4, 0.], [-pi/4, -pi/2, 0.],\n [0., -3 * pi/4, 0.], [0., -pi/2, pi/4], [0., -pi/2, -pi/4]]),\n np.array([[0., 0., -pi/2], [pi/4, 0., -pi/2], [0., pi/4, -pi/2], [-pi/4, 0., -pi/2],\n [0., -pi/4, -pi/2], [0., 0., -pi/4], [0., 0., -3 * pi/4]]),\n np.array([[0., pi, -pi/4], [pi/4, pi, -pi/4], [0., 5 * pi/4, -pi/4], [-pi/4, pi, -pi/4],\n [0., 3 * pi/4, -pi/4], [0., pi, 0.], [0., pi, -pi/2]]),\n np.array([[0., -pi/2, -pi/4], [pi/4, -pi/2, -pi/4], [0., -pi/4, -pi/4],\n [-pi/4, -pi/2, -pi/4], [0., -3 * pi/4, -pi/4], [0., -pi/2, 0.],\n [0., -pi/2, -pi/2]]),\n ]\n\n\ndef expected_orientations_2d():\n pi = np.pi\n return [\n np.array([[0., 0., 0.], [0., 0., pi/4], [0., 0., -pi/4], [0., 0., pi/2],\n [0., 0., -pi/2]]),\n np.array([[0., 0., pi/2], [0., 0., 3 * pi/4], [0., 0., pi/4], [0., 0., pi],\n [0., 0., 0.]]),\n np.array([[0., 0., 0.], [0., 0., pi/4], [0., 0., -pi/4], [0., 0., pi/2],\n [0., 0., -pi/2]]),\n np.array([[0., 0., pi/2], [0., 0., 3 * pi/4], [0., 0., pi/4], [0., 0., pi],\n [0., 0., 0.]]),\n np.array([[0., 0., pi/4], [0., 0., pi/2], [0., 0., 0.], [0., 0., 3 * pi/4],\n [0., 0., -pi/4]]),\n np.array([[0., 0., pi], [0., 0., 5*pi/4], [0., 0., 3 * pi/4], [0., 0., 3 * pi/2],\n [0., 0., pi/2]]),\n np.array([[0., 0., -pi/2], [0., 0., -pi/4], [0., 0., -3 * pi/4], [0., 0., 0.],\n [0., 0., -pi]]),\n np.array([[0., 0., pi], [0., 0., 5 * pi/4], [0., 0., 3 * pi/4], [0., 0., 3 * pi/2],\n [0., 0., pi/2]]),\n np.array([[0., 0., -pi/2], [0., 0., -pi/4], [0., 0., -3 * pi/4], [0., 0., 0.],\n [0., 0., -pi]]),\n np.array([[0., 0., -3 * pi/4], [0., 0., -pi/2], [0., 0., -pi], [0., 0., -pi/4],\n [0., 0., -5 * pi/4]])\n ]\n\n\[email protected]('state, expected_platform_orientation, expected_sensor_orientations',\n zip(*zip(*test_platform_base.orientation_tests_2d),\n expected_orientations_2d()))\ndef test_rotation_offsets_2d(state, expected_platform_orientation, expected_sensor_orientations,\n move, radars_2d, rotation_offsets_2d):\n # Define time related variables\n timestamp = datetime.datetime.now()\n # Define transition model and position for platform\n model_1d = ConstantVelocity(0.0) # zero noise so pure movement\n trans_model = CombinedLinearGaussianTransitionModel(\n [model_1d] * (radars_2d[0].ndim_state // 2))\n platform_state = State(state, timestamp)\n\n # This defines the position_mapping to the platforms state vector (i.e. 
x and y)\n mounting_mapping = np.array([0, 2])\n # create a platform with the simple radar mounted\n platform = MovingPlatform(\n states=platform_state,\n transition_model=trans_model,\n sensors=radars_2d,\n rotation_offsets=rotation_offsets_2d,\n position_mapping=mounting_mapping\n )\n if move:\n # Move the platform\n platform.move(timestamp + datetime.timedelta(seconds=2))\n assert np.allclose(platform.orientation, expected_platform_orientation)\n assert np.allclose(all_sensor_orientations(platform), expected_sensor_orientations)\n\n\[email protected]('state, expected_platform_orientation, expected_sensor_orientations',\n zip(*zip(*test_platform_base.orientation_tests_3d),\n expected_orientations_3d()))\ndef test_rotation_offsets_3d(state, expected_platform_orientation, expected_sensor_orientations,\n move, radars_3d, rotation_offsets_3d):\n # Define time related variables\n timestamp = datetime.datetime.now()\n # Define transition model and position for platform\n model_1d = ConstantVelocity(0.0) # zero noise so pure movement\n trans_model = CombinedLinearGaussianTransitionModel(\n [model_1d] * (radars_3d[0].ndim_state // 2))\n platform_state = State(state, timestamp)\n\n # This defines the position_mapping to the platforms state vector (i.e. x and y)\n mounting_mapping = np.array([0, 2, 4])\n # create a platform with the simple radar mounted\n platform = MovingPlatform(\n states=platform_state,\n transition_model=trans_model,\n sensors=radars_3d,\n rotation_offsets=rotation_offsets_3d,\n position_mapping=mounting_mapping\n )\n if move:\n # Move the platform\n platform.move(timestamp + datetime.timedelta(seconds=2))\n assert np.allclose(platform.orientation, expected_platform_orientation)\n assert np.allclose(all_sensor_orientations(platform), expected_sensor_orientations)\n\n\ndef all_sensor_orientations(platform):\n sensor_orientations = np.concatenate([sensor.orientation for sensor in platform.sensors],\n axis=1)\n return sensor_orientations.T\n\n\ndef all_sensor_positions(platform):\n sensor_positions = np.concatenate([sensor.position for sensor in platform.sensors], axis=1)\n return sensor_positions.T\n\n\ndef test_defaults(radars_3d, platform_type, add_sensor):\n platform_state = State(state_vector=StateVector([0, 1, 2, 1, 4, 1]),\n timestamp=datetime.datetime.now())\n platform_args = {}\n if platform_type is MovingPlatform:\n platform_args['transition_model'] = None\n\n if add_sensor:\n platform = platform_type(states=platform_state, sensors=[], position_mapping=[0, 2, 4],\n **platform_args)\n for sensor in radars_3d:\n platform.add_sensor(sensor)\n else:\n platform = platform_type(states=platform_state, sensors=radars_3d,\n position_mapping=[0, 2, 4], **platform_args)\n\n for i, sensor in enumerate(radars_3d):\n assert np.array_equal(platform.mounting_offsets[i], StateVector([0, 0, 0]))\n assert np.array_equal(platform.rotation_offsets[i], StateVector([0, 0, 0]))\n assert np.array_equal(sensor.position, platform.position)\n assert np.array_equal(sensor.orientation, platform.orientation)\n\n\ndef test_sensor_offset_error(radars_3d, platform_type):\n platform_state = State(state_vector=StateVector([0, 1, 2, 1, 4, 1]),\n timestamp=datetime.datetime.now())\n platform_args = {}\n if platform_type is MovingPlatform:\n platform_args['transition_model'] = None\n\n offset = StateVector([0, 0, 0])\n\n offsets = [offset] * (len(radars_3d) - 1)\n with pytest.raises(ValueError):\n _ = platform_type(states=platform_state, sensors=radars_3d, position_mapping=[0, 2, 4],\n 
mounting_offsets=offsets, **platform_args)\n\n with pytest.raises(ValueError):\n _ = platform_type(states=platform_state, sensors=radars_3d, position_mapping=[0, 2, 4],\n rotation_offsets=offsets, **platform_args)\n\n\ndef test_missing_sensors(radars_3d, platform_type):\n platform_state = State(state_vector=StateVector([0, 1, 2, 1, 4, 1]),\n timestamp=datetime.datetime.now())\n platform_args = {}\n if platform_type is MovingPlatform:\n platform_args['transition_model'] = None\n\n # add all but the last sensor\n platform = platform_type(states=platform_state, sensors=radars_3d[:-2],\n position_mapping=[0, 2, 4], **platform_args)\n\n # finding the position/orientation of a sensor that is not on the platform\n # should raise an error\n with pytest.raises(ValueError):\n platform.get_sensor_position(radars_3d[-1])\n\n with pytest.raises(ValueError):\n platform.get_sensor_orientation(radars_3d[-1])\n\n\ndef sensor_positions_test(expected_offset, platform):\n \"\"\"\n This function asserts that the sensor positions on the platform have been\n correctly updated when the platform has been moved or sensor mounted on the\n platform.\n\n :param expected_offset: nD array of expected sensor position post rotation\n :param platform: platform object\n :return:\n \"\"\"\n radar_position = all_sensor_positions(platform)\n platform_position = platform.position\n expected_radar_position = expected_offset + platform_position.T\n\n assert np.allclose(expected_radar_position, radar_position)\n" ]
[ [ "numpy.deg2rad", "numpy.array", "numpy.equal" ], [ "numpy.sqrt", "numpy.allclose", "numpy.array_equal", "numpy.concatenate", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DhananjayAshok/KagglePipeline
[ "aaf3eaa7793fdb2e1b1639b0be316327e10b546a" ]
[ "KagglePipeline/rapid_deployment/MultipleModels.py" ]
[ "class MultipleModels(object):\n \"\"\"\n Abstract Classes that create multiple models and store them in a list\n\n \n \"\"\"\n def __init__(self, classification, extras=[]):\n \"\"\"\n Extras is a list of additional models that can be called to fit on all in format (model, string name of model)\n \"\"\"\n self.model_list = []\n self._generate_model_list(classification)\n self.model_list.extend(extras)\n self.classification = classification\n\n\n def fit(self, X_train, y_train):\n for model_tuple in self.model_list:\n model_tuple[0].fit(X_train, y_train)\n\n\n def _generate_model_list(self):\n raise NotImplementedError\n\n def display_comparison(self, X_val, y_val):\n \"\"\"\n Displays a chart of each model and their performance on the datasets provideds\n \"\"\"\n import matplotlib.pyplot as plt\n x = []\n y = []\n for model_tuple in self.model_list:\n x.append(model_tuple[1])\n y.append(model_tuple[0].score(X_val, y_val))\n plt.scatter(x, y)\n plt.show()\n\n\n def display_cross_val_comparison(self, X, y, cross_val= 5):\n \"\"\"\n More rigerous cross validation is used to train the models and compare scores\n Plots the results\n returns a dataframe with the results\n \"\"\"\n from sklearn.model_selection import KFold, cross_val_score\n import pandas as pd\n import seaborn as sns\n import matplotlib.pyplot as plt\n\n folds = KFold(n_splits=cross_val, shuffle=True, random_state=11)\n d = pd.DataFrame({self.model_list[0][1]: list(range(cross_val))})\n for model_tuple in self.model_list:\n d[model_tuple[1]] = cross_val_score(model_tuple[0], X, y, cv=folds)\n\n sns.boxplot(data=d)\n plt.xlabel(\"Classifier\")\n plt.ylabel(\"R^2 Score (Higher is Better)\")\n plt.title(\"Comparison between models\")\n plt.show()\n return d\n\n\n def __len__(self):\n return len(self.model_list)\n\n def __str__(self):\n return str([model_tuple[1] for model_tuple in self.model_list])\n\n def parameters(self):\n return str([model_tuple[0] for model_tuple in self.model_list])\n" ]
[ [ "sklearn.model_selection.cross_val_score", "matplotlib.pyplot.scatter", "matplotlib.pyplot.title", "sklearn.model_selection.KFold", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
VireshDhawan/Data-Science-Templates
[ "bb248045e66c6d6230f6aee80fa53e030085c4fe" ]
[ "Algorithms/GradientBoosting/GradientBoosting.py" ]
[ "from sklearn.ensemble import GradientBoostingClassifier, GradientBoostingRegressor\nfrom sklearn.model_selection import cross_val_score\nimport numpy as np\n\nclass GradientBoosting:\n\n def __init__(self, x_train, y_train, problemtype = 'regression', cv = 5):\n self.x_train = x_train\n self.y_train = y_train\n self.cv = cv\n\n if problemtype == 'regression':\n self.clf = GradientBoostingRegressor()\n elif problemtype == 'classification':\n self.clf = GradientBoostingClassifier()\n\n def classify(self):\n self.clf.fit(self.x_train, self.y_train)\n\n def regress(self):\n self.clf.fit(self.x_train, self.y_train)\n\n\n def show_cross_val_score(self):\n cv_score = cross_val_score(estimator=self.clf, X=self.x_train, y=self.y_train, cv=self.cv, n_jobs=-1)\n print('Gradient Boosting Cross Validated Score...')\n print(np.mean(cv_score))\n print('\\n')\n\n def optimise(self):\n pass\n" ]
[ [ "sklearn.ensemble.GradientBoostingClassifier", "sklearn.model_selection.cross_val_score", "numpy.mean", "sklearn.ensemble.GradientBoostingRegressor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
krmurtha/xcp_abcd
[ "2b745d980fbd41aabe4ebaff5342c7151ee7308d" ]
[ "xcp_abcd/utils/write_save.py" ]
[ "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"Utilities to read and write nifiti and cifti data.\"\"\"\nimport nibabel as nb\nimport numpy as np\nimport os \nimport subprocess\nfrom templateflow.api import get as get_template\nimport tempfile \n\ndef read_ndata(datafile,maskfile=None):\n '''\n read nifti or cifti\n input: \n datafile:\n nifti or cifti file\n output:\n data:\n numpy ndarry ( vertices or voxels by timepoints)\n '''\n # read cifti series\n if datafile.endswith('.dtseries.nii'):\n data = nb.load(datafile).get_fdata().T\n # or nifiti data, mask is required\n elif datafile.endswith('.nii.gz'):\n datax = nb.load(datafile).get_fdata()\n mask = nb.load(maskfile).get_fdata()\n data = datax[mask==1]\n return data\n \n\n\ndef write_ndata(data_matrix,template,filename,mask=None,tr=1):\n '''\n input:\n data matrix : veritices by timepoint \n template: header and affine\n filename : name of the output\n mask : mask is not needed\n\n '''\n basedir = os.path.split(os.path.abspath(filename))[0]\n # write cifti series\n if template.endswith('.dtseries.nii'):\n from nibabel.cifti2 import Cifti2Image\n template_file = nb.load(template)\n if data_matrix.shape[1] == template_file.shape[0]:\n dataimg = Cifti2Image(dataobj=data_matrix.T,header=template_file.header,\n file_map=template_file.file_map,nifti_header=template_file.nifti_header)\n elif data_matrix.shape[1] != template_file.shape[0]:\n fake_cifti1 = str(basedir+'/fake_niftix.nii.gz')\n run_shell(['OMP_NUM_THREADS=2 wb_command -cifti-convert -to-nifti ',template,fake_cifti1])\n fake_cifti0 = str(basedir+ '/edited_cifti_nifti.nii.gz')\n fake_cifti0 = edit_ciftinifti(fake_cifti1,fake_cifti0,data_matrix)\n orig_cifti0 = str(basedir+ '/edited_nifti2cifti.dtseries.nii')\n run_shell(['OMP_NUM_THREADS=2 wb_command -cifti-convert -from-nifti ',fake_cifti0,template, \n orig_cifti0,'-reset-timepoints',str(tr),str(0) ])\n template_file2 = nb.load(orig_cifti0)\n dataimg = Cifti2Image(dataobj=data_matrix.T,header=template_file2.header,\n file_map=template_file2.file_map,nifti_header=template_file2.nifti_header)\n os.remove(fake_cifti1)\n os.remove(fake_cifti0)\n os.remove(orig_cifti0)\n # write nifti series\n elif template.endswith('.nii.gz'):\n mask_data = nb.load(mask).get_fdata()\n template_file = nb.load(template)\n\n if len(data_matrix.shape) == 1:\n dataz = np.zeros(mask_data.shape) \n dataz[mask_data==1] = data_matrix\n \n else:\n dataz = np.zeros([mask_data.shape[0],mask_data.shape[1],\n mask_data.shape[2],data_matrix.shape[1]])\n dataz[mask_data==1,:] = data_matrix\n \n dataimg = nb.Nifti1Image(dataobj=dataz, affine=template_file.affine, \n header=template_file.header)\n \n dataimg.to_filename(filename)\n \n return filename\n\ndef edit_ciftinifti(in_file,out_file,datax):\n \"\"\"\n this function create a fake nifti file from cifti\n in_file: \n cifti file. 
.dstreries etc\n out_file:\n output fake nifti file \n datax: numpy darray \n data matrix with vertices by timepoints dimension\n \"\"\"\n thdata = nb.load(in_file)\n dataxx = thdata.get_fdata()\n dd = dataxx[:,:,:,0:datax.shape[1]]\n dataimg = nb.Nifti1Image(dataobj=dd, affine=thdata.affine, \n header=thdata.header)\n dataimg.to_filename(out_file)\n return out_file\n\ndef run_shell(cmd,env = os.environ):\n \"\"\"\n utilities to run shell in python\n cmd: \n shell command that wanted to be run \n \n\n \"\"\"\n if type(cmd) is list:\n cmd = ' '.join(cmd)\n \n call_command = subprocess.Popen(cmd,stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,env=env,shell=True,)\n output, error = call_command.communicate(\"Hello from the other side!\")\n call_command.wait()\n \n\n return output,error\n \n\n\ndef write_gii(datat,template,filename,hemi):\n '''\n datatt : vector \n template: real file loaded with nibabel to get header and filemap\n filename ; name of the output\n '''\n datax = np.array(datat,dtype='float32')\n template = str(get_template(\"fsLR\", hemi=hemi,suffix='midthickness',density='32k'))\n template = nb.load(template)\n dataimg=nb.gifti.GiftiImage(header=template.header,file_map=template.file_map,extra=template.extra)\n dataimg=nb.gifti.GiftiImage(header=template.header,file_map=template.file_map,extra=template.extra,\n meta=template.meta)\n d_timepoint=nb.gifti.GiftiDataArray(data=datax,intent='NIFTI_INTENT_NORMAL')\n dataimg.add_gifti_data_array(d_timepoint)\n dataimg.to_filename(filename)\n return filename\n\n\ndef read_gii(surf_gii):\n \"\"\"\n using nibabel to read surface file\n \"\"\"\n bbx = nb.load(surf_gii)\n datat = bbx.agg_data()\n if not hasattr(datat, '__shape__'):\n datat = np.zeros((len(bbx.darrays[0].data), len(bbx.darrays)))\n for arr in range(len(bbx.darrays)):\n datat[:, arr] = bbx.darrays[arr].data\n return datat\n\n\ndef despikedatacifti(cifti,tr,basedir):\n \"\"\" despiking cifti \"\"\"\n fake_cifti1 = str(basedir+'/fake_niftix.nii.gz')\n fake_cifti1_depike = str(basedir+'/fake_niftix_depike.nii.gz')\n cifti_despike = str(basedir+ '/despike_nifti2cifti.dtseries.nii')\n run_shell(['OMP_NUM_THREADS=2 wb_command -cifti-convert -to-nifti ',cifti,fake_cifti1])\n run_shell(['3dDespike -nomask -NEW -prefix',fake_cifti1_depike,fake_cifti1])\n run_shell(['OMP_NUM_THREADS=2 wb_command -cifti-convert -from-nifti ',fake_cifti1_depike,cifti, \n cifti_despike,'-reset-timepoints',str(tr),str(0)])\n return cifti_despike" ]
[ [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
terry00123/cgp-cnn-PyTorch
[ "784c2ea39d7849e000d158d4e320ef2f0b133671" ]
[ "cgp_config.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport multiprocessing as mp\nimport multiprocessing.pool\nimport numpy as np\nimport cnn_train as cnn\n\n\n# wrapper function for multiprocessing\ndef arg_wrapper_mp(args):\n return args[0](*args[1:])\n\nclass NoDaemonProcess(mp.Process):\n # make 'daemon' attribute always return False\n def _get_daemon(self):\n return False\n def _set_daemon(self, value):\n pass\n daemon = property(_get_daemon, _set_daemon)\n\n# We sub-class multiprocessing.pool.Pool instead of multiprocessing.Pool\n# because the latter is only a wrapper function, not a proper class.\nclass NoDaemonProcessPool(multiprocessing.pool.Pool):\n Process = NoDaemonProcess\n\n\n# Evaluation of CNNs\ndef cnn_eval(net, gpu_id, epoch_num, batchsize, dataset, verbose, imgSize):\n\n print('\\tgpu_id:', gpu_id, ',', net)\n train = cnn.CNN_train(dataset, validation=True, verbose=verbose, imgSize=imgSize, batchsize=batchsize)\n evaluation = train(net, gpu_id, epoch_num=epoch_num, out_model=None)\n print('\\tgpu_id:', gpu_id, ', eval:', evaluation)\n return evaluation\n\n\nclass CNNEvaluation(object):\n def __init__(self, gpu_ids, dataset='cifar10', verbose=True, epoch_num=50, batchsize=16, imgSize=32):\n self.gpu_num = len(gpu_ids)\n self.gpu_ids = gpu_ids\n self.epoch_num = epoch_num\n self.batchsize = batchsize\n self.dataset = dataset\n self.verbose = verbose\n self.imgSize = imgSize\n\n def __call__(self, net_lists):\n evaluations = np.zeros(len(net_lists))\n for i in np.arange(0, len(net_lists), self.gpu_num):\n process_num = np.min((i + self.gpu_num, len(net_lists))) - i\n pool = NoDaemonProcessPool(process_num)\n arg_data = [(cnn_eval, net_lists[i+j], self.gpu_ids[j], self.epoch_num, self.batchsize, self.dataset, self.verbose, self.imgSize) for j in range(process_num)]\n evaluations[i:i+process_num] = pool.map(arg_wrapper_mp, arg_data)\n pool.terminate()\n\n return evaluations\n\n\n# network configurations\nclass CgpInfoConvSet(object):\n def __init__(self, rows=30, cols=40, level_back=40, min_active_num=8, max_active_num=50):\n self.input_num = 1\n # \"S_\" means that the layer has a convolution layer without downsampling.\n # \"D_\" means that the layer has a convolution layer with downsampling.\n # \"Sum\" means that the layer has a skip connection.\n self.func_type = ['S_ConvBlock_32_1', 'S_ConvBlock_32_3', 'S_ConvBlock_32_5',\n 'S_ConvBlock_128_1', 'S_ConvBlock_128_3', 'S_ConvBlock_128_5',\n 'S_ConvBlock_64_1', 'S_ConvBlock_64_3', 'S_ConvBlock_64_5',\n 'S_ResBlock_32_1', 'S_ResBlock_32_3', 'S_ResBlock_32_5',\n 'S_ResBlock_128_1', 'S_ResBlock_128_3', 'S_ResBlock_128_5',\n 'S_ResBlock_64_1', 'S_ResBlock_64_3', 'S_ResBlock_64_5',\n 'Concat', 'Sum',\n 'Max_Pool', 'Avg_Pool']\n \n self.func_in_num = [1, 1, 1,\n 1, 1, 1,\n 1, 1, 1,\n 1, 1, 1,\n 1, 1, 1,\n 1, 1, 1,\n 2, 2,\n 1, 1]\n\n self.out_num = 1\n self.out_type = ['full']\n self.out_in_num = [1]\n\n # CGP network configuration\n self.rows = rows\n self.cols = cols\n self.node_num = rows * cols\n self.level_back = level_back\n self.min_active_num = min_active_num\n self.max_active_num = max_active_num\n\n self.func_type_num = len(self.func_type)\n self.out_type_num = len(self.out_type)\n self.max_in_num = np.max([np.max(self.func_in_num), np.max(self.out_in_num)])\n" ]
[ [ "numpy.max" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
pjpetersik/ninolearn
[ "2a6912bbaaf3c5737f6dcda89e4d7d1fd885a35e", "2a6912bbaaf3c5737f6dcda89e4d7d1fd885a35e", "2a6912bbaaf3c5737f6dcda89e4d7d1fd885a35e" ]
[ "research/Master_Thesis/4.2deep_ensmble_full.py", "ninolearn/learn/models/ipnn.py", "research/StandardizedResearchIPNN/cross_training.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\n\nimport keras.backend as K\n\nfrom ninolearn.learn.models.dem import DEM\nfrom ninolearn.utils import print_header, small_print_header\nfrom ninolearn.pathes import modeldir\n\nfrom data_pipeline import pipeline\n\nimport os\nimport time\nplt.close(\"all\")\nK.clear_session()\n\n#%% =============================================================================\n# Deep ensemble\n# =============================================================================\ndecades = [60, 70, 80, 90, 100, 110]\n\nfor lead_time in [0, 3, 6, 9, 12, 15]:\n X, y, timey, yp = pipeline(lead_time, return_persistance=True)\n print_header(f'Lead time: {lead_time} month')\n\n for decade in decades:\n small_print_header(f'Test period: {1902+decade}-01-01 till {1911+decade}-12-01')\n\n # jump loop iteration if already trained\n ens_dir=f'ensemble_decade{decade}_lead{lead_time}'\n out_dir = os.path.join(modeldir, ens_dir)\n\n modified_time = time.gmtime(os.path.getmtime(out_dir))\n compare_time = time.strptime(\"21-7-2019 13:00 UTC\", \"%d-%m-%Y %H:%M %Z\")\n\n if modified_time>compare_time:\n print(\"Trained already!\")\n continue\n\n test_indeces = (timey>=f'{1902+decade}-01-01') & (timey<=f'{1911+decade}-12-01')\n train_indeces = np.invert(test_indeces)\n\n trainX, trainy = X[train_indeces,:], y[train_indeces]\n\n model = DEM()\n\n model.set_parameters(layers=1, dropout=[0.1, 0.5], noise_in=[0.1, 0.5],\n noise_sigma=[0.1,0.5], noise_mu=[0.1, 0.5],\n l1_hidden=[0.0, 0.2], l2_hidden=[0, 0.2],\n l1_mu=[0.0, 0.2], l2_mu=[0.0, 0.2], l1_sigma=[0.0, 0.2],\n l2_sigma=[0.0, 0.2], lr=[0.0001,0.01], batch_size=100, epochs=500, n_segments=5,\n n_members_segment=1, patience=30, verbose=0, std=True)\n\n model.fit_RandomizedSearch(trainX, trainy, n_iter=200)\n\n model.save(location=modeldir, dir_name=ens_dir)\n\n del model", "\"\"\"\nIPNN : Classification neural network\n\"\"\"\nimport numpy as np\nimport json\n\nimport keras.backend as K\nfrom keras.models import Model, save_model, load_model\nfrom keras.layers import Dense, Input\nfrom keras.layers import Dropout, GaussianNoise\nfrom keras.losses import sparse_categorical_crossentropy\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping\nfrom keras import regularizers\n\nfrom os.path import join, exists\nfrom os import mkdir, getcwd\nfrom shutil import rmtree\nimport glob\n\nfrom ninolearn.learn.models.baseModel import baseModel\nfrom ninolearn.utils import small_print_header\nfrom ninolearn.exceptions import MissingArgumentError\n\nimport warnings\n\nimport time\n\nclass ipnn(baseModel):\n \"\"\"\n\n \"\"\"\n def __del__(self):\n K.clear_session()\n\n def __init__(self, low=-5, high = +5, step=0.25, layers=1, neurons=16, dropout=0.2, noise_in=0.1,\n noise_out=0.1,\n l1_hidden=0.1, l2_hidden=0.1,\n l1_out=0.0, l2_out=0.1,\n batch_size=10, n_segments=5, n_members_segment=1,\n lr=0.001, patience = 10, epochs=300, verbose=0,\n name='ipnn'):\n self.set_hyperparameters(layers=layers, neurons=neurons, dropout=dropout,\n noise_in=noise_in, noise_out=noise_out,\n l1_hidden=l1_hidden, l2_hidden=l2_hidden,\n l1_out=l1_out, l2_out=l2_out,\n\n batch_size=batch_size, n_segments=n_segments, n_members_segment=n_members_segment,\n lr=lr, patience=patience, epochs=epochs, verbose=verbose,\n name=name)\n\n self.loss = sparse_categorical_crossentropy\n self.loss_name = 'sparse_categorical_crossentropy'\n\n self.thresholds = np.arange(low, high+step, step)\n self.n_outputs = len(self.thresholds) + 1\n self.output_names 
= [f'p{i}' for i in range(self.n_outputs)]\n\n\n def build_model(self, n_features):\n \"\"\"\n The method builds a new member of the ensemble and returns it.\n \"\"\"\n # derived parameters\n self.hyperparameters['n_members'] = self.hyperparameters['n_segments'] * self.hyperparameters['n_members_segment']\n\n # initialize optimizer and early stopping\n self.optimizer = Adam(lr=self.hyperparameters['lr'], beta_1=0.9, beta_2=0.999, epsilon=None, decay=0., amsgrad=False)\n self.es = EarlyStopping(monitor=f'val_{self.loss_name}', min_delta=0.0, patience=self.hyperparameters['patience'], verbose=1,\n mode='min', restore_best_weights=True)\n\n inputs = Input(shape=(n_features,))\n h = GaussianNoise(self.hyperparameters['noise_in'],\n name='noise_input')(inputs)\n\n for i in range(self.hyperparameters['layers']):\n h = Dense(self.hyperparameters['neurons'], activation='tanh',\n kernel_regularizer=regularizers.l1_l2(self.hyperparameters['l1_hidden'],\n self.hyperparameters['l2_hidden']),\n kernel_initializer='random_uniform',\n bias_initializer='random_uniform',\n name=f'hidden_{i}')(h)\n\n h = Dropout(self.hyperparameters['dropout'],\n name=f'hidden_dropout_{i}')(h)\n\n out = Dense(self.n_outputs, activation='softmax',\n kernel_regularizer=regularizers.l1_l2(self.hyperparameters['l1_out'],\n self.hyperparameters['l2_out']),\n kernel_initializer='random_uniform',\n bias_initializer='random_uniform',\n name='output')(h)\n\n model = Model(inputs=inputs, outputs=out)\n return model\n\n def to_inteval(self, y):\n y_cls = np.zeros(len(y))\n y_cls[:] = -999\n\n for i in range(1, self.n_outputs-2):\n y_cls[(self.thresholds[i]<y)&(self.thresholds[i+1]>=y)] = i\n\n y_cls[self.thresholds[0]>y] = 0\n y_cls[self.thresholds[-1]<y] = self.n_outputs - 1\n return y_cls\n\n\n def fit(self, trainX, trainy, valX=None, valy=None, use_pretrained=False):\n \"\"\"\n Fit the model to training data\n \"\"\"\n trainy = self.to_inteval(trainy)\n if valy is not None:\n valy = self.to_inteval(valy)\n\n start_time = time.time()\n # clear memory\n K.clear_session()\n\n # allocate lists for the ensemble\n self.ensemble = []\n self.history = []\n self.val_loss = []\n self.train_loss = []\n\n self.segment_len = trainX.shape[0]//self.hyperparameters['n_segments']\n\n if self.hyperparameters['n_segments']==1 and (valX is not None or valy is not None):\n warnings.warn(\"Validation and test data set are the same if n_segements is 1!\")\n\n i = 0\n while i<self.hyperparameters['n_members_segment']:\n j = 0\n while j<self.hyperparameters['n_segments']:\n ensemble_member = self.build_model(trainX.shape[1])\n\n n_ens_sel = len(self.ensemble)\n small_print_header(f\"Train member Nr {n_ens_sel+1}/{self.hyperparameters['n_members']}\")\n\n if use_pretrained:\n ensemble_member.load_weights(self.pretrained_weights)\n\n ensemble_member.compile(loss=self.loss, optimizer=self.optimizer, metrics=[self.loss])\n\n # validate on the spare segment\n if self.hyperparameters['n_segments']!=1:\n if valX is not None or valy is not None:\n warnings.warn(\"Validation data set will be one of the segments. 
The provided validation data set is not used!\")\n\n start_ind = j * self.segment_len\n end_ind = (j+1) * self.segment_len\n\n trainXens = np.delete(trainX, np.s_[start_ind:end_ind], axis=0)\n trainyens = np.delete(trainy, np.s_[start_ind:end_ind])\n valXens = trainX[start_ind:end_ind]\n valyens = trainy[start_ind:end_ind]\n\n # validate on test data set\n elif self.hyperparameters['n_segments']==1:\n if valX is None or valy is None:\n raise MissingArgumentError(\"When segments length is 1, a validation data set must be provided.\")\n trainXens = trainX\n trainyens = trainy\n valXens = valX\n valyens = valy\n\n history = ensemble_member.fit(trainXens, trainyens,\n epochs=self.hyperparameters['epochs'],\n batch_size=self.hyperparameters['batch_size'],\n verbose=self.hyperparameters['verbose'],\n shuffle=True, callbacks=[self.es],\n validation_data=(valXens, valyens))\n\n self.history.append(history)\n self.val_loss.append(ensemble_member.evaluate(valXens, valyens)[1])\n\n self.train_loss.append(ensemble_member.evaluate(trainXens, trainyens)[1])\n\n self.ensemble.append(ensemble_member)\n j+=1\n i+=1\n self.mean_val_loss = np.mean(self.val_loss)\n self.mean_train_loss = np.mean(self.train_loss)\n\n print(f'Validation Loss: {self.mean_val_loss}')\n print(f'Train Loss: {self.mean_train_loss}')\n # print computation time\n end_time = time.time()\n passed_time = np.round(end_time-start_time, decimals=1)\n print(f'Computation time: {passed_time}s')\n\n\n def predict(self, X):\n \"\"\"\n Generates the ensemble prediction of a model ensemble\n\n :param model_ens: list of ensemble models\n :param X: The features\n\n \"\"\"\n\n pred_ens = np.zeros((X.shape[0], self.n_outputs, self.hyperparameters['n_members']))\n for i in range(self.hyperparameters['n_members']):\n pred_ens[:,:,i] = self.ensemble[i].predict(X)\n return self._mixture(pred_ens).T\n\n\n def _mixture(self, pred):\n \"\"\"\n returns the ensemble mixture results\n \"\"\"\n mix_mean = pred[:,:,:].mean(axis=2)\n return mix_mean\n\n\n def save(self, location='', dir_name='ensemble'):\n \"\"\"\n Save the ensemble\n \"\"\"\n path = join(location, dir_name)\n\n if not exists(path):\n mkdir(path)\n\n else:\n rmtree(path)\n mkdir(path)\n\n# with open(join(path, 'hyperparameters.json'), 'w') as file:\n# json.dump(self.hyperparameters, file)\n\n self.df_history_hyp.to_csv(join(path, 'hyperparameters_history.csv'))\n\n for i in range(self.hyperparameters['n_members']):\n path_h5 = join(path, f\"member{i}.h5\")\n save_model(self.ensemble[i], path_h5, include_optimizer=False)\n\n def load(self, location=None, dir_name='dem'):\n \"\"\"\n Load the ensemble\n \"\"\"\n if location is None:\n location = getcwd()\n\n path = join(location, dir_name)\n files = glob.glob(join(path,'*.h5'))\n self.hyperparameters = {}\n self.hyperparameters['n_members'] = len(files)\n self.ensemble = []\n\n for file in files:\n file_path = join(path, file)\n self.ensemble.append(load_model(file_path))\n\n", "import numpy as np\nfrom sklearn.preprocessing import StandardScaler\n\nfrom ninolearn.utils import include_time_lag\nfrom ninolearn.IO.read_processed import data_reader\nfrom ninolearn.learn.models.ipnn import ipnn\n\nfrom ninolearn.learn.fit import cross_training\n\ndef pipeline(lead_time, return_persistance=False):\n \"\"\"\n Data pipeline for the processing of the data before the Deep Ensemble\n is trained.\n\n :type lead_time: int\n :param lead_time: The lead time in month.\n\n :type return_persistance: boolean\n :param return_persistance: Return as the persistance as 
well.\n\n :returns: The feature \"X\" (at observation time), the label \"y\" (at lead\n time), the target season \"timey\" (least month) and if selected the\n label at observation time \"y_persistance\". Hence, the output comes as:\n X, y, timey, y_persistance.\n \"\"\"\n reader = data_reader(startdate='1960-01', enddate='2017-12')\n\n # indeces\n oni = reader.read_csv('oni')\n\n iod = reader.read_csv('iod')\n wwv = reader.read_csv('wwv_proxy')\n\n # seasonal cycle\n sc1 = np.cos(np.arange(len(oni))/12*2*np.pi)\n sc2 = np.sin(np.arange(len(oni))/12*2*np.pi)\n\n # network metrics\n network_ssh = reader.read_statistic('network_metrics', variable='zos', dataset='ORAS4', processed=\"anom\")\n c2_ssh = network_ssh['fraction_clusters_size_2']\n H_ssh = network_ssh['corrected_hamming_distance']\n\n #wind stress\n taux = reader.read_netcdf('taux', dataset='NCEP', processed='anom')\n\n taux_WP = taux.loc[dict(lat=slice(2.5,-2.5), lon=slice(120, 160))]\n taux_WP_mean = taux_WP.mean(dim='lat').mean(dim='lon')\n\n # decadel variation of leading eof\n pca_dec = reader.read_statistic('pca', variable='dec_sst', dataset='ERSSTv5', processed='anom')['pca1']\n\n # time lag\n time_lag = 12\n\n # shift such that lead time corresponds to the definition of lead time\n shift = 3\n\n # process features\n feature_unscaled = np.stack((oni, sc1, sc2, wwv, iod,\n taux_WP_mean,\n c2_ssh, H_ssh,\n pca_dec), axis=1)\n\n # scale each feature\n scalerX = StandardScaler()\n Xorg = scalerX.fit_transform(feature_unscaled)\n\n # set nans to 0.\n Xorg = np.nan_to_num(Xorg)\n\n # arange the feature array\n X = Xorg[:-lead_time-shift,:]\n X = include_time_lag(X, max_lag=time_lag)\n\n # arange label\n yorg = oni.values\n y = yorg[lead_time + time_lag + shift:]\n\n # get the time axis of the label\n timey = oni.index[lead_time + time_lag + shift:]\n\n if return_persistance:\n y_persistance = yorg[time_lag: - lead_time - shift]\n return X, y, timey, y_persistance\n else:\n return X, y, timey\n\n\ndef pipeline_small(lead_time, return_persistance=False):\n \"\"\"\n Data pipeline for the processing of the data before the Deep Ensemble\n is trained.\n\n :type lead_time: int\n :param lead_time: The lead time in month.\n\n :type return_persistance: boolean\n :param return_persistance: Return as the persistance as well.\n\n :returns: The feature \"X\" (at observation time), the label \"y\" (at lead\n time), the target season \"timey\" (least month) and if selected the\n label at observation time \"y_persistance\". 
Hence, the output comes as:\n X, y, timey, y_persistance.\n \"\"\"\n reader = data_reader(startdate='1960-01', enddate='2017-12')\n\n # indeces\n oni = reader.read_csv('oni')\n\n iod = reader.read_csv('iod')\n wwv = reader.read_csv('wwv_proxy')\n\n # network metrics\n network_ssh = reader.read_statistic('network_metrics', variable='zos', dataset='ORAS4', processed=\"anom\")\n c2_ssh = network_ssh['fraction_clusters_size_2']\n H_ssh = network_ssh['corrected_hamming_distance']\n\n #wind stress\n taux = reader.read_netcdf('taux', dataset='NCEP', processed='anom')\n\n taux_WP = taux.loc[dict(lat=slice(2.5,-2.5), lon=slice(120, 160))]\n taux_WP_mean = taux_WP.mean(dim='lat').mean(dim='lon')\n\n # decadel variation of leading eof\n pca_dec = reader.read_statistic('pca', variable='dec_sst', dataset='ERSSTv5', processed='anom')['pca1']\n\n # time lag\n time_lag = 2\n\n # shift such that lead time corresponds to the definition of lead time\n shift = 3\n\n # process features\n feature_unscaled = np.stack((oni,\n oni.index.month,\n wwv,\n #iod,\n #taux_WP_mean,\n #c2_ssh,\n H_ssh,\n pca_dec\n ), axis=1)\n\n # scale each feature\n scalerX = StandardScaler()\n Xorg = scalerX.fit_transform(feature_unscaled)\n\n # set nans to 0.\n Xorg = np.nan_to_num(Xorg)\n\n # arange the feature array\n X = Xorg[:-lead_time-shift,:]\n X = include_time_lag(X, max_lag=time_lag)\n\n # arange label\n yorg = oni.values\n y = yorg[lead_time + time_lag + shift:]\n\n # get the time axis of the label\n timey = oni.index[lead_time + time_lag + shift:]\n\n if return_persistance:\n y_persistance = yorg[time_lag: - lead_time - shift]\n return X, y, timey, y_persistance\n else:\n return X, y, timey\n\nif __name__==\"__main__\":\n\n cross_training(ipnn, pipeline_small, 10,\n layers=1, neurons = 32, dropout=0.2, noise_in=0.0, noise_out=0.,\n l1_hidden=[0.002, 0.15, 'log'], l2_hidden=0.,\n l1_out=0., l2_out=0., batch_size=100, lr = 0.01,\n epochs=5000, n_segments=5, n_members_segment=1, patience=25,\n verbose=0, name=\"ipnn_new\")" ]
[ [ "numpy.invert", "matplotlib.pyplot.close" ], [ "numpy.arange", "numpy.round", "numpy.delete", "numpy.mean", "numpy.zeros" ], [ "sklearn.preprocessing.StandardScaler", "numpy.nan_to_num", "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
beasteers/pescador
[ "dff2c75e5cbfaa5b03c7fd94ccfc546658bed600", "dff2c75e5cbfaa5b03c7fd94ccfc546658bed600" ]
[ "docs/conf.py", "examples/mux/chain_example.py" ]
[ "# -*- coding: utf-8 -*-\n#\n# pescador documentation build configuration file, created by\n# sphinx-quickstart on Fri Apr 3 10:03:34 2015.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nimport os\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath('../pescador'))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.mathjax',\n # 'sphinx.ext.coverage',\n # 'sphinx.ext.viewcode',\n # 'sphinx.ext.doctest',\n 'numpydoc',\n 'sphinx_gallery.gen_gallery',\n]\n\n# Configuration for sphinx-gallery\nsphinx_gallery_conf = {\n # Path to examples scripts\n 'examples_dirs': '../examples',\n # Path to where to save gallery generated examples\n 'gallery_dirs': 'auto_examples',\n 'reference_url': {\n 'sphinx_gallery': None,\n 'numpy': 'http://docs.scipy.org/doc/numpy/',\n 'np': 'http://docs.scipy.org/doc/numpy/',\n 'scipy': 'http://docs.scipy.org/doc/scipy/reference/',\n 'matplotlib': 'http://matplotlib.org/',\n 'sklearn': 'http://scikit-learn.org/stable/',\n 'keras': None,\n 'theano': 'http://deeplearning.net/software/theano/'\n },\n 'default_thumb_file': 'noun_199.png',\n 'backreferences_dir': False,\n}\n\nimport matplotlib as mpl\nmpl.use('Agg')\nimport matplotlib.pyplot as plt\n\nfrom glob import glob\nautosummary_generate = glob('*.rst')\n\n# Include the __init__ doc as well as the class\nautoclass_content = 'class'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8-sig'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'pescador'\nauthors = u'Pescador development team'\ncopyright = u'2016, {}'.format(authors)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\nimport six\n\nif six.PY3:\n from unittest.mock import MagicMock as Mock\nelse:\n from mock import Mock\n\nMOCK_MODULES = ['numpy', 'scipy',\n 'joblib.parallel', 'joblib._parallel_backends', 'joblib',\n 'zmq',\n 'json', 'ujson',\n 'multiprocessing']\n\nsys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)\n\nimport imp\npescador_version = imp.load_source('pescador.version', '../pescador/version.py')\n# The short X.Y version.\nversion = pescador_version.short_version\nrelaese = pescador_version.version\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#language = None\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\nexclude_patterns = ['_build', '.DS_Store']\n\n# The reST default role (used for this markup: `text`) to use for all\n# documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# A list of ignored prefixes for module index sorting.\n#modindex_common_prefix = []\n\n# If true, keep warnings as \"system message\" paragraphs in the built documents.\n#keep_warnings = False\n\n# -- Numpydoc --\n\nnumpydoc_show_class_members = False\n\nintersphinx_mapping = {'python': ('https://docs.python.org/2', None),\n 'numpy': ('https://docs.scipy.org/doc/numpy/', None),\n 'np': ('https://docs.scipy.org/doc/numpy/', None),\n 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None)}\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'default'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n#html_theme_path = []\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Add any extra paths that contain custom files (such as robots.txt or\n# .htaccess) here, relative to this directory. 
These files are copied\n# directly to the root of the documentation.\n#html_extra_path = []\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\n#html_last_updated_fmt = '%b %d, %Y'\n\n# If true, SmartyPants will be used to convert quotes and dashes to\n# typographically correct entities.\n#html_use_smartypants = True\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_domain_indices = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, links to the reST sources are added to the pages.\n#html_show_sourcelink = True\n\n# If true, \"Created using Sphinx\" is shown in the HTML footer. Default is True.\n#html_show_sphinx = True\n\n# If true, \"(C) Copyright ...\" is shown in the HTML footer. Default is True.\n#html_show_copyright = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# This is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = None\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'pescadordoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n# The paper size ('letterpaper' or 'a4paper').\n#'papersize': 'letterpaper',\n\n# The font size ('10pt', '11pt' or '12pt').\n#'pointsize': '10pt',\n\n# Additional stuff for the LaTeX preamble.\n#'preamble': '',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n ('index', 'pescador.tex', u'pescador Documentation',\n authors),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# If true, show page references after internal links.\n#latex_show_pagerefs = False\n\n# If true, show URL addresses after external links.\n#latex_show_urls = False\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_domain_indices = True\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n ('index', 'pescador', u'pescador Documentation',\n [authors], 1)\n]\n\n# If true, show URL addresses after external links.\n#man_show_urls = False\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n ('index', 'pescador', u'pescador Documentation',\n authors, 'pescador', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n# Documents to append as an appendix to all manuals.\n#texinfo_appendices = []\n\n# If false, no module index is generated.\n#texinfo_domain_indices = True\n\n# How to display URL addresses: 'footnote', 'no', or 'inline'.\n#texinfo_show_urls = 'footnote'\n\n# If true, do not generate a @detailmenu in the \"Top\" node's menu.\n#texinfo_no_detailmenu = False\n\n", "#! -*- coding: utf-8 -*-\n\"\"\"\n=====================================\nUsing ChainMux for repeatable streams\n=====================================\n\nSome applications call for deterministic, repeatable data streams, rather\nthan stochastic samples.\nA common use case is *validation* in machine learning applications, where\na held-out set of data is used to estimate the quality of a model during\ntraining.\nThe validation score is computed repeatedly as the model changes, and the\nresulting scores are compared to each other to find the best version of\nthe model.\nThe simplest way to ensure that the validation scores are comparable is to\nuse the same sample set each time.\nWith Pescador, this is most easily achieved by using the `ChainMux`.\n\"\"\"\n\n# Imports\nimport numpy as np\nimport pescador\n\n##########################\n# Setup\n##########################\n# We'll assume that the validation data lives in some N files\n# Each file produces M examples, so the total validation set\n# has N*M examples\n\nval_files = ['file1.npz', 'file2.npz']\nN = len(val_files)\n\nM = 10 # or whatever the number of examples per file is\n\n\n############################\n# Data generator\n############################\n# We'll make a simple generator that streams the first m\n# examples from an npz file.\n# The npz file is assumed to store two arrays: X and Y\n# containing inputs and outputs (eg, images and class labels)\n# Once the streamer produces m examples, it exits.\n\[email protected]\ndef data_gen(filename, m):\n\n data = np.load(filename)\n X = data['X']\n Y = data['Y']\n for i in range(m):\n yield dict(X=X[i], y=Y[i])\n\n\n############################\n# Constructing the streamers\n############################\n# First, we'll make a streamer for each validation example.\n#\n\nval_streams = [data_gen(fn, M) for fn in val_files]\n\n\n############################\n# Building the mux\n############################\n# The `ChainMux` can be used to combine data from all val_streams\n# in order.\n# We'll use `cycle` mode here, so that the chain restarts after\n# all of the streamers have been exhausted.\n# This produces an infinite stream of data from a finite sequence\n# that repeats every `N*M` steps.\n# This can be used in `keras`'s `fit_generator` function\n# with `validation_steps=N*M` to ensure that the validation set is\n# constant at each epoch.\n\nval_stream = pescador.ChainMux(val_streams, mode='cycle')\n" ]
[ [ "matplotlib.use" ], [ "numpy.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
faizanahemad/facebook-hateful-memes
[ "1f7febf65f5fc4ed4aeb476d5383437f677fbc19", "1f7febf65f5fc4ed4aeb476d5383437f677fbc19" ]
[ "facebook_hateful_memes_detector/models/text_models/Albert.py", "facebook_hateful_memes_detector/models/text_models/BERTClassifier.py" ]
[ "import abc\nfrom typing import List, Tuple, Dict, Set, Union\nimport numpy as np\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom ..classifiers import CNN1DFeaturizer, GRUFeaturizer, BasicFeaturizer, TransformerFeaturizer\nfrom .Fasttext1DCNN import Fasttext1DCNNModel\nfrom transformers import AutoModelWithLMHead, AutoTokenizer, AutoModel, DistilBertTokenizer, LongformerTokenizer\nfrom transformers import AlbertModel, AlbertTokenizer, AlbertForSequenceClassification, DistilBertModel, LongformerModel\nimport torchvision.models as models\nfrom torchnlp.word_to_vector import CharNGram\nfrom torchnlp.word_to_vector import BPEmb\nfrom ...utils import get_device, GaussianNoise, random_word_mask, load_stored_params, ExpandContract, Transformer, PositionalEncoding, LambdaLayer, get_global, \\\n get_regularization_layers, WordMasking\nfrom ...training import fb_1d_loss_builder\nimport os\nimport random\nimport math\n\n\nclass AlbertClassifer(Fasttext1DCNNModel):\n def __init__(self, classifier_dims, num_classes,\n gaussian_noise, dropout,\n internal_dims, n_layers,\n featurizer,\n n_tokens_in=64, n_tokens_out=16,\n use_as_super=False, **kwargs):\n embedding_dims = 768\n super(AlbertClassifer, self).__init__(classifier_dims, num_classes, embedding_dims, gaussian_noise, dropout,\n internal_dims, n_layers,\n featurizer, final_layer_builder,\n n_tokens_in, n_tokens_out, True, **kwargs)\n self.word_masking_proba = kwargs[\"word_masking_proba\"] if \"word_masking_proba\" in kwargs else 0.0\n\n if not use_as_super:\n model = kwargs[\"model\"] if \"model\" in kwargs else 'albert-base-v2'\n global_dir = get_global(\"models_dir\")\n model = os.path.join(global_dir, model) if model in os.listdir(global_dir) else model\n self.tokenizer = AutoTokenizer.from_pretrained(model)\n self.model = AutoModel.from_pretrained(model)\n print(\"Pick stored Model\", model, \"Model Class = \", type(self.model), \"Tokenizer Class = \", type(self.tokenizer))\n if featurizer == \"cnn\":\n self.featurizer = CNN1DFeaturizer(n_tokens_in, embedding_dims, n_tokens_out,\n classifier_dims, internal_dims, n_layers, gaussian_noise, dropout)\n elif featurizer == \"gru\":\n self.featurizer = GRUFeaturizer(n_tokens_in, embedding_dims, n_tokens_out, classifier_dims,\n internal_dims, n_layers, gaussian_noise, dropout)\n elif featurizer == \"basic\":\n self.featurizer = BasicFeaturizer(n_tokens_in, embedding_dims, n_tokens_out,\n classifier_dims,\n internal_dims, n_layers, gaussian_noise, dropout)\n elif featurizer == \"transformer\":\n self.attention_drop_proba = kwargs[\"attention_drop_proba\"] if \"attention_drop_proba\" in kwargs else 0.0\n n_encoders = kwargs.pop(\"n_encoders\", n_layers)\n n_decoders = kwargs.pop(\"n_decoders\", n_layers)\n self.featurizer = TransformerFeaturizer(n_tokens_in, embedding_dims, n_tokens_out,\n classifier_dims,\n internal_dims, n_encoders, n_decoders,\n gaussian_noise, dropout, self.attention_drop_proba)\n else:\n raise NotImplementedError()\n\n self.final_layer = fb_1d_loss_builder(classifier_dims, n_tokens_out, num_classes, dropout, **kwargs)\n if \"stored_model\" in kwargs:\n load_stored_params(self, kwargs[\"stored_model\"])\n self.word_masking = WordMasking(tokenizer=self.tokenizer, **kwargs)\n self.reg_layers = get_regularization_layers(self)\n\n def tokenise(self, texts: List[str]):\n tokenizer = self.tokenizer\n n_tokens_in = self.n_tokens_in\n texts = self.word_masking(texts)\n converted_texts = tokenizer.batch_encode_plus(texts, add_special_tokens=True, 
pad_to_max_length=True, max_length=n_tokens_in, truncation=True)\n input_ids, attention_mask = converted_texts[\"input_ids\"], converted_texts[\"attention_mask\"]\n return torch.tensor(input_ids).to(self.device), torch.tensor(attention_mask).to(self.device)\n\n def get_word_vectors(self, texts: List[str]):\n input_ids, attention_mask = self.tokenise(texts)\n outputs = self.model(input_ids, attention_mask=attention_mask)\n last_hidden_states = outputs[0]\n pooled_output = outputs[1]\n return last_hidden_states\n", "import abc\nfrom typing import List, Tuple, Dict, Set, Union\nimport numpy as np\nimport torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom ..classifiers import CNN1DFeaturizer, GRUFeaturizer, BasicFeaturizer, TransformerFeaturizer\nfrom .Fasttext1DCNN import Fasttext1DCNNModel\nfrom transformers import AutoModelWithLMHead, AutoTokenizer, AutoModel, DistilBertTokenizer, LongformerTokenizer\nfrom transformers import AlbertModel, AlbertTokenizer, AlbertForSequenceClassification, DistilBertModel, LongformerModel\nimport torchvision.models as models\nfrom torchnlp.word_to_vector import CharNGram\nfrom torchnlp.word_to_vector import BPEmb\nfrom ...utils import get_device, GaussianNoise, random_word_mask, load_stored_params, ExpandContract, Transformer, PositionalEncoding, LambdaLayer, get_global, \\\n get_regularization_layers, WordMasking\nfrom ...training import fb_1d_loss_builder\nimport os\nimport random\nimport math\n\n\nclass BERTClassifier(nn.Module):\n def __init__(self, classifier_dims, num_classes,\n gaussian_noise, dropout,\n internal_dims, n_layers,\n device,\n n_tokens_in=64, n_tokens_out=16,\n **kwargs):\n embedding_dims = 768\n super(BERTClassifier, self).__init__()\n self.word_masking_proba = kwargs[\"word_masking_proba\"] if \"word_masking_proba\" in kwargs else 0.0\n self.mlm_probability = self.word_masking_proba\n self.n_tokens_in = n_tokens_in\n self.token_cache = kwargs.pop(\"token_cache\", None)\n self.force_masking = False\n\n model = kwargs[\"model\"] if \"model\" in kwargs else 'albert-base-v2'\n global_dir = get_global(\"models_dir\")\n model = os.path.join(global_dir, model) if model in os.listdir(global_dir) else model\n self.tokenizer = AutoTokenizer.from_pretrained(model)\n self.model = AutoModel.from_pretrained(model)\n print(\"Pick stored Model\", model, \"Model Class = \", type(self.model), \"Tokenizer Class = \", type(self.tokenizer))\n\n self.attention_drop_proba = kwargs[\"attention_drop_proba\"] if \"attention_drop_proba\" in kwargs else 0.0\n n_encoders = kwargs.pop(\"n_encoders\", n_layers)\n n_decoders = kwargs.pop(\"n_decoders\", n_layers)\n self.featurizer = TransformerFeaturizer(n_tokens_in, embedding_dims, n_tokens_out,\n classifier_dims,\n internal_dims, n_encoders, n_decoders,\n gaussian_noise, dropout, self.attention_drop_proba)\n\n self.final_layer = fb_1d_loss_builder(classifier_dims, n_tokens_out, num_classes, dropout, **kwargs)\n if \"stored_model\" in kwargs:\n load_stored_params(self, kwargs[\"stored_model\"])\n self.word_masking = WordMasking(tokenizer=self.tokenizer, **kwargs)\n self.device=device\n\n def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.\n \"\"\"\n\n if self.tokenizer.mask_token is None:\n raise ValueError(\n \"This tokenizer does not have a mask token which is necessary for masked language modeling. 
Remove the --mlm flag if you want to use this tokenizer.\"\n )\n\n labels = inputs.clone()\n # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa)\n probability_matrix = torch.full(labels.shape, self.mlm_probability)\n special_tokens_mask = [\n self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()\n ]\n probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)\n if self.tokenizer._pad_token is not None:\n padding_mask = labels.eq(self.tokenizer.pad_token_id)\n probability_matrix.masked_fill_(padding_mask, value=0.0)\n masked_indices = torch.bernoulli(probability_matrix).bool()\n labels[~masked_indices] = -100 # We only compute loss on masked tokens\n\n # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])\n indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices\n inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token)\n\n # 10% of the time, we replace masked input tokens with random word\n indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced\n random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long)\n inputs[indices_random] = random_words[indices_random]\n\n # The rest of the time (10% of the time) we keep the masked input tokens unchanged\n return inputs, labels\n\n def tokenise(self, ids, texts: List[str]):\n tokenizer = self.tokenizer\n n_tokens_in = self.n_tokens_in\n if self.token_cache is None:\n converted_texts = tokenizer.batch_encode_plus(texts, add_special_tokens=True, pad_to_max_length=True, max_length=n_tokens_in, truncation=True)\n input_ids, attention_mask = converted_texts[\"input_ids\"], converted_texts[\"attention_mask\"]\n else:\n input_ids, attention_mask = zip(*[self.token_cache[id] for id in ids])\n\n input_ids, attention_mask = torch.tensor(input_ids), torch.tensor(attention_mask)\n if self.training or self.force_masking:\n input_ids, _ = self.mask_tokens(input_ids)\n return input_ids.to(self.device), attention_mask.to(self.device)\n\n\n def get_word_vectors(self, input_ids, attention_mask):\n outputs = self.model(input_ids, attention_mask=attention_mask)\n last_hidden_states = outputs[0]\n return last_hidden_states\n\n def forward(self, input_ids, attention_mask, labels=None):\n vectors = self.get_word_vectors(input_ids, attention_mask)\n vectors = self.featurizer(vectors)\n logits, loss = self.final_layer(vectors, labels) if self.final_layer is not None else (None, None)\n logits = torch.softmax(logits, dim=1)\n predicted_labels = logits.max(dim=1).indices\n return logits, predicted_labels, labels, loss\n" ]
[ [ "torch.tensor" ], [ "torch.softmax", "torch.bernoulli", "torch.full", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
MuhammadSYahyaS/FUnIE-GAN
[ "8f934c834c94e007b00866186b9ee624dc2b7b69" ]
[ "TF-Keras/test_funieGAN.py" ]
[ "\"\"\"\n# > Script for testing FUnIE-GAN \n# > Notes and Usage:\n# - set data_dir and model paths\n# - python test_funieGAN.py\n\"\"\"\nimport os\nimport time\nimport ntpath\nimport numpy as np\nfrom PIL import Image\nfrom os.path import join, exists\nfrom keras.models import model_from_json\n## local libs\nfrom utils.data_utils import getPaths, read_and_resize, preprocess, deprocess\n\n## for testing arbitrary local data\ndata_dir = \"../data/test/A/\"\nfrom utils.data_utils import get_local_test_data\ntest_paths = getPaths(data_dir)\nprint (\"{0} test images are loaded\".format(len(test_paths)))\n\n## create dir for log and (sampled) validation data\nsamples_dir = \"../data/output/\"\nif not exists(samples_dir): os.makedirs(samples_dir)\n\n## test funie-gan\ncheckpoint_dir = 'models/gen_p/'\nmodel_name_by_epoch = \"model_15320_\" \n## test funie-gan-up\n#checkpoint_dir = 'models/gen_up/'\n#model_name_by_epoch = \"model_35442_\" \n\nmodel_h5 = checkpoint_dir + model_name_by_epoch + \".h5\" \nmodel_json = checkpoint_dir + model_name_by_epoch + \".json\"\n# sanity\nassert (exists(model_h5) and exists(model_json))\n\n# load model\nwith open(model_json, \"r\") as json_file:\n loaded_model_json = json_file.read()\nfunie_gan_generator = model_from_json(loaded_model_json)\n# load weights into new model\nfunie_gan_generator.load_weights(model_h5)\nprint(\"\\nLoaded data and model\")\n\n# testing loop\ntimes = []; s = time.time()\nfor img_path in test_paths:\n # prepare data\n inp_img = read_and_resize(img_path, (256, 256))\n im = preprocess(inp_img)\n im = np.expand_dims(im, axis=0) # (1,256,256,3)\n # generate enhanced image\n s = time.time()\n gen = funie_gan_generator.predict(im)\n gen_img = deprocess(gen)[0]\n tot = time.time()-s\n times.append(tot)\n # save output images\n img_name = ntpath.basename(img_path)\n out_img = np.hstack((inp_img, gen_img)).astype('uint8')\n Image.fromarray(out_img).save(join(samples_dir, img_name))\n\n# some statistics \nnum_test = len(test_paths)\nif (num_test==0):\n print (\"\\nFound no images for test\")\nelse:\n print (\"\\nTotal images: {0}\".format(num_test)) \n # accumulate frame processing times (without bootstrap)\n Ttime, Mtime = np.sum(times[1:]), np.mean(times[1:]) \n print (\"Time taken: {0} sec at {1} fps\".format(Ttime, 1./Mtime))\n print(\"\\nSaved generated images in in {0}\\n\".format(samples_dir))\n\n" ]
[ [ "numpy.hstack", "numpy.expand_dims", "numpy.mean", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
iwangyuezhang/naturalcc
[ "9c3329dd8387c8242deb52bf590ebe3ac795f8de", "9c3329dd8387c8242deb52bf590ebe3ac795f8de" ]
[ "ncc/modules/encoders/base/contracode_encoder_lstm.py", "ncc/modules/encoders/retrieval/deepcs_encoder.py" ]
[ "import torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom ncc.data.constants import DEFAULT_MAX_SOURCE_POSITIONS\nfrom ncc.utils import utils\nfrom .lstm_encoder import LSTMEncoder\n\n\nclass CodeEncoderLSTMEncoder(LSTMEncoder):\n def __init__(\n self, dictionary, embed_dim=512, hidden_size=512, num_layers=1,\n dropout_in=0.1, dropout_out=0.1, bidirectional=False,\n left_pad=True, pretrained_embed=None, padding_idx=None,\n max_source_positions=DEFAULT_MAX_SOURCE_POSITIONS, project='hidden',\n ):\n super().__init__(dictionary, embed_dim, hidden_size, num_layers,\n dropout_in, dropout_out, bidirectional,\n left_pad, pretrained_embed, padding_idx,\n max_source_positions)\n\n self.project = project\n if project:\n if project == \"sequence_mean\" or project == \"sequence_mean_nonpad\":\n project_in = 2 * hidden_size\n self.project_layer = nn.Sequential(nn.Linear(project_in, hidden_size), nn.ReLU(),\n nn.Linear(embed_dim, 128)) # 218->hidden_size\n elif project == \"hidden\":\n project_in = num_layers * 2 * hidden_size\n self.project_layer = nn.Sequential(nn.Linear(project_in, hidden_size), nn.ReLU(),\n nn.Linear(embed_dim, 128))\n # elif project == \"hidden_identity\":\n # pass\n else:\n raise ValueError(f\"Unknown value '{project}' for CodeEncoderLSTM project argument\")\n # NOTE: We use the default PyTorch intialization, so no need to reset parameters.\n\n def forward(self, src_tokens, src_lengths, no_project_override=False):\n self.lstm.flatten_parameters()\n if self.left_pad:\n # nn.utils.rnn.pack_padded_sequence requires right-padding;\n # convert left-padding to right-padding\n src_tokens = utils.convert_padding_direction(\n src_tokens,\n self.padding_idx,\n left_to_right=True,\n )\n\n bsz, seqlen = src_tokens.size()\n\n # embed tokens\n x = self.embed_tokens(src_tokens)\n # x = self.pos_encoder(x) # TODO\n\n x = F.dropout(x, p=self.dropout_in, training=self.training)\n\n # B x T x C -> T x B x C\n x = x.transpose(0, 1)\n\n # pack embedded source tokens into a PackedSequence\n packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist(), enforce_sorted=False)\n\n # apply LSTM\n if self.bidirectional:\n state_size = 2 * self.num_layers, bsz, self.hidden_size\n else:\n state_size = self.num_layers, bsz, self.hidden_size\n h0 = x.new_zeros(*state_size)\n c0 = x.new_zeros(*state_size)\n packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0))\n\n # unpack outputs and apply dropout\n x, _ = nn.utils.rnn.pad_packed_sequence(packed_outs, padding_value=self.padding_idx)\n x = F.dropout(x, p=self.dropout_out, training=self.training)\n # assert list(x.size()) == [seqlen, bsz, self.output_units] # TODO\n\n if self.bidirectional:\n def combine_bidir(outs):\n out = outs.view(self.num_layers, 2, bsz, -1).transpose(1, 2).contiguous()\n return out.view(self.num_layers, bsz, -1)\n\n final_hiddens = combine_bidir(final_hiddens)\n final_cells = combine_bidir(final_cells)\n\n encoder_padding_mask = src_tokens.eq(self.padding_idx).t()\n\n if not no_project_override and self.project:\n if self.project == \"sequence_mean\":\n # out is T x B x n_directions*d_model\n rep = x.mean(dim=0) # B x n_directions*d_model\n elif self.project == \"sequence_mean_nonpad\":\n out_ = x.transpose(0, 1) # B x T x n_directions*d_model\n mask = torch.arange(out_.size(1), device=out_.device).unsqueeze(0).unsqueeze(-1).expand_as(\n out_) < src_lengths.unsqueeze(1).unsqueeze(2)\n rep = (out_ * mask.float()).sum(dim=1) # B x n_directions*d_model\n rep = rep / 
src_lengths.unsqueeze(1).float()\n elif self.project == \"hidden\":\n # h_n is n_layers*n_directions x B x d_model\n rep = torch.flatten(final_hiddens.transpose(0, 1), start_dim=1)\n # elif self.config[\"project\"] == \"hidden_identity\"\n # return torch.flatten(h_n.transpose(0, 1), start_dim=1)\n else:\n raise ValueError\n # return self.project_layer(rep)\n return {\n 'encoder_out': (self.project_layer(rep), final_hiddens, final_cells),\n 'encoder_padding_mask': encoder_padding_mask if encoder_padding_mask.any() else None\n }\n\n # return out\n return {\n 'encoder_out': (x, final_hiddens, final_cells),\n 'encoder_padding_mask': encoder_padding_mask if encoder_padding_mask.any() else None\n }\n\n def max_positions(self):\n \"\"\"Maximum input length supported by the encoder.\"\"\"\n return self.max_source_positions\n", "# -*- coding: utf-8 -*-\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\nfrom ncc.modules.base.layers import (\n Embedding,\n Linear,\n LSTM,\n)\nfrom ncc.modules.encoders.ncc_encoder import NccEncoder\n\n\nclass SeqEncoder(NccEncoder):\n def __init__(self, dictionary, embed_dim,\n hidden_dim, rnn_layers=1, bidirectional=True,\n dropout=0.25):\n super(SeqEncoder, self).__init__(dictionary)\n self.padding_idx = self.dictionary.pad()\n self.embed_dim = embed_dim\n self.hidden_dim = hidden_dim\n self.rnn_layers = rnn_layers\n self.bidirectional = bidirectional\n self.dropout = dropout\n\n self.embed = Embedding(len(dictionary), embed_dim, padding_idx=self.padding_idx)\n self.rnn = LSTM(embed_dim, hidden_dim, batch_first=True, bidirectional=bool(bidirectional))\n\n def forward(self, src_tokens, src_lengths=None, **kwargs):\n bsz = src_tokens.size(0)\n x = self.embed(src_tokens)\n # x = F.dropout(x, p=self.dropout, training=self.training)\n\n if src_lengths is None:\n src_lengths = src_lengths.new([src_lengths.size(0)]).copy_(\n src_tokens.ne(self.padding_idx).sum(-1)\n )\n\n # sort\n sorted_lens, indices = src_lengths.sort(descending=True)\n sorted_x = x.index_select(0, indices)\n sorted_x = pack_padded_sequence(sorted_x, sorted_lens.data.tolist(), batch_first=True)\n\n x, (h, c) = self.rnn(sorted_x)\n\n _, reversed_indices = indices.sort()\n # x, lens = pad_packed_sequence(x, batch_first=True)\n # x = F.dropout(x, p=self.dropout, training=self.training)\n # x = x.index_select(0, reversed_indices)\n h = h.index_select(1, reversed_indices)\n h = h.view(self.rnn_layers, 2 if self.bidirectional else 1, bsz, self.hidden_dim)\n h = h[-1].view(bsz, -1)\n return h\n\n\nclass NBOWEncoder(NccEncoder):\n def __init__(self, dictionary, embed_dim, dropout=0.25):\n super(NBOWEncoder, self).__init__(dictionary)\n self.padding_idx = self.dictionary.pad()\n self.embed_dim = embed_dim\n self.dropout = dropout\n self.embed = Embedding(len(dictionary), embed_dim) # , padding_idx=self.padding_idx)\n # self.init_weights()\n\n def forward(self, src_tokens, src_lengths=None, **kwargs):\n lens = src_tokens.size(1)\n x = self.embed(src_tokens)\n # x = F.dropout(x, p=self.dropout, training=self.training)\n x = F.max_pool1d(x.transpose(1, 2), lens).squeeze(2)\n return x\n\n\nclass DeepCSEncoder(nn.Module):\n def __init__(\n self,\n name_dict=None, apiseq_dict=None, tokens_dict=None,\n embed_dim=128, hidden_dim=128, rnn_layers=1, bidirectional=False, dropout=0.1,\n ):\n super(DeepCSEncoder, self).__init__()\n # func_name encoder\n if name_dict is None:\n self.name_encoder = None\n else:\n self.name_encoder = nn.ModuleList([\n 
SeqEncoder(name_dict,\n embed_dim=embed_dim,\n hidden_dim=hidden_dim,\n rnn_layers=rnn_layers,\n bidirectional=bidirectional,\n dropout=dropout),\n Linear(2 * hidden_dim, embed_dim),\n ])\n # apiseq encoder\n if apiseq_dict is None:\n self.apiseq_encoder = None\n else:\n self.apiseq_encoder = nn.ModuleList([\n SeqEncoder(name_dict,\n embed_dim=embed_dim,\n hidden_dim=hidden_dim,\n rnn_layers=rnn_layers,\n bidirectional=bidirectional,\n dropout=dropout),\n Linear(2 * hidden_dim, embed_dim),\n ])\n # apiseq encoder\n if tokens_dict is None:\n self.tokens_encoder = None\n else:\n self.tokens_encoder = nn.ModuleList([\n NBOWEncoder(tokens_dict,\n embed_dim=embed_dim,\n dropout=dropout),\n Linear(embed_dim, embed_dim),\n ])\n # fusion layer\n self.fusion = Linear(embed_dim, embed_dim)\n\n def forward(self,\n name=None, name_len=None,\n apiseq=None, apiseq_len=None,\n tokens=None, tokens_len=None,\n ):\n if name is not None and self.name_encoder is not None:\n name_out = self.name_encoder[0](name, name_len)\n name_out = self.name_encoder[1](name_out)\n # name_repr = self.name_encoder(name, name_len)\n else:\n name_out = 0\n\n if apiseq is not None and self.apiseq_encoder is not None:\n apiseq_out = self.apiseq_encoder[0](apiseq, apiseq_len)\n apiseq_out = self.apiseq_encoder[1](apiseq_out)\n # api_repr = self.apiseq_encoder(api, api_len)\n else:\n apiseq_out = 0\n\n if tokens is not None and self.tokens_encoder is not None:\n tokens_out = self.tokens_encoder[0](tokens, tokens_len)\n tokens_out = self.tokens_encoder[1](tokens_out)\n # tokens_repr = self.tokens_encoder(tokens, tokens_len)\n else:\n tokens_out = 0\n\n code_repr = self.fusion(torch.tanh(name_out + apiseq_out + tokens_out))\n return code_repr\n" ]
[ [ "torch.nn.Linear", "torch.nn.utils.rnn.pad_packed_sequence", "torch.nn.ReLU", "torch.nn.functional.dropout" ], [ "torch.tanh" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
AndMu/Market-Wisdom
[ "64889634566172ccfed372bba452f717862ba956" ]
[ "src/PortfolioBasic/Technical/Indicators.py" ]
[ "import abc\nimport logging\n\nimport pandas as pd\nfrom PortfolioBasic.stockstats import StockDataFrame\n\nfrom PortfolioBasic.Definitions import HeaderFactory\nfrom PortfolioBasic.Technical.Analysis import TechnicalPerformance\nlogger = logging.getLogger(__name__)\n\n\nclass Indicator(object):\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def calculate(self, data: pd.DataFrame)-> pd.DataFrame:\n pass\n\n @abc.abstractmethod\n def required_days(self) -> int:\n pass\n\n\nclass CombinedIndicator(Indicator):\n\n def required_days(self) -> int:\n return max(self.indicators, key=lambda x: x.required_days()).required_days()\n\n def __init__(self, indicators: list):\n self.indicators = indicators\n\n def calculate(self, data: pd.DataFrame) -> pd.DataFrame:\n result = pd.DataFrame(index=data.index)\n for indicator in self.indicators:\n indicator_result = indicator.calculate(data)\n result = result.join(indicator_result)\n return result\n\n\nclass MomentumIndicator(Indicator):\n\n def __init__(self, days=5):\n self.days = days\n\n def required_days(self) -> int:\n return self.days + 1\n\n def calculate(self, data: pd.DataFrame) -> pd.DataFrame:\n data = data[HeaderFactory.Price].copy()\n previous = data.shift(self.days)\n data = data / previous - 1\n result = pd.DataFrame(index=data.index, data=data.values, columns=['{}_{}'.format(HeaderFactory.MOM, self.days)])\n return result\n\n\nclass Williams(Indicator):\n def __init__(self):\n self.windows = 10\n\n def required_days(self) -> int:\n return self.windows\n\n def calculate(self, data: pd.DataFrame) -> pd.DataFrame:\n stock = StockDataFrame.retype(data.copy())\n wr_10 = stock['wr_10']\n result = pd.DataFrame(index=wr_10.index, data=wr_10.values, columns=['wr_10'])\n return result\n\n\nclass CommodityChannelIndex(Indicator):\n def __init__(self):\n self.windows = 14\n\n def required_days(self) -> int:\n return self.windows\n\n def calculate(self, data: pd.DataFrame) -> pd.DataFrame:\n stock = StockDataFrame.retype(data.copy())\n value = stock['cci']\n result = pd.DataFrame(index=value.index, data=value.values, columns=['cci'])\n return result\n\n\nclass TripleExponentialMovingAverage(Indicator):\n def __init__(self):\n self.windows = 12\n\n def required_days(self) -> int:\n return self.windows\n\n def calculate(self, data: pd.DataFrame) -> pd.DataFrame:\n stock = StockDataFrame.retype(data.copy())\n value = stock['trix']\n result = pd.DataFrame(index=value.index, data=value.values, columns=['trix'])\n return result\n\n\nclass AverageTrueRange(Indicator):\n def __init__(self):\n self.windows = 12\n\n def required_days(self) -> int:\n return self.windows\n\n def calculate(self, data: pd.DataFrame) -> pd.DataFrame:\n stock = StockDataFrame.retype(data.copy())\n value = stock['atr']\n result = pd.DataFrame(index=value.index, data=value.values, columns=['atr'])\n return result\n\n\nclass AverageDirectionalIndex(Indicator):\n def __init__(self):\n self.windows = 6\n\n def required_days(self) -> int:\n return self.windows\n\n def calculate(self, data: pd.DataFrame) -> pd.DataFrame:\n stock = StockDataFrame.retype(data.copy())\n value = stock['adx']\n result = pd.DataFrame(index=value.index, data=value.values, columns=['adx'])\n return result\n\n\nclass BollingerIndicator(Indicator):\n\n def __init__(self, windows=20):\n self.windows = windows\n\n def required_days(self) -> int:\n return self.windows\n\n def calculate(self, data: pd.DataFrame) -> pd.DataFrame:\n data = data[HeaderFactory.Price].copy()\n rm, rstd = 
TechnicalPerformance.compute_std(data, self.windows)\n result = (data - rm) / (2 * rstd)\n result = pd.DataFrame(index=result.index, data=result.values, columns=[HeaderFactory.Bollinger])\n return result\n\n\nclass RsiIndicator(Indicator):\n\n def required_days(self) -> int:\n return 15\n\n def calculate(self, data: pd.DataFrame) -> pd.DataFrame:\n data = pd.DataFrame(data[HeaderFactory.Price])\n data = data.rename(index=str, columns={HeaderFactory.Price: \"close\"})\n stock = StockDataFrame.retype(data)\n rsi = stock['rsi_14']\n rsi /= 100\n data = rsi\n result = pd.DataFrame(index=data.index, data=data.values, columns=[HeaderFactory.RSI])\n return result\n\n\nclass MACDIndicator(Indicator):\n\n def __init__(self):\n self.n_fast = 12\n self.n_slow = 26\n self.signal_period = 9\n\n def required_days(self) -> int:\n return 26\n\n def calculate(self, data: pd.DataFrame, normalized=False) -> pd.DataFrame:\n fast = data[HeaderFactory.Price].ewm(adjust=True, min_periods=self.n_slow - 1, span=self.n_fast,\n ignore_na=False).mean()\n EMAfast = pd.Series(fast)\n\n slow = data[HeaderFactory.Price].ewm(adjust=True, min_periods=self.n_slow - 1, span=self.n_slow,\n ignore_na=False).mean()\n EMAslow = pd.Series(slow)\n result = EMAfast - EMAslow\n if normalized:\n result = result / EMAslow\n MACD = pd.Series(result, name=HeaderFactory.MACD)\n signal = MACD.ewm(adjust=True, min_periods=self.signal_period - 1, span=self.signal_period,\n ignore_na=False).mean()\n MACDsign = pd.Series(signal, name=HeaderFactory.MACD_SIGNAL)\n MACDdiff = pd.Series(MACD - MACDsign, name=HeaderFactory.MACD_DIFF)\n data = pd.DataFrame(MACD).join(MACDsign)\n data = data.join(MACDdiff)\n return data\n\n" ]
[ [ "pandas.Series", "pandas.DataFrame" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
PJunhyuk/2021AICompetition-03
[ "dbeea7dec3f009f1f1485984dcdfa54eb6b4f75e" ]
[ "train.py" ]
[ "import argparse\nimport logging\nimport math\nimport os\nimport random\nimport time\nfrom copy import deepcopy\nfrom pathlib import Path\nfrom threading import Thread\n\nimport numpy as np\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nimport torch.utils.data\nimport yaml\nfrom torch.cuda import amp\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm import tqdm\n\nimport torch\nimport numpy as np\nimport random\n\nimport predict # import predict.py to get mAP after each epoch\nfrom models.experimental import attempt_load\nfrom models.yolo import Model\nfrom utils.autoanchor import check_anchors\nfrom utils.datasets import create_dataloader\nfrom utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \\\n fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \\\n check_requirements, print_mutation, set_logging, one_cycle, colorstr\nfrom utils.google_utils import attempt_download\nfrom utils.loss import ComputeLoss\nfrom utils.plots import plot_images, plot_labels, plot_results, plot_evolution\nfrom utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, de_parallel\nfrom utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume\n\nimport json\nfrom PIL import Image\n\nimport os\nimport shutil\n\nfrom os import path\nimport sys\nsys.path.append(path.dirname( path.dirname( path.abspath(__file__) ) ))\nfrom utils.general import xyxy2xywh\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef train(hyp, opt, device, tb_writer=None):\n logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items()))\n save_dir, epochs, batch_size, total_batch_size, weights, rank = \\\n Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank\n\n # Directories\n wdir = save_dir / 'weights'\n wdir.mkdir(parents=True, exist_ok=True) # make dir\n last = wdir / 'last.pt'\n best = wdir / 'best.pt'\n results_file = save_dir / 'results.txt'\n\n # Save run settings\n with open(save_dir / 'hyp.yaml', 'w') as f:\n yaml.safe_dump(hyp, f, sort_keys=False)\n with open(save_dir / 'opt.yaml', 'w') as f:\n yaml.safe_dump(vars(opt), f, sort_keys=False)\n\n # Configure\n# plots = not opt.evolve # create plots\n plots = True # create plots\n cuda = device.type != 'cpu'\n init_seeds(1 + rank)\n with open(opt.data) as f:\n data_dict = yaml.safe_load(f) # data dict\n\n # Logging- Doing this before checking the dataset. 
Might update data_dict\n loggers = {'wandb': None} # loggers dict\n if rank in [-1, 0]:\n opt.hyp = hyp # add hyperparameters\n run_id = torch.load(weights).get('wandb_id') if weights.endswith('.pt') and os.path.isfile(weights) else None\n wandb_logger = WandbLogger(opt, save_dir.stem, run_id, data_dict)\n loggers['wandb'] = wandb_logger.wandb\n data_dict = wandb_logger.data_dict\n if wandb_logger.wandb:\n weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming\n\n nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes\n names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names\n assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check\n is_coco = opt.data.endswith('coco.yaml') and nc == 80 # COCO dataset\n\n # Model\n pretrained = weights.endswith('.pt')\n if pretrained:\n # with torch_distributed_zero_first(rank):\n # weights = attempt_download(weights) # download if not found locally\n ckpt = torch.load(weights, map_location=device) # load checkpoint\n model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create\n exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys\n state_dict = ckpt['model'].float().state_dict() # to FP32\n state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect\n model.load_state_dict(state_dict, strict=False) # load\n logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report\n else:\n model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create\n with torch_distributed_zero_first(rank):\n check_dataset(data_dict) # check\n train_path = data_dict['train']\n test_path = data_dict['val']\n\n # Freeze\n freeze = ['1', '2', '3', '4', '5', '6' '7', '8', '9', '10', '11'] # parameter names to freeze (full or partial)\n freeze = ['model.' + number + '.' 
for number in freeze]\n for k, v in model.named_parameters():\n v.requires_grad = True # train all layers\n if any(x in k for x in freeze) and opt.fine_tune is True:\n print('freezing %s' % k)\n v.requires_grad = False\n\n # Optimizer\n nbs = 64 # nominal batch size\n accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing\n hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay\n logger.info(f\"Scaled weight_decay = {hyp['weight_decay']}\")\n\n pg0, pg1, pg2 = [], [], [] # optimizer parameter groups\n for k, v in model.named_modules():\n if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter):\n pg2.append(v.bias) # biases\n if isinstance(v, nn.BatchNorm2d):\n pg0.append(v.weight) # no decay\n elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter):\n pg1.append(v.weight) # apply decay\n\n if opt.adam:\n optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum\n else:\n optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)\n\n optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay\n optimizer.add_param_group({'params': pg2}) # add pg2 (biases)\n logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0)))\n del pg0, pg1, pg2\n\n # Scheduler https://arxiv.org/pdf/1812.01187.pdf\n # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR\n if opt.linear_lr:\n lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear\n else:\n lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf']\n scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)\n # plot_lr_scheduler(optimizer, scheduler, epochs)\n\n # EMA\n ema = ModelEMA(model) if rank in [-1, 0] else None\n\n # Resume\n start_epoch, best_fitness = 0, 0.0\n if pretrained:\n # Optimizer\n if ckpt['optimizer'] is not None:\n optimizer.load_state_dict(ckpt['optimizer'])\n best_fitness = ckpt['best_fitness']\n\n # EMA\n if ema and ckpt.get('ema'):\n ema.ema.load_state_dict(ckpt['ema'].float().state_dict())\n ema.updates = ckpt['updates']\n\n # Results\n if ckpt.get('training_results') is not None:\n results_file.write_text(ckpt['training_results']) # write results.txt\n\n # Epochs\n start_epoch = ckpt['epoch'] + 1\n if opt.resume:\n assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs)\n if epochs < start_epoch:\n logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' 
%\n (weights, ckpt['epoch'], epochs))\n epochs += ckpt['epoch'] # finetune additional epochs\n\n del ckpt, state_dict\n\n # Image sizes\n gs = max(int(model.stride.max()), 32) # grid size (max stride)\n nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj'])\n imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples\n\n # DP mode\n if cuda and rank == -1 and torch.cuda.device_count() > 1:\n model = torch.nn.DataParallel(model)\n\n # SyncBatchNorm\n if opt.sync_bn and cuda and rank != -1:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)\n logger.info('Using SyncBatchNorm()')\n\n # Trainloader\n dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,\n hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank,\n world_size=opt.world_size, workers=opt.workers,\n image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: '), task='train', epoch_parts=opt.epoch_parts)\n mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class\n nb = len(dataloader) # number of batches\n assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % (mlc, nc, opt.data, nc - 1)\n\n # Process 0\n if rank in [-1, 0]:\n testloader = create_dataloader(test_path, imgsz_test, batch_size * 2, gs, opt, # testloader\n hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1,\n world_size=opt.world_size, workers=opt.workers,\n pad=0.5, prefix=colorstr('val: '))[0]\n\n if not opt.resume:\n labels = np.concatenate(dataset.labels, 0)\n c = torch.tensor(labels[:, 0]) # classes\n # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency\n # model._initialize_biases(cf.to(device))\n if plots:\n plot_labels(labels, names, save_dir, loggers)\n if tb_writer:\n tb_writer.add_histogram('classes', c, 0)\n\n # Anchors\n if not opt.noautoanchor:\n check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz)\n model.half().float() # pre-reduce anchor precision\n\n # DDP mode\n if cuda and rank != -1:\n model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank,\n # nn.MultiheadAttention incompatibility with DDP https://github.com/pytorch/pytorch/issues/26698\n find_unused_parameters=any(isinstance(layer, nn.MultiheadAttention) for layer in model.modules()))\n\n # Model parameters\n hyp['box'] *= 3. / nl # scale to layers\n hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers\n hyp['obj'] *= (imgsz / 640) ** 2 * 3. 
/ nl # scale to image size and layers\n hyp['label_smoothing'] = opt.label_smoothing\n model.nc = nc # attach number of classes to model\n model.hyp = hyp # attach hyperparameters to model\n model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou)\n model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc # attach class weights\n model.names = names\n\n # Start training\n t0 = time.time()\n nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations)\n # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training\n maps = np.zeros(nc) # mAP per class\n results = (0, 0, 0, 0, 0, 0, 0) # P, R, [email protected], [email protected], val_loss(box, obj, cls)\n scheduler.last_epoch = start_epoch - 1 # do not move\n scaler = amp.GradScaler(enabled=cuda)\n compute_loss = ComputeLoss(model) # init loss class\n logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\\n'\n f'Using {dataloader.num_workers} dataloader workers\\n'\n f'Logging results to {save_dir}\\n'\n f'Starting training for {epochs} epochs...')\n for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------\n model.train()\n\n # Update image weights (optional)\n if opt.image_weights:\n # Generate indices\n if rank in [-1, 0]:\n cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc # class weights\n iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights\n dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx\n # Broadcast if DDP\n if rank != -1:\n indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()\n dist.broadcast(indices, 0)\n if rank != 0:\n dataset.indices = indices.cpu().numpy()\n\n # Update mosaic border\n # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs)\n # dataset.mosaic_border = [b - imgsz, -b] # height, width borders\n\n mloss = torch.zeros(4, device=device) # mean losses\n if rank != -1:\n dataloader.sampler.set_epoch(epoch)\n pbar = enumerate(dataloader)\n logger.info(('\\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'labels', 'img_size'))\n if rank in [-1, 0]:\n pbar = tqdm(pbar, total=nb) # progress bar\n optimizer.zero_grad()\n for i, (imgs, targets, paths, _) in pbar: # batch -------------------------------------------------------------\n ni = i + nb * epoch # number integrated batches (since train start)\n imgs = imgs.to(device, non_blocking=True).float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0\n\n # Warmup\n if ni <= nw:\n xi = [0, nw] # x interp\n # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou)\n accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())\n for j, x in enumerate(optimizer.param_groups):\n # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0\n x['lr'] = np.interp(ni, xi, [hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])\n if 'momentum' in x:\n x['momentum'] = np.interp(ni, xi, [hyp['warmup_momentum'], hyp['momentum']])\n\n # Multi-scale\n if opt.multi_scale:\n sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size\n sf = sz / max(imgs.shape[2:]) # scale factor\n if sf != 1:\n ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]] # new shape (stretched to gs-multiple)\n imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)\n\n # Forward\n with 
amp.autocast(enabled=cuda):\n pred = model(imgs) # forward\n loss, loss_items = compute_loss(pred, targets.to(device)) # loss scaled by batch_size\n if rank != -1:\n loss *= opt.world_size # gradient averaged between devices in DDP mode\n if opt.quad:\n loss *= 4.\n\n # Backward\n scaler.scale(loss).backward()\n\n # Optimize\n if ni % accumulate == 0:\n scaler.step(optimizer) # optimizer.step\n scaler.update()\n optimizer.zero_grad()\n if ema:\n ema.update(model)\n\n # Print\n if rank in [-1, 0]:\n mloss = (mloss * i + loss_items) / (i + 1) # update mean losses\n mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB)\n s = ('%10s' * 2 + '%10.6g' * 6) % (\n '%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1])\n pbar.set_description(s)\n\n # Plot\n if plots and ni < 3:\n f = save_dir / f'train_batch{ni}.jpg' # filename\n Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()\n if tb_writer:\n tb_writer.add_graph(torch.jit.trace(de_parallel(model), imgs, strict=False), []) # model graph\n # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch)\n elif plots and ni == 10 and wandb_logger.wandb:\n wandb_logger.log({\"Mosaics\": [wandb_logger.wandb.Image(str(x), caption=x.name) for x in\n save_dir.glob('train*.jpg') if x.exists()]})\n\n # end batch ------------------------------------------------------------------------------------------------\n # end epoch ----------------------------------------------------------------------------------------------------\n\n # Scheduler\n lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard\n scheduler.step()\n\n # DDP process 0 or single-GPU\n if rank in [-1, 0]:\n # mAP\n ema.update_attr(model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights'])\n final_epoch = epoch + 1 == epochs\n\n if (epoch+1) % opt.save_period != 0:\n wandb_logger.current_epoch = epoch + 1\n # Log\n tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss\n 'x/lr0', 'x/lr1', 'x/lr2'] # params\n for x, tag in zip(list(mloss[:-1]) + lr, tags):\n if tb_writer:\n tb_writer.add_scalar(tag, x, epoch) # tensorboard\n if wandb_logger.wandb:\n wandb_logger.log({tag: x}) # W&B\n wandb_logger.end_epoch()\n\n # Write\n with open(results_file, 'a') as f:\n f.write(s + '\\n') # append metrics, val_loss\n\n else:\n if not opt.notest or final_epoch: # Calculate mAP\n wandb_logger.current_epoch = epoch + 1\n results, maps, times = predict.test(data_dict,\n batch_size=batch_size * 2,\n imgsz=imgsz_test,\n model=ema.ema,\n single_cls=opt.single_cls,\n dataloader=testloader,\n save_dir=save_dir,\n save_json=is_coco and final_epoch,\n verbose=nc < 50,\n plots=plots and final_epoch,\n wandb_logger=wandb_logger,\n compute_loss=compute_loss,\n is_coco=is_coco)\n # Write\n with open(results_file, 'a') as f:\n f.write(s + '%10.4g' * 8 % results + '\\n') # append metrics, val_loss\n\n # Log\n tags = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss\n 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.75', 'metrics/mAP_0.5:0.95',\n 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss\n 'x/lr0', 'x/lr1', 'x/lr2'] # params\n for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):\n if tb_writer:\n tb_writer.add_scalar(tag, x, epoch) # tensorboard\n if wandb_logger.wandb:\n wandb_logger.log({tag: x}) # W&B\n\n # Update best mAP\n fi = fitness(np.array(results).reshape(1, -1)) # weighted combination of [P, R, 
[email protected], [email protected], [email protected]]\n if fi > best_fitness:\n best_fitness = fi\n wandb_logger.end_epoch(best_result=best_fitness == fi)\n\n # Save model\n if (not opt.nosave) or (final_epoch and not opt.evolve): # if save\n ckpt = {'epoch': epoch,\n 'best_fitness': best_fitness,\n 'training_results': results_file.read_text(),\n 'model': deepcopy(de_parallel(model)).half(),\n 'ema': deepcopy(ema.ema).half(),\n 'updates': ema.updates,\n 'optimizer': optimizer.state_dict(),\n 'wandb_id': wandb_logger.wandb_run.id if wandb_logger.wandb else None}\n\n # Save last, best and delete\n torch.save(ckpt, last)\n if best_fitness == fi:\n torch.save(ckpt, best)\n if wandb_logger.wandb:\n if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:\n wandb_logger.log_model(\n last.parent, opt, epoch, fi, best_model=best_fitness == fi)\n del ckpt\n\n # end epoch ----------------------------------------------------------------------------------------------------\n # end training\n if rank in [-1, 0]:\n logger.info(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - t0) / 3600:.3f} hours.\\n')\n if plots:\n plot_results(save_dir=save_dir) # save as results.png\n if wandb_logger.wandb:\n files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]]\n wandb_logger.log({\"Results\": [wandb_logger.wandb.Image(str(save_dir / f), caption=f) for f in files\n if (save_dir / f).exists()]})\n\n if not opt.evolve:\n if is_coco: # COCO dataset\n for m in [last, best] if best.exists() else [last]: # speed, mAP tests\n results, _, _ = predict.test(opt.data,\n batch_size=batch_size * 2,\n imgsz=imgsz_test,\n conf_thres=0.001,\n iou_thres=0.7,\n model=attempt_load(m, device).half(),\n single_cls=opt.single_cls,\n dataloader=testloader,\n save_dir=save_dir,\n save_json=True,\n plots=False,\n is_coco=is_coco)\n\n # Strip optimizers\n for f in last, best:\n if f.exists():\n strip_optimizer(f) # strip optimizers\n if wandb_logger.wandb: # Log the stripped model\n wandb_logger.wandb.log_artifact(str(best if best.exists() else last), type='model',\n name='run_' + wandb_logger.wandb_run.id + '_model',\n aliases=['latest', 'best', 'stripped'])\n wandb_logger.finish_run()\n else:\n dist.destroy_process_group()\n torch.cuda.empty_cache()\n return results\n\n\ndef data_prepare():\n\n random.seed(100)\n\n names = ['eye_opened', 'eye_closed', 'mouth_opened', 'mouth_closed', 'face', 'phone', 'cigar']\n\n path_train_dir = '/DATA/Final_DATA/task03_train'\n new_dir = '../drowsy_face'\n\n # generate raw_train.json, raw_val.json\n generate_raw_json = True\n if generate_raw_json == True:\n print('generate raw_train.json, raw_val.json')\n\n if os.path.exists(new_dir):\n shutil.rmtree(new_dir)\n os.makedirs(new_dir + '/images/train')\n os.makedirs(new_dir + '/images/val')\n os.makedirs(new_dir + '/labels/train')\n os.makedirs(new_dir + '/labels/val')\n\n with open(path_train_dir + '/labels.json') as f:\n json_data = json.load(f)\n json_anno = json_data[\"annotations\"]\n\n num_data = len(json_anno) # 273224\n\n val_idx = random.sample(list(range(num_data)), 20000)\n\n json_anno_val = []\n json_anno_train = []\n\n for idx, json_img in enumerate(tqdm(json_anno)):\n if idx in val_idx:\n json_anno_val.append(json_img)\n else:\n json_anno_train.append(json_img)\n\n json_data_val = {}\n json_data_val['annotations'] = json_anno_val\n json_data_train = {}\n json_data_train['annotations'] = json_anno_train\n\n if os.path.isfile(new_dir + 
'/raw_val.json'):\n os.remove(new_dir + '/raw_val.json')\n if os.path.isfile(new_dir + '/raw_train.json'):\n os.remove(new_dir + '/raw_train.json')\n\n with open(new_dir + '/raw_val.json', 'w') as f_val:\n json.dump(json_data_val, f_val)\n with open(new_dir + '/raw_train.json', 'w') as f_train:\n json.dump(json_data_train, f_train)\n\n\n # generate drowsy_face/train, drowsy_face/val\n generate_drowsy_face = True\n if generate_drowsy_face == True:\n print('generate drowsy_face/train, drowsy_face/val')\n\n with open(new_dir + '/raw_val.json') as f:\n json_data = json.load(f)\n json_anno = json_data[\"annotations\"]\n\n for json_img in tqdm(json_anno):\n img_id = json_img['file_name']\n txt_dir = new_dir + '/labels/val/' + img_id.split('.')[0] + '.txt'\n img_dir = new_dir + '/images/val/' + img_id\n\n f_txt = open(txt_dir, 'w')\n img_ = Image.open(path_train_dir + '/images/' + img_id)\n img_size = img_.size\n objects_yolo = ''\n for img_obj in json_img['objects']:\n class_id = str(names.index(img_obj['class']))\n img_pos = img_obj['position']\n\n xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0]\n f_txt.write(f\"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\\n\") # write label\n\n f_txt.close()\n\n shutil.copy(path_train_dir + '/images/' + img_id, img_dir)\n\n with open(new_dir + '/raw_train.json') as f:\n json_data = json.load(f)\n json_anno = json_data[\"annotations\"]\n\n for json_img in tqdm(json_anno):\n img_id = json_img['file_name']\n txt_dir = new_dir + '/labels/train/' + img_id.split('.')[0] + '.txt'\n img_dir = new_dir + '/images/train/' + img_id\n\n f_txt = open(txt_dir, 'w')\n img_ = Image.open(path_train_dir + '/images/' + img_id)\n img_size = img_.size\n objects_yolo = ''\n for img_obj in json_img['objects']:\n class_id = str(names.index(img_obj['class']))\n img_pos = img_obj['position']\n\n xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0]\n f_txt.write(f\"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\\n\") # write label\n\n f_txt.close()\n\n shutil.copy(path_train_dir + '/images/' + img_id, img_dir)\n\n\n # generate diet_train.json\n generate_diet_json = True\n if generate_diet_json == True:\n print('generate diet_train.json')\n\n json_anno_diet = []\n\n with open(path_train_dir + '/labels.json') as f:\n json_data = json.load(f)\n json_anno = json_data[\"annotations\"]\n\n fidx = 0\n for img_info in tqdm(json_anno):\n file_name = img_info['file_name']\n cigar_check = 0\n phone_check = 0\n eye_closed_check = 0\n mouth_closed_check = 0\n mouth_opened_check = 0\n\n for annotation_info in img_info['objects']:\n if annotation_info['class'] == 'cigar':\n cigar_check = 1\n elif annotation_info['class'] == 'phone':\n phone_check = 1\n elif annotation_info['class'] == 'eye_closed':\n eye_closed_check = 1\n elif annotation_info['class'] == 'mouth_closed':\n mouth_closed_check = 1\n elif annotation_info['class'] == 'mouth_opened':\n mouth_opened_check = 1\n if cigar_check or phone_check:\n json_anno_diet.append(img_info)\n elif eye_closed_check and mouth_closed_check:\n json_anno_diet.append(img_info)\n elif eye_closed_check and mouth_opened_check:\n json_anno_diet.append(img_info)\n elif mouth_opened_check:\n fidx = fidx + 1\n if fidx % 3 == 0:\n json_anno_diet.append(img_info)\n\n json_data_diet = {}\n json_data_diet['annotations'] = json_anno_diet\n\n if os.path.isfile(new_dir + 
'/diet_train.json'):\n os.remove(new_dir + '/diet_train.json')\n\n with open(new_dir + '/diet_train.json', 'w') as f_diet:\n json.dump(json_data_diet, f_diet)\n\n\n # generate drowsy_face_diet/train\n generate_drowsy_face_diet = True\n if generate_drowsy_face_diet == True:\n print('generate drowsy_face_diet/train')\n\n new_dir_diet = '../drowsy_face_diet'\n if os.path.exists(new_dir_diet):\n shutil.rmtree(new_dir_diet)\n os.makedirs(new_dir_diet + '/images/train')\n os.makedirs(new_dir_diet + '/labels/train')\n\n with open(new_dir + '/diet_train.json') as f:\n json_data = json.load(f)\n json_anno = json_data[\"annotations\"]\n\n for json_img in tqdm(json_anno):\n img_id = json_img['file_name']\n txt_dir = new_dir_diet + '/labels/train/' + img_id.split('.')[0] + '.txt'\n img_dir = new_dir_diet + '/images/train/' + img_id\n\n f_txt = open(txt_dir, 'w')\n img_ = Image.open(path_train_dir + '/images/' + img_id)\n img_size = img_.size\n objects_yolo = ''\n for img_obj in json_img['objects']:\n class_id = str(names.index(img_obj['class']))\n img_pos = img_obj['position']\n\n xywh = xyxy2xywh(np.array([[img_pos[0]/img_size[0], img_pos[1]/img_size[1], img_pos[2]/img_size[0], img_pos[3]/img_size[1]]]))[0]\n f_txt.write(f\"{class_id} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\\n\") # write label\n\n f_txt.close()\n\n shutil.copy(path_train_dir + '/images/' + img_id, img_dir)\n\n\n # count classes\n def count_classes(annotations):\n class_dict = {\n 'eye_opened': 0,\n 'eye_closed': 0,\n 'mouth_opened': 0,\n 'mouth_closed': 0,\n 'face': 0,\n 'phone': 0,\n 'cigar': 0\n }\n for img_info in tqdm(annotations):\n for annotation_info in img_info['objects']:\n class_dict[annotation_info['class']] = class_dict[annotation_info['class']] + 1\n print(class_dict)\n\n count_jsons = True\n if count_jsons == True:\n print('count classes')\n with open(new_dir + '/diet_train.json', 'r') as annotation_file:\n annotations = json.load(annotation_file)\n annotations = annotations['annotations']\n print('diet_train.json')\n count_classes(annotations)\n with open(new_dir + '/raw_train.json', 'r') as annotation_file:\n annotations = json.load(annotation_file)\n annotations = annotations['annotations']\n print('raw_train.json')\n count_classes(annotations)\n with open(new_dir + '/raw_val.json', 'r') as annotation_file:\n annotations = json.load(annotation_file)\n annotations = annotations['annotations']\n print('raw_val.json')\n count_classes(annotations)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--random_seed', type=int, default=0, help='')\n parser.add_argument('--weights', type=str, default='', help='initial weights path')\n parser.add_argument('--cfg', type=str, default='models/hub/yolov5l6.yaml', help='model.yaml path')\n parser.add_argument('--data', type=str, default='data/drowsy_face.yaml', help='data.yaml path')\n parser.add_argument('--hyp', type=str, default='data/hyp.scratch-p6.yaml', help='hyperparameters path')\n parser.add_argument('--batch-size', type=int, default=4, help='total batch size for all GPUs')\n parser.add_argument('--img-size', nargs='+', type=int, default=[1280, 1280], help='[train, test] image sizes')\n parser.add_argument('--rect', action='store_true', help='rectangular training')\n parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training')\n parser.add_argument('--nosave', action='store_true', help='only save final checkpoint')\n parser.add_argument('--notest', action='store_true', 
help='only test final epoch')\n parser.add_argument('--noautoanchor', action='store_true', help='disable autoanchor check')\n parser.add_argument('--evolve', action='store_true', help='evolve hyperparameters')\n parser.add_argument('--bucket', type=str, default='', help='gsutil bucket')\n parser.add_argument('--cache-images', default='', action='store_true', help='cache images for faster training')\n parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training')\n parser.add_argument('--device', default='0', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')\n parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%')\n parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class')\n parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer')\n parser.add_argument('--sync-bn', action='store_true', help='use SyncBatchNorm, only available in DDP mode')\n parser.add_argument('--local_rank', type=int, default=-1, help='DDP parameter, do not modify')\n parser.add_argument('--workers', type=int, default=8, help='maximum number of dataloader workers')\n parser.add_argument('--project', default='runs/train', help='save to project/name')\n parser.add_argument('--entity', default=None, help='W&B entity')\n parser.add_argument('--name', default='final', help='save to project/name')\n parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')\n parser.add_argument('--quad', action='store_true', help='quad dataloader')\n parser.add_argument('--linear-lr', action='store_true', help='linear LR')\n parser.add_argument('--label-smoothing', type=float, default=0.0, help='Label smoothing epsilon')\n parser.add_argument('--upload_dataset', action='store_true', help='Upload dataset as W&B artifact table')\n parser.add_argument('--bbox_interval', type=int, default=300, help='Set bounding-box image logging interval for W&B')\n parser.add_argument('--artifact_alias', type=str, default=\"latest\", help='version of dataset artifact to be used')\n ## for baseline training\n parser.add_argument('--no_data_prepare', action='store_true')\n parser.add_argument('--epochs', type=int, default=300)\n parser.add_argument('--epoch_parts', type=int, default=15, help='Log model after every \"save_period\" epoch')\n parser.add_argument('--save_period', type=int, default=300, help='Log model after every \"save_period\" epoch')\n ## for fine-tuning\n parser.add_argument('--fine_tune', action='store_true', help='fine_tune')\n parser.add_argument('--epochs_tune', type=int, default=50)\n parser.add_argument('--epoch_parts_tune', type=int, default=50, help='Log model after every \"save_period\" epoch')\n parser.add_argument('--save_period_tune', type=int, default=50, help='Log model after every \"save_period\" epoch')\n\n opt = parser.parse_args()\n\n if not opt.no_data_prepare:\n data_prepare()\n\n # Reproducibility\n torch.manual_seed(opt.random_seed)\n torch.cuda.manual_seed(opt.random_seed)\n torch.cuda.manual_seed_all(opt.random_seed) # if use multi-GPU\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n np.random.seed(opt.random_seed)\n random.seed(opt.random_seed)\n\n # Set DDP variables\n opt.world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1\n opt.global_rank = int(os.environ['RANK']) if 'RANK' in os.environ else -1\n set_logging(opt.global_rank)\n if opt.global_rank in [-1, 
0]:\n check_requirements(exclude=('pycocotools', 'thop'))\n\n # Resume\n wandb_run = check_wandb_resume(opt)\n if opt.resume and not wandb_run: # resume an interrupted run\n ckpt = opt.resume if isinstance(opt.resume, str) else get_latest_run() # specified or most recent path\n assert os.path.isfile(ckpt), 'ERROR: --resume checkpoint does not exist'\n apriori = opt.global_rank, opt.local_rank\n with open(Path(ckpt).parent.parent / 'opt.yaml') as f:\n opt = argparse.Namespace(**yaml.safe_load(f)) # replace\n opt.cfg, opt.weights, opt.resume, opt.batch_size, opt.global_rank, opt.local_rank = \\\n '', ckpt, True, opt.total_batch_size, *apriori # reinstate\n logger.info('Resuming training from %s' % ckpt)\n else:\n # opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')\n opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files\n assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'\n opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)\n opt.name = 'evolve' if opt.evolve else opt.name\n opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve))\n\n # DDP mode\n opt.total_batch_size = opt.batch_size\n device = select_device(opt.device, batch_size=opt.batch_size)\n if opt.local_rank != -1:\n assert torch.cuda.device_count() > opt.local_rank\n torch.cuda.set_device(opt.local_rank)\n device = torch.device('cuda', opt.local_rank)\n dist.init_process_group(backend='nccl', init_method='env://') # distributed backend\n assert opt.batch_size % opt.world_size == 0, '--batch-size must be multiple of CUDA device count'\n assert not opt.image_weights, '--image-weights argument is not compatible with DDP training'\n opt.batch_size = opt.total_batch_size // opt.world_size\n\n # Hyperparameters\n with open(opt.hyp) as f:\n hyp = yaml.safe_load(f) # load hyps\n\n # Train\n logger.info(opt)\n if not opt.evolve:\n tb_writer = None # init loggers\n if opt.global_rank in [-1, 0]:\n prefix = colorstr('tensorboard: ')\n logger.info(f\"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/\")\n tb_writer = SummaryWriter(opt.save_dir) # Tensorboard\n train(hyp, opt, device, tb_writer)\n\n print(\"### base train completed\")\n\n print(\"### fine-tuning start\")\n\n opt.fine_tune = True\n opt.weights = opt.save_dir + '/weights/last.pt'\n opt.data = 'data/drowsy_face_tuning.yaml'\n opt.hyp = 'data/hyp.finetune-simple.yaml'\n opt.epochs = opt.epochs_tune\n opt.epoch_parts = opt.epoch_parts_tune\n opt.save_period = opt.save_period_tune\n\n opt.data, opt.cfg, opt.hyp = check_file(opt.data), check_file(opt.cfg), check_file(opt.hyp) # check files\n assert len(opt.cfg) or len(opt.weights), 'either --cfg or --weights must be specified'\n opt.img_size.extend([opt.img_size[-1]] * (2 - len(opt.img_size))) # extend to 2 sizes (train, test)\n opt.name = 'evolve' if opt.evolve else opt.name\n opt.save_dir = str(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve))\n\n # Hyperparameters\n with open(opt.hyp) as f:\n hyp = yaml.safe_load(f) # load hyps\n\n # Train\n logger.info(opt)\n if not opt.evolve:\n tb_writer = None # init loggers\n if opt.global_rank in [-1, 0]:\n prefix = colorstr('tensorboard: ')\n logger.info(f\"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/\")\n tb_writer = SummaryWriter(opt.save_dir) # Tensorboard\n train(hyp, 
opt, device, tb_writer)\n" ]
[ [ "torch.distributed.broadcast", "torch.optim.lr_scheduler.LambdaLR", "torch.load", "torch.zeros", "torch.nn.SyncBatchNorm.convert_sync_batchnorm", "numpy.concatenate", "torch.cuda.amp.autocast", "torch.utils.tensorboard.SummaryWriter", "torch.cuda.manual_seed_all", "torch.nn.functional.interpolate", "torch.device", "torch.cuda.is_available", "torch.save", "torch.cuda.memory_reserved", "torch.distributed.init_process_group", "torch.tensor", "numpy.interp", "torch.optim.SGD", "numpy.zeros", "torch.optim.Adam", "torch.cuda.empty_cache", "torch.cuda.amp.GradScaler", "torch.distributed.destroy_process_group", "torch.cuda.device_count", "numpy.array", "torch.cuda.manual_seed", "numpy.random.seed", "torch.cuda.set_device", "torch.manual_seed", "torch.nn.DataParallel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dongchao0612/MyClassicNet
[ "b4363effd3cc930a0f797e49470c20124886b2ba", "b4363effd3cc930a0f797e49470c20124886b2ba" ]
[ "DenseNet/DenseNet.py", "GoogLeNet/test.py" ]
[ "import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nfrom torchsummary import summary\n\n\nclass BN_Conv2d(nn.Module):\n \"\"\"\n BN_CONV_RELU\n \"\"\"\n\n def __init__(self,\n in_channels: object,\n out_channels: object,\n kernel_size: object,\n stride: object,\n padding: object,\n dilation=1,\n groups=1,\n bias=False) -> object:\n super(BN_Conv2d, self).__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=bias), nn.BatchNorm2d(out_channels))\n\n def forward(self, x):\n return F.relu(self.seq(x))\n\n\nclass DenseBlock(nn.Module):\n def __init__(self, input_channels, num_layers, growth_rate):\n super(DenseBlock, self).__init__()\n self.num_layers = num_layers\n self.k0 = input_channels\n self.k = growth_rate\n self.layers = self.__make_layers()\n\n def __make_layers(self):\n layer_list = []\n for i in range(self.num_layers):\n layer_list.append(\n nn.Sequential(\n BN_Conv2d(self.k0 + i * self.k, 4 * self.k, 1, 1, 0),\n BN_Conv2d(4 * self.k, self.k, 3, 1, 1)))\n return layer_list\n\n def forward(self, x):\n feature = self.layers[0](x)\n out = torch.cat((x, feature), 1)\n for i in range(1, len(self.layers)):\n feature = self.layers[i](out)\n out = torch.cat((feature, out), 1)\n return out\n\n\nclass DenseNet(nn.Module):\n def __init__(self, layers: object, k, theta, num_classes) -> object:\n super(DenseNet, self).__init__()\n # params\n self.layers = layers\n self.k = k\n self.theta = theta\n # layers\n self.conv = BN_Conv2d(3, 2 * k, 7, 2, 3)\n self.blocks, patches = self.__make_blocks(2 * k)\n self.fc = nn.Linear(patches, num_classes)\n\n def __make_transition(self, in_chls):\n out_chls = int(self.theta * in_chls)\n return nn.Sequential(BN_Conv2d(in_chls, out_chls, 1, 1, 0),\n nn.AvgPool2d(2)), out_chls\n\n def __make_blocks(self, k0):\n \"\"\"\n make block-transition structures\n :param k0:\n :return:\n \"\"\"\n layers_list = []\n patches = 0\n for i in range(len(self.layers)):\n layers_list.append(DenseBlock(k0, self.layers[i], self.k))\n patches = k0 + self.layers[\n i] * self.k # output feature patches from Dense Block\n if i != len(self.layers) - 1:\n transition, k0 = self.__make_transition(patches)\n layers_list.append(transition)\n return nn.Sequential(*layers_list), patches\n\n def forward(self, x):\n out = self.conv(x)\n out = F.max_pool2d(out, 3, 2, 1)\n # print(out.shape)\n out = self.blocks(out)\n # print(out.shape)\n out = F.avg_pool2d(out, 7)\n # print(out.shape)\n out = out.view(out.size(0), -1)\n out = F.softmax(self.fc(out))\n return out\n\n\nif __name__ == '__main__':\n pass\n", "import numpy as np\nimport torch\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\n\nfrom GoogLeNet import GoogLeNet\nfrom LeNet import LeNet\n\nif __name__ == '__main__':\n\n test_dataset = datasets.MNIST(root=\"../Datasets/MNIST/test\", train=False, transform=transforms.ToTensor(),download=True)\n\n test_dataloader = DataLoader(dataset=test_dataset, batch_size=100, shuffle=True, num_workers=0)\n\n network = GoogLeNet()\n param = torch.load(\"GoogLeNet_param_best.pkl\") # 加载模型\n network.load_state_dict(param) # 将参数放入模型当中\n\n acc = []\n for data in test_dataloader:\n imgs, targets = data\n output = network(imgs) # 输出预测\n _, pre_lab = torch.max(output, 1) # 提取预测序列\n batch_acc = np.array(pre_lab == targets).sum() / test_dataloader.__len__()\n acc.append(batch_acc)\n\n 
acc = sum(acc) / len(acc)\n print(\"accuracy: \", acc) # 输出正确率 0.9844999999999992\n" ]
[ [ "torch.nn.Sequential", "torch.cat", "torch.nn.functional.avg_pool2d", "torch.nn.Conv2d", "torch.nn.Linear", "torch.nn.AvgPool2d", "torch.nn.BatchNorm2d", "torch.nn.functional.max_pool2d" ], [ "numpy.array", "torch.utils.data.DataLoader", "torch.max", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Voyz/transformers-interpret
[ "8884e0a969be33d2664fb413dd9430d64ecd5ceb" ]
[ "transformers_interpret/attributions.py" ]
[ "from typing import Callable, Dict, List, Tuple\n\nimport torch\nimport torch.nn as nn\nfrom captum.attr import (\n IntegratedGradients,\n LayerConductance,\n LayerIntegratedGradients,\n configure_interpretable_embedding_layer,\n remove_interpretable_embedding_layer,\n)\nfrom captum.attr import visualization as viz\n\nfrom transformers_interpret.errors import AttributionsNotCalculatedError\n\n\nclass Attributions:\n def __init__(self, custom_forward: Callable, embeddings: nn.Module, text: str):\n self.custom_forward = custom_forward\n self.embeddings = embeddings\n self.text = text\n\n\nclass LIGAttributions(Attributions):\n def __init__(\n self,\n custom_forward: Callable,\n embeddings: nn.Module,\n text: str,\n input_ids: torch.Tensor,\n ref_input_ids: torch.Tensor,\n sep_id: int,\n ):\n super().__init__(custom_forward, embeddings, text)\n self.input_ids = input_ids\n self.ref_input_ids = ref_input_ids\n self.lig = LayerIntegratedGradients(self.custom_forward, self.embeddings)\n self._attributions, self.delta = self.lig.attribute(\n inputs=self.input_ids,\n baselines=self.ref_input_ids,\n return_convergence_delta=True,\n )\n\n @property\n def word_attributions(self):\n wa = []\n if len(self.attributions_sum) >= 1:\n for i, (word, attribution) in enumerate(\n zip(self.text.split(), self.attributions_sum)\n ):\n wa.append((word, float(attribution.data.numpy())))\n return wa\n\n else:\n raise AttributionsNotCalculatedError(\"Attributions are not yet calculated\")\n\n def summarize(self):\n self.attributions_sum = self._attributions.sum(dim=-1).squeeze(0)\n self.attributions_sum = self.attributions_sum / torch.norm(\n self.attributions_sum\n )\n\n def visualize_attributions(\n self, pred_prob, pred_class, true_class, attr_class, text, all_tokens\n ):\n\n return viz.VisualizationDataRecord(\n self.attributions_sum,\n pred_prob,\n pred_class,\n true_class,\n attr_class,\n self.attributions_sum.sum(),\n all_tokens,\n self.delta,\n )\n" ]
[ [ "torch.norm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
georgetown-cset/ai-definitions-for-policymaking
[ "667e928c8bb30f6e02696ac71081c6bae4096f50" ]
[ "analysis/prediction_counts.py" ]
[ "import pandas as pd\nfrom settings import PROJECT_ID\n\ncounts = pd.read_gbq(\"\"\"\\\n SELECT \n countif(arxiv_scibert_hit is true) arxiv_scibert,\n countif(arxiv_scibert_cl_hit is true) arxiv_scibert_cl,\n countif(arxiv_scibert_cv_hit is true) arxiv_scibert_cv,\n countif(arxiv_scibert_ro_hit is true) arxiv_scibert_ro,\n FROM ai_relevant_papers.definitions_brief_latest\n \"\"\", project_id=PROJECT_ID)\n\ncounts.to_csv('analysis/prediction_counts.csv', index=False)\n" ]
[ [ "pandas.read_gbq" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
lilisako/pandas
[ "0eceea496746769e4781f081b8c7159b1ce9f8f0", "0eceea496746769e4781f081b8c7159b1ce9f8f0" ]
[ "pandas/tests/indexes/test_any_index.py", "pandas/tests/indexes/categorical/test_reindex.py" ]
[ "\"\"\"\nTests that can be parametrized over _any_ Index object.\n\nTODO: consider using hypothesis for these.\n\"\"\"\nimport re\n\nimport pytest\n\nimport pandas._testing as tm\n\n\ndef test_boolean_context_compat(index):\n # GH#7897\n with pytest.raises(ValueError, match=\"The truth value of a\"):\n if index:\n pass\n\n with pytest.raises(ValueError, match=\"The truth value of a\"):\n bool(index)\n\n\ndef test_sort(index):\n msg = \"cannot sort an Index object in-place, use sort_values instead\"\n with pytest.raises(TypeError, match=msg):\n index.sort()\n\n\ndef test_hash_error(index):\n with pytest.raises(TypeError, match=f\"unhashable type: '{type(index).__name__}'\"):\n hash(index)\n\n\ndef test_copy_dtype_deprecated(index):\n # GH#35853\n with tm.assert_produces_warning(FutureWarning):\n index.copy(dtype=object)\n\n\ndef test_mutability(index):\n if not len(index):\n return\n msg = \"Index does not support mutable operations\"\n with pytest.raises(TypeError, match=msg):\n index[0] = index[0]\n\n\ndef test_map_identity_mapping(index):\n # GH#12766\n tm.assert_index_equal(index, index.map(lambda x: x))\n\n\ndef test_wrong_number_names(index):\n names = index.nlevels * [\"apple\", \"banana\", \"carrot\"]\n with pytest.raises(ValueError, match=\"^Length\"):\n index.names = names\n\n\ndef test_view_preserves_name(index):\n assert index.view().name == index.name\n\n\ndef test_ravel_deprecation(index):\n # GH#19956 ravel returning ndarray is deprecated\n with tm.assert_produces_warning(FutureWarning):\n index.ravel()\n\n\ndef test_is_type_compatible_deprecation(index):\n # GH#42113\n msg = \"is_type_compatible is deprecated\"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n index.is_type_compatible(index.inferred_type)\n\n\nclass TestConversion:\n def test_to_series(self, index):\n # assert that we are creating a copy of the index\n\n ser = index.to_series()\n assert ser.values is not index.values\n assert ser.index is not index\n assert ser.name == index.name\n\n def test_to_series_with_arguments(self, index):\n # GH#18699\n\n # index kwarg\n ser = index.to_series(index=index)\n\n assert ser.values is not index.values\n assert ser.index is index\n assert ser.name == index.name\n\n # name kwarg\n ser = index.to_series(name=\"__test\")\n\n assert ser.values is not index.values\n assert ser.index is not index\n assert ser.name != index.name\n\n def test_tolist_matches_list(self, index):\n assert index.tolist() == list(index)\n\n\nclass TestRoundTrips:\n def test_pickle_roundtrip(self, index):\n result = tm.round_trip_pickle(index)\n tm.assert_index_equal(result, index)\n if result.nlevels > 1:\n # GH#8367 round-trip with timezone\n assert index.equal_levels(result)\n\n def test_pickle_preserves_name(self, index):\n original_name, index.name = index.name, \"foo\"\n unpickled = tm.round_trip_pickle(index)\n assert index.equals(unpickled)\n index.name = original_name\n\n\nclass TestIndexing:\n def test_slice_keeps_name(self, index):\n assert index.name == index[1:].name\n\n @pytest.mark.parametrize(\"item\", [101, \"no_int\"])\n # FutureWarning from non-tuple sequence of nd indexing\n @pytest.mark.filterwarnings(\"ignore::FutureWarning\")\n def test_getitem_error(self, index, item):\n msg = r\"index 101 is out of bounds for axis 0 with size [\\d]+|\" + re.escape(\n \"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) \"\n \"and integer or boolean arrays are valid indices\"\n )\n with pytest.raises(IndexError, match=msg):\n index[item]\n\n\nclass 
TestRendering:\n def test_str(self, index):\n # test the string repr\n index.name = \"foo\"\n assert \"'foo'\" in str(index)\n assert type(index).__name__ in str(index)\n\n\nclass TestReductions:\n def test_argmax_axis_invalid(self, index):\n # GH#23081\n msg = r\"`axis` must be fewer than the number of dimensions \\(1\\)\"\n with pytest.raises(ValueError, match=msg):\n index.argmax(axis=1)\n with pytest.raises(ValueError, match=msg):\n index.argmin(axis=2)\n with pytest.raises(ValueError, match=msg):\n index.min(axis=-2)\n with pytest.raises(ValueError, match=msg):\n index.max(axis=-3)\n", "import numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n DataFrame,\n Index,\n Series,\n)\nimport pandas._testing as tm\n\n\nclass TestReindex:\n def test_reindex_dtype(self):\n c = CategoricalIndex([\"a\", \"b\", \"c\", \"a\"])\n res, indexer = c.reindex([\"a\", \"c\"])\n tm.assert_index_equal(res, Index([\"a\", \"a\", \"c\"]), exact=True)\n tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))\n\n c = CategoricalIndex([\"a\", \"b\", \"c\", \"a\"])\n res, indexer = c.reindex(Categorical([\"a\", \"c\"]))\n\n exp = CategoricalIndex([\"a\", \"a\", \"c\"], categories=[\"a\", \"c\"])\n tm.assert_index_equal(res, exp, exact=True)\n tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))\n\n c = CategoricalIndex([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n res, indexer = c.reindex([\"a\", \"c\"])\n exp = Index([\"a\", \"a\", \"c\"], dtype=\"object\")\n tm.assert_index_equal(res, exp, exact=True)\n tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))\n\n c = CategoricalIndex([\"a\", \"b\", \"c\", \"a\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n res, indexer = c.reindex(Categorical([\"a\", \"c\"]))\n exp = CategoricalIndex([\"a\", \"a\", \"c\"], categories=[\"a\", \"c\"])\n tm.assert_index_equal(res, exp, exact=True)\n tm.assert_numpy_array_equal(indexer, np.array([0, 3, 2], dtype=np.intp))\n\n def test_reindex_duplicate_target(self):\n # See GH25459\n cat = CategoricalIndex([\"a\", \"b\", \"c\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n res, indexer = cat.reindex([\"a\", \"c\", \"c\"])\n exp = Index([\"a\", \"c\", \"c\"], dtype=\"object\")\n tm.assert_index_equal(res, exp, exact=True)\n tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))\n\n res, indexer = cat.reindex(\n CategoricalIndex([\"a\", \"c\", \"c\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n )\n exp = CategoricalIndex([\"a\", \"c\", \"c\"], categories=[\"a\", \"b\", \"c\", \"d\"])\n tm.assert_index_equal(res, exp, exact=True)\n tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))\n\n def test_reindex_empty_index(self):\n # See GH16770\n c = CategoricalIndex([])\n res, indexer = c.reindex([\"a\", \"b\"])\n tm.assert_index_equal(res, Index([\"a\", \"b\"]), exact=True)\n tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp))\n\n def test_reindex_missing_category(self):\n # GH: 18185\n ser = Series([1, 2, 3, 1], dtype=\"category\")\n msg = r\"Cannot setitem on a Categorical with a new category \\(-1\\)\"\n with pytest.raises(TypeError, match=msg):\n ser.reindex([1, 2, 3, 4, 5], fill_value=-1)\n\n @pytest.mark.parametrize(\n \"index_df,index_res,index_exp\",\n [\n (\n CategoricalIndex([], categories=[\"A\"]),\n Index([\"A\"]),\n Index([\"A\"]),\n ),\n (\n CategoricalIndex([], categories=[\"A\"]),\n Index([\"B\"]),\n Index([\"B\"]),\n ),\n (\n CategoricalIndex([], 
categories=[\"A\"]),\n CategoricalIndex([\"A\"]),\n CategoricalIndex([\"A\"]),\n ),\n (\n CategoricalIndex([], categories=[\"A\"]),\n CategoricalIndex([\"B\"]),\n CategoricalIndex([\"B\"]),\n ),\n ],\n )\n def test_reindex_not_category(self, index_df, index_res, index_exp):\n # GH: 28690\n df = DataFrame(index=index_df)\n result = df.reindex(index=index_res)\n expected = DataFrame(index=index_exp)\n tm.assert_frame_equal(result, expected)\n" ]
[ [ "pandas._testing.assert_produces_warning", "pandas._testing.round_trip_pickle", "pandas._testing.assert_index_equal" ], [ "pandas.CategoricalIndex", "pandas.Series", "pandas.Categorical", "pandas.Index", "pandas.DataFrame", "numpy.array", "pandas._testing.assert_frame_equal", "pandas._testing.assert_index_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Hadrien-Montanelli/chebpy
[ "c22f1f13b42b3c80f2e34be6e7136ef2d0277971" ]
[ "chebpy/nla/sphankel.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 10 14:49:44 2020\n\nCopyright 2020 by Hadrien Montanelli.\n\"\"\"\n# Standard library imports:\nimport numpy as np\nfrom scipy.sparse import triu\n\n# Chebpy imports:\nfrom .sptoeplitz import sptoeplitz\n\ndef sphankel(col):\n \"\"\"Return a sparse Hankel matrix.\"\"\"\n col = np.flipud(col)\n H = triu(sptoeplitz(col, col), format='csr')\n H = np.flip(H, axis=1)\n return H" ]
[ [ "numpy.flipud", "numpy.flip" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Shujun-He/Nucleic-Transformer
[ "c6527132cd4c04489b28617beb0694605f320ed9" ]
[ "src/Eukaryotic_Promoters_Classification/mouse_tata_deepromoter/Metrics.py" ]
[ "import numpy as np\n\n\ndef accuracy(predictions,ground_truths):\n return np.sum(predictions==ground_truths)/len(ground_truths)\n \n \ndef sensitivity(predictions,ground_truths):\n '''\n Here it is assumed:\n 0=negative\n 1=positive\n '''\n return 1-len(predictions[(predictions==0)*(ground_truths==1)])/len(ground_truths[ground_truths==1])\n\n\n\ndef specificity(predictions,ground_truths):\n '''\n Here it is assumed:\n 0=negative\n 1=positive\n '''\n return 1-len(predictions[(predictions==1)*(ground_truths==0)])/len(ground_truths[ground_truths==0])\n \ndef MCC(predictions,ground_truths):\n '''\n Here it is assumed:\n 0=negative\n 1=positive\n '''\n N1=len(predictions[(predictions==0)&(ground_truths==1)])\n N2=len(predictions[(predictions==1)&(ground_truths==0)])\n N3=len(ground_truths[ground_truths==1])\n N4=len(ground_truths[ground_truths==0])\n sens=1-N1/N3\n spec=1-N2/N4\n denom=np.sqrt((1+(N2-N1)/N3)*(1+(N1-N2)/N4))\n return (1-sens-spec)/denom\n \n \n " ]
[ [ "numpy.sum", "numpy.sqrt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
stefaniespeichert/l5kit
[ "e7ef272b80d71c5080891b27f478c6d3e001774e" ]
[ "l5kit/l5kit/tests/geometry/image_test.py" ]
[ "import unittest\n\nimport numpy as np\n\nfrom l5kit.geometry import crop_rectangle_from_image\n\n\nclass TestImageGeometry(unittest.TestCase):\n def test_crop_rectangle_from_image(self) -> None:\n # . X X X X 0 X X X\n # . X X X X X X X X\n # . X X X X X X X X\n # . 3 . . . . . . 1\n # . . . . . . . . .\n # . . . . 2 . . . .\n # 0,1,2,3 are the corners, X are 1-value pixels\n # Actual image is 10x larger in both dimensions\n im = np.zeros((60, 90), dtype=np.uint8)\n im[:30, 10:] = 1\n\n corners = np.array([[0, 60], [30, 80], [50, 40], [30, 10]])\n crop_image = crop_rectangle_from_image(im, corners)\n\n # Only one corner is in the \"1\" area\n corner_sum = crop_image[0, 0] + crop_image[0, -1] + crop_image[-1, 0] + crop_image[-1, -1]\n self.assertEqual(corner_sum, 1)\n\n self.assertTrue(0.5 > crop_image.mean() > 0.4)\n" ]
[ [ "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jwcalder/peikonal
[ "3091eb547b1a7458a054be2383cd38602a00f31b" ]
[ "depth_images.py" ]
[ "import numpy as np\nimport graphlearning as gl\nimport matplotlib.pyplot as plt\nimport sklearn.datasets as datasets\nfrom utils import peikonal_depth\nimport sys\n\nk = 20\nfrac=0.05\nalpha=2\n\n#Plotting\nnumw = 16\nnumh = 10\n\n\nfor dataset in ['mnist','fashionmnist']:\n\n f_bdy, axarr_bdy = plt.subplots(numh,numw,gridspec_kw={'wspace':0.1,'hspace':0.1})\n f_peikonal_median, axarr_peikonal = plt.subplots(numh,numw,gridspec_kw={'wspace':0.1,'hspace':0.1})\n f_bdy.suptitle('Boundary images')\n f_peikonal_median.suptitle('peikonal Median images')\n\n\n X, labels = gl.datasets.load(dataset)\n pathID = np.zeros((10,200))\n\n for label in range(10):\n print(\"Digit %d...\"%label)\n\n #Subset labels\n X_sub = X[labels==label,:]\n num = X_sub.shape[0]\n\n #KNN search\n knn_ind, knn_dist = gl.weightmatrix.knnsearch(X_sub,30)\n W = gl.weightmatrix.knn(X_sub,k,knn_data=(knn_ind,knn_dist))\n G = gl.graph(W)\n if not G.isconnected():\n sys.exit('Graph is not connected')\n d = np.max(knn_dist,axis=1)\n kde = (d/d.max())**(-1)\n \n median, depth = peikonal_depth(G, kde, frac, alpha)\n\n depth = depth/np.max(depth)\n depth = 1-depth\n \n ind_boundary = np.argsort(+depth)\n ind_peikonal = np.argsort(-depth)\n \n b_indx = ind_boundary[0]\n m_indx = ind_peikonal[0] \n \n \n W = W.tocsr()\n neigh_num = 20\n b_indx_up = b_indx\n pathID[label,0] = b_indx\n maxItt = 1e2\n dp = 0\n cnt = 0\n while (dp < 1) and (cnt < maxItt):\n cnt += 1\n #xnId = knn_ind[b_indx_up,1:neigh_num]\n xnId = W[b_indx_up,:].nonzero()[1]\n wnId = depth[xnId]\n wnMx = np.argmax(wnId)\n b_indx_up = xnId[wnMx]\n pathID[label,cnt] = b_indx_up\n dp = depth[b_indx_up]\n\n print(dp)\n #Visualization\n for j in range(numw):\n img = X_sub[ind_boundary[j],:]\n m = int(np.sqrt(img.shape[0]))\n img = np.reshape(img,(m,m))\n if dataset.lower() == 'mnist':\n img = np.transpose(img)\n axarr_bdy[label,j].imshow(img,cmap='gray')\n axarr_bdy[label,j].axis('off')\n axarr_bdy[label,j].set_aspect('equal')\n\n img = X_sub[ind_peikonal[j],:]\n m = int(np.sqrt(img.shape[0]))\n img = np.reshape(img,(m,m))\n if dataset.lower() == 'mnist':\n img = np.transpose(img)\n axarr_peikonal[label,j].imshow(img,cmap='gray')\n axarr_peikonal[label,j].axis('off')\n axarr_peikonal[label,j].set_aspect('equal')\n\n\n\n f_bdy.savefig('figures/'+dataset+'_boundary.png')\n f_peikonal_median.savefig('figures/'+dataset+'_peikonal_median.png')\n\n\n # path from boundary to median plots\n columns = 1\n for i in range(10):\n x = pathID[i,:]\n indx = np.nonzero(x)\n digitIndx = indx[0]\n lp = len(digitIndx)\n if (lp > columns):\n columns = lp\n \n #Plotting\n numw = columns\n numh = 10\n\n f_peikonal_path, axarr_peikonal_path = plt.subplots(numh,numw,gridspec_kw={'wspace':0.1,'hspace':0.1})\n f_peikonal_path.suptitle('peikonal boundary to median images')\n\n\n img = X_sub[0,:]\n lm = img.shape[0]\n for label in range(10):\n x = pathID[label,:]\n indx = np.nonzero(x)\n digitIndx = indx[0]\n lp = len(digitIndx)\n path = pathID[label,digitIndx]\n \n X_sub = X[labels==label,:]\n\n #Visualization\n for j in range(numw):\n if (j < lp):\n i = int(path[j])\n img = X_sub[i,:]\n m = int(np.sqrt(img.shape[0]))\n img = np.reshape(img,(m,m))\n if dataset.lower() == 'mnist':\n img = np.transpose(img)\n axarr_peikonal_path[label,j].imshow(img,cmap='gray')\n else:\n img = np.ones(lm)\n m = int(np.sqrt(img.shape[0]))\n img = np.reshape(img,(m,m))\n axarr_peikonal_path[label,j].imshow(img,cmap='binary')\n axarr_peikonal_path[label,j].axis('off')\n axarr_peikonal_path[label,j].set_aspect('equal')\n 
\n f_peikonal_path.savefig('figures/'+dataset+'_peikonal_path.png')\n\nplt.show()\n\n\n" ]
[ [ "numpy.sqrt", "numpy.nonzero", "numpy.reshape", "matplotlib.pyplot.subplots", "numpy.ones", "numpy.max", "numpy.argmax", "numpy.transpose", "numpy.argsort", "matplotlib.pyplot.show", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
albert-yue/objectnav
[ "95ce9bc2c1d953887275e8d9809a506aeb5682fb" ]
[ "habitat_baselines/rl/ppo/encoder_dict.py" ]
[ "#!/usr/bin/env python3\n\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\nimport abc\nimport math\nfrom collections import defaultdict\nfrom typing import Dict, Tuple, List, Optional\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom habitat_baselines.common.utils import Flatten\nimport habitat_baselines.rl.models.resnet as resnet\nfrom habitat_baselines.rl.ppo.policy import Policy, Net, GOAL_EMBEDDING_SIZE, ObservationSequential\n\n\"\"\"\nThis module was experimental and used to support multiple visual streams\n(e.g. separate resnets processing RGBD and Semantics).\nWe found little difference when splitting, but kept the module.\n\"\"\"\n\n_key_to_sensor = {\n \"rgbd\": [\"rgb\", \"depth\"],\n \"rgbdsem\": [\"rgb\", \"depth\", \"semantic\"],\n \"none\": []\n}\ndef key_to_sensor(k):\n if k in _key_to_sensor:\n return _key_to_sensor[k]\n return [k]\n\ndef get_vision_encoder_inputs(ppo_cfg):\n r\"\"\"\n Different downstream modules will query for a certain input modality.\n Here, we map requested modalities to the inputs of not yet instantiated CNN.\n \"\"\"\n policy_encoders = {}\n ENCODERS = ppo_cfg.POLICY.BELIEFS.ENCODERS\n assert len(ENCODERS) == 1 or (len(ENCODERS) == ppo_cfg.POLICY.BELIEFS.NUM_BELIEFS and \"all\" not in ENCODERS)\n default_sensors = key_to_sensor(ENCODERS[0])\n\n policy_encoders[\"all\"] = default_sensors\n\n # For each visual encoder (keyed by modality) specify the sensors used\n # If a module requestss a subset of modalities (e.g. only rgb), we will give them the superset (e.g. rgbd) that is used\n\n if \"rgb\" in default_sensors:\n policy_encoders[\"rgb\"] = default_sensors # superset\n if \"depth\" in default_sensors:\n policy_encoders[\"depth\"] = default_sensors # superset\n # semantics, edge cases, aren't really thorough.\n\n if len(ENCODERS) == 1 and ppo_cfg.POLICY.USE_SEMANTICS:\n default_sensors.append(\"semantic\")\n policy_encoders[\"semantic\"] = default_sensors\n for encoder in ENCODERS:\n if encoder not in policy_encoders:\n policy_encoders[encoder] = key_to_sensor(encoder)\n return policy_encoders\n\nclass BlindDummyResnet(nn.Module):\n r\"\"\"\n Rather than have irregular visions we can't stack, just feed zero as the blind vision embedding\n \"\"\"\n def __init__(self, hidden_size):\n super().__init__()\n self.hidden_size = hidden_size\n def forward(self, observations, **kwargs):\n return torch.zeros(observations[\"depth\"].size(0), self.hidden_size, device=observations[\"depth\"].device) # Assuming depth\n\nclass ResnetVisionEncoderSet(nn.Module):\n r\"\"\"\n Holds onto a number of encoders, each of which can be associated with more than one label.\n Used to make sure everyone gets the right sensory information without dup-ing forward passes.\n JIT-able\n \"\"\"\n def __init__(self,\n encoder_labels,\n observation_space,\n resnet_baseplanes = 32,\n backbone = \"resnet18\",\n hidden_size = 512,\n mock_semantics: bool = False,\n **kwargs,\n ):\n r\"\"\"\n encoder_labels: requirements dict.\n key: sensor requirement\n value: inputs to corresponding encoder (a hash for the encoder)\n **kwargs forward to resnet construction\n \"\"\"\n super().__init__()\n sensor_to_encoder = {k: sorted(v) for k, v in encoder_labels.items()}\n self.encoder_labels = {k: str(v) for k, v in sensor_to_encoder.items()}\n encoders = {}\n for modalities in sensor_to_encoder.values():\n if str(modalities) in encoders:\n continue\n if 
len(modalities) == 0:\n encoders[str(modalities)] = BlindDummyResnet(hidden_size)\n continue\n\n # re: mock objectnav: Semantic space is not read so we don't have to modify space\n visual_resnet = resnet.ResNetEncoder(\n observation_space,\n baseplanes=resnet_baseplanes,\n ngroups=resnet_baseplanes // 2,\n make_backbone=getattr(resnet, backbone),\n use_if_available=modalities,\n mock_semantics=mock_semantics,\n **kwargs\n # While we ideally wouldn't record this on resnet, I don't think there's harm\n # And it's more convenient than passing arg through nn.Sequential (which is the top-level module we're using)\n )\n\n visual_encoder = ObservationSequential(\n visual_resnet,\n Flatten(),\n nn.Linear(\n int(np.prod(visual_resnet.output_shape)), hidden_size # int cast for jit\n ),\n nn.ReLU(True),\n )\n\n encoders[str(modalities)] = visual_encoder\n self.encoders = nn.ModuleDict(encoders)\n\n def has_modality(self, modality):\n return modality in self.encoder_labels\n\n def forward(self,\n observations: Dict[str, torch.Tensor],\n # other_embeddings: Optional[List[torch.Tensor]] = None\n ):\n r\"\"\"\n Forward each encoder and assign encoder per sensor requirements.\n observations: dictionary of raw sensor inputs\n # other_embeddings: list of other embeddings to cat onto the vision embedding\n \"\"\"\n embeddings = {}\n for label, encoder in self.encoders.items():\n embeddings[label] = encoder(observations) # b x h\n # if other_embeddings is not None:\n # # Written out for JIT\n # all_embeddings = [embeddings[label]]\n # for other in other_embeddings:\n # all_embeddings.append(other)\n # embeddings[label] = torch.cat(all_embeddings, dim=-1)\n # del all_embeddings\n # embeddings[label] = torch.cat([embeddings[label], *other_embeddings], dim=-1)\n sensor_to_encoder = {}\n for k, v in self.encoder_labels.items():\n sensor_to_encoder[k] = embeddings[v]\n return sensor_to_encoder\n" ]
[ [ "numpy.prod", "torch.nn.ModuleDict", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zlichen/GoMoKu-Game-MCTS
[ "2a559d19ad7712e72c46108d622c8e15074b413c" ]
[ "main_GUI.py" ]
[ "import numpy as np\nfrom MCTSearch import *\nfrom nodes import *\nimport random\nimport pygame\nfrom GUI import *\n\"\"\"\ndef printboard(mat):\n m,n=mat.shape\n print(\" \",end='')\n for i in range(1,n+1):\n print(\"{:4d}\".format(i),end='')\n print()\n print(' '*4+'-'*22)\n for i in range(m):\n print(\"{:3d}|\".format(i+1),end='')\n for j in range(n):\n char='-'\n if mat[i,j]==1:\n char='*'\n elif mat[i,j]==-1:\n char='o'\n print(' {:3s}'.format(char),end='')\n print()\n\"\"\"\nif __name__==\"__main__\":\n \"\"\"\n If You Get The Plus then Initialize the node with State of Plus\n \"\"\"\n m,n=(8,8)\n board=np.zeros((m,n),dtype=np.int8)\n\n pygame.init()\n screen=pygame.display.set_mode((360,360))\n pygame.display.set_caption('Five-in-a-Row')\n done=False\n over=False\n\n #printboard(board)\n \"\"\"\n \"\"\"\n mst=None\n while not done:\n for event in pygame.event.get():\n \"\"\"\n test = -2\n while test != 0:\n x = int(input(\"Enter the x coordinate of your chosen site (1-%d):\"%(n)))\n y = int(input(\"Enter the y coordinate of your chosen site (1-%d):\"%(n)))\n while (x not in range(1, m+1)) or (y not in range(1, n+1)):\n x = int(input(\"Enter the x coordinate of your chosen site (1-%d):\"%(n)))\n y = int(input(\"Enter the y coordinate of your chosen site (1-%d):\"%(n)))\n test = board[x - 1][y - 1]\n if test == 0:\n board[x - 1][y - 1] = 1\n print(\"Adding %d to (%d,%d) successfully\" % (1, x, y))\n printboard(board)\n break\n print(\"Site is occupied, choose a different location\")\n \"\"\"\n render(screen, board)\n board, done, ocp, x, y=update_by_man(event, board)\n if ocp==0:\n continue\n render(screen, board)\n result=check02(board,1)\n if result==1:\n print(\"You win!\")\n done=True\n over=True\n break\n elif result==0:\n print(\"Draw\")\n done=True\n over=True\n break\n if event.type == pygame.MOUSEBUTTONDOWN:\n print(\"Computer round...\")\n root=None\n if isinstance(mst,MCTSNode) and len(mst.action)==0:\n print(\"Computer take it serious!!\")\n root=find_MCTS_trees(mst,(x,y))\n # root.parent=None\n # print(root.action)\n if root is None:\n root=MCTSNode(1,array=board)\n # mst=MCTSearch(root,board,strategy=True).monte_carlo_tree_search()\n mst = multiThreadingMCTS(root,1,board,strategy=True)\n if isinstance(mst,tuple):\n u,v=mst\n else:\n u,v=mst.move\n board[u][v] = -1\n print(\"Computer move to (%d,%d)\"%(u+1,v+1))\n render(screen, board)\n #printboard(board)\n result = check02(board, -1)\n if result==-1:\n print(\"Computer win!\")\n done=True\n over=True\n break\n elif result==0:\n print(\"Draw\")\n done=True\n over=True\n break\n if over == False:\n pygame.quit()\n else:\n for i in range(5,0,-1):\n print(i)\n time.sleep(1)\n print(\"Quit!\")\n pygame.quit()\n # board[4,3]=1\n # node=MCTSNode(1,array=board)\n # mst=MCTSearch(node,board).monte_carlo_tree_search()\n # print(len(mst.action))" ]
[ [ "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
seignovert/pyvims
[ "a70b5b9b8bc5c37fa43b7db4d15407f312a31849", "a70b5b9b8bc5c37fa43b7db4d15407f312a31849", "a70b5b9b8bc5c37fa43b7db4d15407f312a31849" ]
[ "pyvims/projections/path3d.py", "tests/test_vectors.py", "pyvims/isis/isis.py" ]
[ "\"\"\"Path 3D module.\"\"\"\n\nimport numpy as np\n\nfrom matplotlib.path import Path\n\n\nclass Path3D(Path):\n \"\"\"Extend matplotlib 2D Path object with altitude attribute.\n\n Parameters\n ----------\n vertices: Nx2 array\n Path vertices.\n codes: N array\n Path codes.\n alt: N array\n Point altitude [km].\n\n Raises\n ------\n ValueError\n If the altitude array does not have the same length\n as the vertices array.\n\n \"\"\"\n\n def __init__(self, *args, alt=None, **kwargs):\n super().__init__(*args, **kwargs)\n\n if alt is not None and \\\n (np.ndim(alt) == 0 or len(self.vertices) != np.shape(alt)[0]):\n raise ValueError('Altitude array must have the same length as the vertices.')\n\n self.alt = alt\n", "\"\"\"Test vector module.\"\"\"\n\nimport numpy as np\nfrom numpy.testing import assert_array_almost_equal as assert_array\n\nfrom pyvims.vectors import angle, azimuth, areaquad, vdot\n\nfrom pytest import approx, raises\n\n\ndef test_areaquad():\n \"\"\"Test spherical quadrangle area.\"\"\"\n assert areaquad(0, -90, 360, 90) == approx(4 * np.pi, abs=1e-6)\n assert areaquad(0, 15, 30, 45) == approx(4 * np.pi * .0187, abs=1e-3)\n\n assert areaquad(0, 15, 0, 45) == 0\n assert areaquad(0, 15, 30, 15) == 0\n\n\ndef test_vdot():\n \"\"\"Test dot product between two vectors.\"\"\"\n assert vdot([1, 0, 0], [1, 0, 0]) == 1\n assert vdot([1, 0, 0], [0, 1, 0]) == 0\n assert vdot([1, 0, 0], [0, 0, 1]) == 0\n assert vdot([1, 0, 0], [-1, 0, 0]) == -1\n\n v1 = np.transpose([[1, 0, 0], [1, 0, 0], [1, 0, 0], [1, 0, 0]])\n v2 = np.transpose([[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]])\n\n assert v1.shape == (3, 4)\n assert v2.shape == (3, 4)\n\n assert_array(vdot([1, 0, 0], v2), [1, 0, 0, -1])\n assert_array(vdot(v2, [1, 0, 0]), [1, 0, 0, -1])\n\n assert_array(vdot(v1, v2), [1, 0, 0, -1])\n\n v1 = np.transpose([[[1, 0, 0], [1, 0, 0]], [[1, 0, 0], [1, 0, 0]]])\n v2 = np.transpose([[[1, 0, 0], [0, 1, 0]], [[-1, 0, 0], [0, 0, 1]]])\n\n assert v1.shape == (3, 2, 2)\n assert v2.shape == (3, 2, 2)\n\n assert_array(vdot(v1, v2), [[1, 0], [-1, 0]])\n\n with raises(ValueError):\n _ = vdot([[1, 0, 0]], v1)\n\n\ndef test_angle():\n \"\"\"Test dot product.\"\"\"\n assert angle([1, 0, 0], [1, 0, 0]) == 0\n assert angle([1, 0, 0], [0, 1, 0]) == 90\n assert angle([1, 0, 0], [-1, 0, 0]) == 180\n\n assert angle([1, 0, 0], [2, 0, 0]) == 0\n assert angle([1, 0, 0], [0, 2, 0]) == 90\n assert angle([1, 0, 0], [-2, 0, 0]) == 180\n\n v1 = np.transpose([[1, 0, 0], [1, 0, 0], [1, 0, 0], [1, 0, 0]])\n v2 = np.transpose([[1, 0, 0], [0, 1, 0], [0, 0, 1], [-1, 0, 0]])\n\n assert v1.shape == (3, 4)\n assert v2.shape == (3, 4)\n\n assert_array(angle([1, 0, 0], v2), [0, 90, 90, 180])\n assert_array(angle(v2, [1, 0, 0]), [0, 90, 90, 180])\n\n assert_array(angle(v1, v2), [0, 90, 90, 180])\n\n v1 = np.transpose([[[1, 0, 0], [1, 0, 0]], [[1, 0, 0], [1, 0, 0]]])\n v2 = np.transpose([[[1, 0, 0], [0, 1, 0]], [[-1, 0, 0], [0, 0, 1]]])\n\n assert v1.shape == (3, 2, 2)\n assert v2.shape == (3, 2, 2)\n\n assert_array(angle(v1, v2), [[0, 90], [180, 90]])\n\n\ndef test_azimuth():\n \"\"\"Test azimuth illumination angle.\"\"\"\n assert azimuth(0, 0, 0) == 0\n assert azimuth(10, 0, 0) == 0\n assert azimuth(90, 90, 135) == approx(135)\n assert azimuth(90, 45, 135) == approx(180)\n\n _azi = 2 * np.degrees(np.arcsin(1 / np.sqrt(3)))\n\n assert azimuth(60, 60, 60) == approx(_azi)\n\n # 1D array\n inc, eme, phase = [0, 10, 90, 90, 60], [0, 0, 90, 45, 60], [0, 0, 135, 135, 60]\n\n assert_array(\n azimuth(inc, eme, phase),\n [0, 0, 135, 180, 
_azi]\n )\n\n # 2D array\n inc, eme, phase = [[10, 90], [90, 60]], [[0, 90], [45, 60]], [[0, 135], [135, 60]]\n\n assert_array(\n azimuth(inc, eme, phase),\n [[0, 135], [180, _azi]]\n )\n\n with raises(ValueError):\n _ = azimuth(0, [0], [0])\n\n with raises(ValueError):\n _ = azimuth([0], 0, [0])\n\n with raises(ValueError):\n _ = azimuth([0], [0], [[0]])\n", "\"\"\"VIMS ISIS header.\"\"\"\n\nimport os\n\nimport numpy as np\n\nimport pvl\n\nfrom .errors import ISISError\nfrom .history import ISISHistory\nfrom .labels import ISISLabels\nfrom .tables import ISISTables\nfrom .time import time as _dt\nfrom .vars import BYTE_ORDERS, FIELD_TYPES\nfrom ..misc import get_md5\n\nclass ISISCube:\n \"\"\"VIMS ISIS header object.\n\n Parameters\n ----------\n filename: str\n Input ISIS filename.\n\n \"\"\"\n\n def __init__(self, filename):\n self.filename = filename\n\n def __str__(self):\n return self.filename\n\n def __repr__(self):\n return f'<{self.__class__.__name__}> ISIS Cube: {self}'\n\n def __contains__(self, key):\n return key in self.keys()\n\n def __getitem__(self, key):\n if key not in self:\n raise KeyError(f'Key `{key}` not found.')\n\n if key in self.labels:\n return self.labels[key]\n\n if key in self.orig_lbl:\n return self.orig_lbl[key]\n\n return self.tables[key]\n\n @property\n def filename(self):\n return self.__filename\n\n @filename.setter\n def filename(self, filename):\n self.__filename = filename\n self.__pvl = None\n self.__labels = None\n self.__tables = None\n self.__history = None\n self.__orig_lbl = None\n self.__cube = None\n\n if not self.is_file:\n raise FileNotFoundError(f'File `{self.filename}` not found.')\n\n if not self.is_isis:\n raise ISISError(f'File `{self.filename}` is not in ISIS format.')\n\n @property\n def is_file(self):\n \"\"\"Check if the file exists.\"\"\"\n return os.path.exists(self.filename)\n\n @property\n def is_isis(self):\n \"\"\"Check if the file is in ISIS format.\"\"\"\n with open(self.filename, 'rb') as f:\n header = f.read(17)\n\n return header == b'Object = IsisCube'\n\n @property\n def md5(self):\n \"\"\"QUB MD5 hash.\"\"\"\n return get_md5(self.filename)\n\n @property\n def pvl(self):\n \"\"\"Full ISIS header in PVL format.\"\"\"\n if self.__pvl is None:\n self.__pvl = pvl.load(self.filename)\n return self.__pvl\n\n @property\n def labels(self):\n \"\"\"ISIS label labels.\"\"\"\n if self.__labels is None:\n self.__labels = ISISLabels(self.pvl)\n return self.__labels\n\n @property\n def tables(self):\n \"\"\"ISIS tables.\"\"\"\n if self.__tables is None:\n self.__tables = ISISTables(self.filename, self.pvl)\n return self.__tables\n\n @property\n def history(self):\n \"\"\"ISIS cube history.\"\"\"\n if self.__history is None:\n self.__history = ISISHistory(self.filename, self.pvl['History'])\n return self.__history\n\n @property\n def orig_lbl(self):\n \"\"\"ISIS cube original labels.\"\"\"\n if self.__orig_lbl is None:\n lbl = ISISHistory(self.filename, self.pvl['OriginalLabel'])['QUBE']\n self.__orig_lbl = lbl\n return self.__orig_lbl\n\n def keys(self):\n \"\"\"ISIS labels and tables keys.\"\"\"\n return set(\n list(self.labels.keys())\n + list(self.tables.keys())\n + list(self.orig_lbl.keys())\n )\n\n @property\n def header(self):\n \"\"\"Main ISIS Cube header.\"\"\"\n return self.pvl['IsisCube']\n\n @property\n def _core(self):\n \"\"\"ISIS core header.\"\"\"\n return self.header['Core']\n\n @property\n def _dim(self):\n \"\"\"ISIS dimension header.\"\"\"\n return self._core['Dimensions']\n\n @property\n def NS(self):\n 
\"\"\"Number of samples.\"\"\"\n return self._dim['Samples']\n\n @property\n def NL(self):\n \"\"\"Number of lines.\"\"\"\n return self._dim['Lines']\n\n @property\n def NB(self):\n \"\"\"Number of bands.\"\"\"\n return self._dim['Bands']\n\n @property\n def shape(self):\n \"\"\"Cube shape.\"\"\"\n return (self.NB, self.NL, self.NS)\n\n @property\n def _pix(self):\n \"\"\"ISIS core header.\"\"\"\n return self._core['Pixels']\n\n @property\n def dtype(self):\n \"\"\"Cube data type.\"\"\"\n return np.dtype(BYTE_ORDERS[self._pix['ByteOrder']]\n + FIELD_TYPES[self._pix['Type']])\n\n @property\n def _start_byte(self):\n \"\"\"Cube data start byte.\"\"\"\n return self._core['StartByte'] - 1\n\n @property\n def _nbytes(self):\n \"\"\"Cube data bytes size.\"\"\"\n return self.NB * self.NL * self.NS * self.dtype.itemsize\n\n @property\n def _base(self):\n \"\"\"Cube data base factor.\"\"\"\n return self._pix['Base']\n\n @property\n def _mult(self):\n \"\"\"Cube data multiplication factor.\"\"\"\n return self._pix['Multiplier']\n\n @property\n def cube(self):\n \"\"\"ISIS cube.\"\"\"\n if self.__cube is None:\n self.__cube = self._load_data()\n return self.__cube\n\n def _load_data(self):\n \"\"\"Load ISIS table data.\"\"\"\n with open(self.filename, 'rb') as f:\n f.seek(self._start_byte)\n data = f.read(self._nbytes)\n\n data = np.frombuffer(data, dtype=self.dtype) * self._mult + self._base\n data[self._is_null(data)] = np.nan\n return self._reshape(data)\n\n @property\n def _underflow(self):\n \"\"\"Data type underflow value.\"\"\"\n return np.finfo(self.dtype).min if self.dtype.char == 'f' \\\n else np.iinfo(self.dtype).min\n\n @property\n def _overflow(self):\n \"\"\"Data type overflow value.\"\"\"\n return np.finfo(self.dtype).max if self.dtype.char == 'f' \\\n else np.iinfo(self.dtype).max\n\n def _is_null(self, data, tol=1e-6):\n \"\"\"Find NULL values.\n\n Find the values lower than underflow or higher than overflow.\n\n Parameters\n ----------\n data: np.array\n Input array to test.\n tol: float\n Relative tolerance factor\n\n Returns\n -------\n np.array\n Location of the null values.\n\n \"\"\"\n return (np.abs(data / self._underflow) >= tol) | \\\n (np.abs(data / self._overflow) >= tol)\n\n @property\n def _TL(self):\n \"\"\"Number of tiles in the line direction.\"\"\"\n return self._core['TileLines']\n\n @property\n def _TS(self):\n \"\"\"Number of tiles in the sample direction.\"\"\"\n return self._core['TileSamples']\n\n def _reshape(self, data):\n \"\"\"Reshape data based on tile size.\"\"\"\n if self._TS == self.NS and self._TL == self.NL:\n return np.reshape(data, self.shape)\n\n size = np.size(data)\n shape = (size // (self._TL * self._TS), self._TL, self._TS)\n tiled_data = np.reshape(data, shape)\n\n # Stack in the samples direction\n shape = (size // (self._TL * self.NS), self.NS, self._TL)\n samples_stacked = np.moveaxis(\n np.moveaxis(tiled_data, 1, 2).reshape(shape), 1, 2)\n\n # Stack in the lines direction\n return np.reshape(samples_stacked, self.shape)\n\n @property\n def _bands(self):\n \"\"\"Cube band bin header.\"\"\"\n return self.header['BandBin']\n\n @property\n def bands(self):\n \"\"\"Cube bands numbers.\"\"\"\n return np.array(self._bands['OriginalBand'])\n\n @property\n def wvlns(self):\n \"\"\"Cube central wavelengths (um).\"\"\"\n return np.array([float(w[:-1]) if isinstance(w, str) else w\n for w in self._bands['Center']])\n\n @property\n def _inst(self):\n \"\"\"Cube instrument header.\"\"\"\n return self.header['Instrument']\n\n @property\n def 
start(self):\n \"\"\"Instrument start time (UTC).\"\"\"\n return _dt(self._inst['StartTime'])\n\n @property\n def stop(self):\n \"\"\"Instrument stop time (UTC).\"\"\"\n return _dt(self._inst['StopTime'])\n\n @property\n def duration(self):\n \"\"\"Instrument acquisition dureation.\"\"\"\n return self.stop - self.start\n\n @property\n def time(self):\n \"\"\"Instrument mid time (UTC).\"\"\"\n return self.start + self.duration / 2\n\n @property\n def _naif(self):\n \"\"\"NAIF keywords stored in ISIS header.\"\"\"\n return self.pvl['NaifKeywords']\n\n @property\n def exposure(self):\n \"\"\"ISIS header exposure duration.\"\"\"\n return self._inst['ExposureDuration']\n\n @property\n def kernels(self):\n \"\"\"List of kernels cached by ISIS.\"\"\"\n if 'Kernels' not in self:\n return None\n\n kernels = []\n for kernel in self['Kernels'].values():\n if isinstance(kernel, str) and '$' in kernel:\n kernels.append(kernel)\n elif isinstance(kernel, list):\n for k in kernel:\n if '$' in k:\n kernels.append(k)\n\n return sorted(kernels)\n\n @property\n def target_name(self):\n \"\"\"Main target name.\"\"\"\n return self._inst['TargetName']\n\n @property\n def target_radii(self):\n \"\"\"Main target radii (km).\"\"\"\n for k, v in self.pvl['NaifKeywords']:\n if 'RADII' in k:\n return v\n raise ValueError('Target radii not found in the header.')\n\n @property\n def target_radius(self):\n \"\"\"Main target mean radius (km).\"\"\"\n return np.power(np.prod(self.target_radii), 1 / 3)\n\n def dumps_header(self):\n \"\"\"Dumps cube header.\"\"\"\n return pvl.dumps(self.header).decode()\n" ]
[ [ "numpy.ndim", "numpy.shape" ], [ "numpy.sqrt", "numpy.transpose" ], [ "numpy.abs", "numpy.reshape", "numpy.dtype", "numpy.finfo", "numpy.frombuffer", "numpy.size", "numpy.iinfo", "numpy.prod", "numpy.moveaxis", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
seamount-knight/perf-tests
[ "0e406f34924f26756f3742eba3f3224d52c4f9e6" ]
[ "dns/py/data.py" ]
[ "#!/usr/bin/env python\n\n# Copyright 2016 The Kubernetes Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport numpy\nimport re\nimport sqlite3\n\nfrom params import PARAMETERS\n\n_log = logging.getLogger(__name__)\n\n\nclass Result(object):\n \"\"\"\n Represents a column in the results table.\n \"\"\"\n def __init__(self, name, val_type, regex):\n self.name = name\n self.val_type = val_type\n self.regex = regex\n\nRESULTS = [\n Result('queries_sent', int,\n re.compile(r'\\s*Queries sent:\\s*(\\d+)')),\n Result('queries_completed', int,\n re.compile(r'\\s*Queries completed:\\s*(\\d+).*')),\n Result('queries_lost', int,\n re.compile(r'\\s*Queries lost:\\s*(\\d+).*')),\n Result('run_time', float,\n re.compile(r'\\s*Run time \\(s\\):\\s*([0-9.]+)')),\n Result('qps', float,\n re.compile(r'\\s*Queries per second:\\s*([0-9.]+)')),\n\n Result('avg_latency', float,\n re.compile(r'\\s*Average Latency \\(s\\):\\s*([0-9.]+).*')),\n Result('min_latency', float,\n re.compile(r'\\s*Average Latency \\(s\\):.*min ([0-9.]+).*')),\n Result('max_latency', float,\n re.compile(r'\\s*Average Latency \\(s\\):.*max ([0-9.]+).*')),\n Result('stddev_latency', float,\n re.compile(r'\\s*Latency StdDev \\(s\\):\\s*([0-9.]+)')),\n Result('max_perfserver_cpu', int, None),\n Result('max_perfserver_memory', int, None),\n Result('max_kubedns_cpu', int, None),\n Result('max_kubedns_memory', int, None),\n # Derived results\n Result('latency_50_percentile', float, None),\n Result('latency_95_percentile', float, None),\n Result('latency_99_percentile', float, None),\n Result('latency_99_5_percentile', float, None),\n]\n\n\nclass Parser(object):\n \"\"\"\n Parses dnsperf output file.\n \"\"\"\n def __init__(self, out):\n self.lines = [x.strip() for x in out.split('\\n')]\n self.results = {}\n self.histogram = []\n\n def parse(self):\n self._parse_results()\n self._parse_histogram()\n self._compute_derived()\n\n def _parse_results(self):\n results = {}\n for line in self.lines:\n for result in RESULTS:\n if result.regex is None:\n continue\n match = result.regex.match(line)\n if not match:\n continue\n results[result.name] = result.val_type(match.group(1))\n self.results = results\n\n def _parse_histogram(self):\n lines = [x for x in self.lines if re.match('^#histogram .*', x)]\n for line in lines:\n match = re.match(r'^#histogram\\s+(\\d+) (\\d+)', line)\n rtt, count = [int(x) for x in match.groups()]\n self.histogram.append([rtt, count])\n\n def _compute_derived(self):\n # Note: not very efficient, but functional\n from functools import reduce\n histogram = reduce(\n list.__add__,\n [[rtt]*count for rtt, count in self.histogram],\n [])\n _log.debug('Latency histogram = %s', histogram)\n\n for name, ptile in [('latency_50_percentile', 50),\n ('latency_95_percentile', 95),\n ('latency_99_percentile', 99),\n ('latency_99_5_percentile', 99.5)]:\n self.results[name] = float(numpy.percentile(histogram, ptile)) # pylint: disable=no-member\n\n\nclass ResultDb(object):\n def __init__(self, dbfile):\n 
self.db = sqlite3.connect(dbfile)\n\n self.c = self.db.cursor()\n\n sql = \"\"\"-- run parameters\nCREATE TABLE IF NOT EXISTS runs (\n run_id,\n run_subid,\n pod_name,\n {params},\n primary key (run_id, run_subid, pod_name)\n)\"\"\".format(params=',\\n '.join([param.name for param in PARAMETERS]))\n self.c.execute(sql)\n _log.debug('%s', sql)\n\n sql = \"\"\"-- run results\nCREATE TABLE IF NOT EXISTS results (\n run_id,\n run_subid,\n pod_name,\n {results},\n primary key (run_id, run_subid, pod_name)\n)\"\"\".format(results=',\\n '.join([r.name for r in RESULTS]))\n _log.debug('%s', sql)\n self.c.execute(sql)\n\n sql = \"\"\"-- latency histogram\nCREATE TABLE IF NOT EXISTS histograms (\n run_id,\n run_subid,\n pod_name,\n rtt_ms,\n rtt_ms_count\n)\n\"\"\"\n _log.debug('%s', sql)\n self.c.execute(sql)\n\n _log.info('Using DB %s', dbfile)\n\n def put(self, results, ignore_if_dup=True):\n key = [results['params']['run_id'], results['params']['run_subid'], results['params']['pod_name']]\n if self._exists(key) and ignore_if_dup:\n _log.info('Ignoring duplicate results %s', key)\n return\n\n sql = ('INSERT INTO runs (run_id, run_subid, pod_name,'\n + ','.join([p.name for p in PARAMETERS])\n + ') VALUES ('\n + ','.join(['?'] * (3 + len(PARAMETERS)))\n + ')')\n _log.debug('runs sql -- %s', sql)\n self.c.execute(sql, key + [\n results['params'][p.name] if p.name in results['params'] else None\n for p in PARAMETERS\n ])\n\n sql = ('INSERT INTO results (run_id, run_subid, pod_name,'\n + ','.join([r.name for r in RESULTS])\n + ') VALUES ('\n + ','.join(['?'] * (3 + len(RESULTS)))\n + ')')\n _log.debug('results sql -- %s', sql)\n self.c.execute(sql, key +\n [results['data'][r.name]\n if r.name in results['data'] else None\n for r in RESULTS])\n\n for rtt_ms, count in results['data']['histogram']:\n data = {\n 'run_id': results['params']['run_id'],\n 'run_subid': results['params']['run_subid'],\n 'rtt_ms': rtt_ms,\n 'rtt_ms_count': count,\n }\n\n columns = ','.join(data.keys())\n qs = ','.join(['?'] * len(data))\n stmt = 'INSERT INTO histograms (' + columns + ') VALUES (' + qs + ')'\n _log.debug('histogram sql -- %s', stmt)\n self.c.execute(stmt, data.values())\n\n def get_results(self, run_id, run_subid):\n sql = ('SELECT ' + ','.join([r.name for r in RESULTS])\n + ' FROM results WHERE run_id = ? and run_subid = ? and pod_name = ?')\n _log.debug('%s', sql)\n self.c.execute(sql, (run_id, run_subid, pod_name))\n rows = self.c.fetchall()\n return dict(zip([r.name for r in RESULTS], rows[0])) if rows else None\n\n def _exists(self, key):\n self.c.execute(\n 'SELECT COUNT(*) FROM runs WHERE run_id = ? and run_subid = ? and pod_name = ?', key)\n count = self.c.fetchall()[0][0]\n return count != 0\n\n def commit(self):\n self.db.commit()\n" ]
[ [ "numpy.percentile" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
imatge-upc/pixelcoordEDL
[ "353632feed6ac8c93758c1a2a1b7a477e7ff053c", "353632feed6ac8c93758c1a2a1b7a477e7ff053c", "353632feed6ac8c93758c1a2a1b7a477e7ff053c" ]
[ "src/minerl/herobraine/data/pipeline_with_reward.py", "src/minerl/herobraine/hero/handlers/observables.py", "src/main/vqvae.py" ]
[ "import json\r\nimport logging\r\nimport multiprocessing\r\nimport os\r\nfrom collections import OrderedDict\r\nfrom queue import Queue, PriorityQueue\r\nfrom typing import List, Tuple, Any\r\n\r\nimport cv2\r\nimport numpy as np\r\nfrom multiprocess.pool import Pool\r\n\r\nfrom minerl.herobraine.hero.agent_handler import HandlerCollection, AgentHandler\r\nfrom minerl.herobraine.hero.handlers import RewardHandler\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass DataPipelineWithReward:\r\n \"\"\"\r\n Creates a data pipeline that also outputs discounted reward.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n observables: List[AgentHandler],\r\n actionables: List[AgentHandler],\r\n mission_handlers: List[AgentHandler],\r\n nsteps,\r\n gamma,\r\n data_directory,\r\n num_workers,\r\n worker_batch_size,\r\n min_size_to_dequeue):\r\n \"\"\"\r\n Sets up a tensorflow dataset to load videos from a given data directory.\r\n :param data_directory: the directory of the data to be loaded, eg: 'minerl.herobraine_parse/output/rendered/'\r\n \"\"\"\r\n\r\n self.data_dir = data_directory\r\n self.observables = observables\r\n self.actionables = actionables\r\n self.mission_handlers = mission_handlers\r\n # self.vectorizer = vectorizer\r\n\r\n self.number_of_workers = num_workers\r\n self.worker_batch_size = worker_batch_size\r\n self.size_to_dequeue = min_size_to_dequeue\r\n self.nsteps = nsteps\r\n self.gamma = gamma\r\n\r\n self.processing_pool = Pool(self.number_of_workers)\r\n self.m = multiprocessing.Manager()\r\n self.data_queue = self.m.Queue(maxsize=self.size_to_dequeue//self.worker_batch_size*4)\r\n\r\n pool_size = self.size_to_dequeue*4\r\n self.random_queue = PriorityQueue(maxsize=pool_size)\r\n\r\n def batch_iter(self, batch_size):\r\n \"\"\"\r\n Returns a generator for iterating through batches of the dataset.\r\n :param batch_size:\r\n :param number_of_workers:\r\n :param worker_batch_size:\r\n :param size_to_dequeue:\r\n :return:\r\n \"\"\"\r\n logger.info(\"Starting batch iterator on {}\".format(self.data_dir))\r\n data_list = self._get_all_valid_recordings(self.data_dir)\r\n\r\n load_data_func = self._get_load_data_func(self.data_queue, self.nsteps,\r\n self.worker_batch_size, self.mission_handlers,\r\n self.observables, self.actionables,\r\n self.gamma)\r\n map_promise = self.processing_pool.map_async(load_data_func, data_list)\r\n\r\n # We map the files -> load_data -> batch_pool -> random shuffle -> yield.\r\n # batch_pool = []\r\n start = 0\r\n incr = 0\r\n while not map_promise.ready() or not self.data_queue.empty() or not self.random_queue.empty():\r\n #print(\"d: {} r: {}\".format(data_queue.qsize(), random_queue.qsize()))\r\n\r\n while not self.data_queue.empty() and not self.random_queue.full():\r\n for ex in self.data_queue.get():\r\n if not self.random_queue.full():\r\n r_num = np.random.rand(1)[0]*(1 - start) + start\r\n self.random_queue.put(\r\n (r_num, ex)\r\n )\r\n incr += 1\r\n # print(\"d: {} r: {} rqput\".format(data_queue.qsize(), random_queue.qsize()))\r\n else:\r\n break\r\n\r\n if incr > self.size_to_dequeue:\r\n if self.random_queue.qsize() < (batch_size):\r\n if map_promise.ready():\r\n break\r\n else:\r\n continue\r\n batch_with_incr = [self.random_queue.get() for _ in range(batch_size)]\r\n\r\n r1, batch = zip(*batch_with_incr)\r\n start = 0\r\n traj_obs, traj_acts, traj_handlers, traj_n_obs, discounted_rewards, elapsed = zip(*batch)\r\n\r\n observation_batch = [\r\n HandlerCollection({\r\n o: np.asarray(traj_ob[i]) for i, o in 
enumerate(self.observables)\r\n }) for traj_ob in traj_obs\r\n ]\r\n action_batch = [\r\n HandlerCollection({\r\n a: np.asarray(traj_act[i]) for i, a in enumerate(self.actionables)\r\n }) for traj_act in traj_acts\r\n ]\r\n mission_handler_batch = [\r\n HandlerCollection({\r\n m: np.asarray(traj_handler[i]) for i, m in enumerate(self.mission_handlers)\r\n }) for traj_handler in traj_handlers\r\n ]\r\n next_observation_batch = [\r\n HandlerCollection({\r\n o: np.asarray(traj_n_ob[i]) for i, o in enumerate(self.observables)\r\n }) for traj_n_ob in traj_n_obs\r\n ]\r\n yield observation_batch, action_batch, mission_handler_batch, next_observation_batch, discounted_rewards, elapsed\r\n # Move on to the next batch bool.\r\n # Todo: Move to a running pool, sampling as we enqueue. This is basically the random queue impl.\r\n # Todo: This will prevent the data from getting arbitrarily segmented.\r\n # batch_pool = []\r\n try:\r\n map_promise.get()\r\n except RuntimeError as e:\r\n logger.error(\"Failure in data pipeline: {}\".format(e))\r\n\r\n logger.info(\"Epoch complete.\")\r\n \r\n def close(self):\r\n self.processing_pool.close()\r\n self.processing_pool.join()\r\n\r\n ############################\r\n ## PRIVATE METHODS\r\n #############################\r\n\r\n @staticmethod\r\n def _get_load_data_func(data_queue, nsteps, worker_batch_size, mission_handlers,\r\n observables, actionables, gamma):\r\n def _load_data(inst_dir):\r\n recording_path = str(os.path.join(inst_dir, 'recording.mp4'))\r\n univ_path = str(os.path.join(inst_dir, 'univ.json'))\r\n\r\n try:\r\n cap = cv2.VideoCapture(recording_path)\r\n # Litty uni\r\n with open(univ_path, 'r') as f:\r\n univ = {int(k): v for (k, v) in (json.load(f)).items()}\r\n univ = OrderedDict(univ)\r\n univ = np.array(list(univ.values()))\r\n\r\n # Litty viddy\r\n batches = []\r\n rewards = []\r\n frames_queue = Queue(maxsize=nsteps)\r\n\r\n # Loop through the video and construct frames\r\n # of observations to be sent via the multiprocessing queue\r\n # in chunks of worker_batch_size to the batch_iter loop.\r\n frame_num = 0\r\n while True:\r\n ret, frame = cap.read()\r\n \r\n if not ret or frame_num >= len(univ):\r\n break\r\n else:\r\n #print(\"Batches {} and worker batch size {}\".format(len(batches), self.worker_batch_size))\r\n if len(batches) >= worker_batch_size:\r\n data_queue.put(batches)\r\n batches = []\r\n\r\n try:\r\n # Construct a single observation object.\r\n vf = (np.clip(frame[:, :, ::-1], 0, 255))\r\n uf = univ[frame_num]\r\n\r\n frame = {'pov': vf}\r\n frame.update(uf)\r\n\r\n cur_reward = 0\r\n for m in mission_handlers:\r\n try:\r\n if isinstance(m, RewardHandler):\r\n cur_reward += m.from_universal(frame)\r\n except NotImplementedError:\r\n pass\r\n rewards.append(cur_reward)\r\n\r\n #print(\"Frames queue size {}\".format(frames_queue.qsize()))\r\n frames_queue.put(frame)\r\n if frames_queue.full():\r\n next_obs = [o.from_universal(frame) for o in observables]\r\n frame = frames_queue.get()\r\n obs = [o.from_universal(frame) for o in observables]\r\n act = [a.from_universal(frame) for a in actionables]\r\n mission = []\r\n for m in mission_handlers:\r\n try:\r\n mission.append(m.from_universal(frame))\r\n except NotImplementedError:\r\n mission.append(None)\r\n pass\r\n \r\n batches.append((obs, act, mission, next_obs, DataPipelineWithReward._calculate_discount_rew(rewards[-nsteps:], gamma), frame_num + 1 - nsteps))\r\n except Exception as e:\r\n # If there is some error constructing the batch we just start a new sequence\r\n # 
at the point that the exception was observed\r\n logger.warn(\"Exception {} caught in the middle of parsing {} in \"\r\n \"a worker of the data pipeline.\".format(e, inst_dir))\r\n\r\n frame_num += 1\r\n\r\n return batches\r\n except Exception as e:\r\n logger.error(\"Caught Exception\")\r\n raise e\r\n return None\r\n return _load_data\r\n\r\n @staticmethod\r\n def _calculate_discount_rew(rewards, gamma):\r\n total_reward = 0\r\n for i, rew in enumerate(rewards):\r\n total_reward += (gamma ** i) * rew\r\n return total_reward\r\n\r\n @staticmethod\r\n def _get_all_valid_recordings(path):\r\n directoryList = []\r\n\r\n # return nothing if path is a file\r\n if os.path.isfile(path):\r\n return []\r\n\r\n # add dir to directorylist if it contains .txt files\r\n if len([f for f in os.listdir(path) if f.endswith('.mp4')]) > 0:\r\n if len([f for f in os.listdir(path) if f.endswith('.json')]) > 0:\r\n directoryList.append(path)\r\n\r\n for d in os.listdir(path):\r\n new_path = os.path.join(path, d)\r\n if os.path.isdir(new_path):\r\n directoryList += DataPipelineWithReward._get_all_valid_recordings(new_path)\r\n\r\n directoryList = np.array(directoryList)\r\n np.random.shuffle(directoryList)\r\n return directoryList.tolist()\r\n", "import logging\nfrom typing import Tuple\n\nimport gym\nimport numpy as np\n\nfrom minerl.herobraine.hero import AgentHandler, mc, spaces\n\n\ndef strip_of_prefix(minecraft_name):\n # Names in minecraft start with 'minecraft:', like:\n # 'minecraft:log', or 'minecraft:cobblestone'\n if minecraft_name.startswith('minecraft:'):\n return minecraft_name[len('minecraft:'):]\n\n return minecraft_name\n\n\nclass ObservationFromFullStats(AgentHandler):\n logger = logging.getLogger(__name__ + \".ObservationFromFullStats\")\n\n def to_string(self):\n return 'observation_from_full_stats'\n\n @staticmethod\n def command_list():\n return ['XPos', 'ZPos', ]\n\n def __init__(self):\n space = spaces.Box(0, 1, [6], dtype=np.float32)\n super().__init__(space)\n\n def flaten_handler(self):\n return []\n\n def from_universal(self, x):\n try:\n return self.space.sample()\n except NotImplementedError:\n raise NotImplementedError('Observation from full state not implementing from_universal')\n\nclass POVObservation(AgentHandler):\n \"\"\"\n Handles POV observations.\n \"\"\"\n logger = logging.getLogger(__name__ + \".POVObservation\")\n\n def to_string(self):\n return 'pov'\n\n def __init__(self, video_resolution: Tuple[int, int], include_depth: bool = False):\n self.include_depth = include_depth\n self.video_resolution = video_resolution\n space = None\n if include_depth:\n space = spaces.Box(0, 255, list(video_resolution)[::-1] + [4], dtype=np.uint8)\n self.video_depth = 4\n\n else:\n space = spaces.Box(0, 255, list(video_resolution)[::-1] + [3], dtype=np.uint8)\n self.video_depth = 3\n self.video_height = video_resolution[0]\n self.video_width = video_resolution[1]\n\n super().__init__(space)\n\n def add_to_mission_spec(self, mission_spec):\n if self.include_depth:\n mission_spec.requestVideoWithDepth(*self.video_resolution)\n else:\n mission_spec.requestVideo(*self.video_resolution)\n\n def from_universal(self, obs):\n if \"pov\" in obs:\n assert not np.isnan(np.sum(obs[\"pov\"])), \"NAN in observation!\"\n return obs[\"pov\"]\n else:\n self.logger.warning(\"No video found in universal observation! 
Yielding 0 image.\")\n return self.space.sample() * 0\n\n def from_hero(self, obs):\n # process the video frame\n if \"video\" in obs:\n return obs[\"video\"]\n else:\n self.logger.warning(\"No video found in observation! Yielding 0 image.\")\n return self.space.sample() * 0\n\n def __or__(self, other):\n \"\"\"\n Combines two POV observations into one. If all of the properties match return self\n otherwise raise an exception.\n \"\"\"\n if isinstance(other, POVObservation) and self.include_depth == other.include_depth and \\\n self.video_resolution == other.video_resolution:\n return POVObservation(self.video_resolution, include_depth=self.include_depth)\n else:\n raise ValueError(\"Incompatible observables!\")\n\n # def __eq__(self, other):\n\nclass GUIContainerObservation(AgentHandler):\n \"\"\"\n Handles GUI Container Observations.\n # Todo investigate obs['inventoryAvailable']\n In addition to this information, whether {{{flat}}} is true or false, an array called \"inventoriesAvailable\" will also be returned.\n This will contain a list of all the inventories available (usually just the player's, but if the player is pointed at a container, this\n will also be available.)\n \"\"\"\n\n ITEM_ATTRS = [\n \"item\",\n \"variant\",\n \"size\"\n ]\n\n def to_string(self):\n return 'gui_container'\n\n def __init__(self, container_name, num_slots):\n super().__init__(spaces.Tuple([\n spaces.MultiDiscrete([len(mc.MC_ITEM_IDS), 16, 64], dtype=np.int32) for _ in range(num_slots)]))\n self.container_name = container_name\n self.num_slots = num_slots\n\n def from_universal(self, x):\n raise NotImplementedError('from_universal not implemented in GuiContainerObservation')\n\n def from_hero(self, obs):\n \"\"\"\n Converts the Hero observation into a one-hot of the inventory items\n for a given inventory container.\n :param obs:\n :return:\n \"\"\"\n keys = [k for k in obs if k.startswith(self.container_name)]\n hotbar_vec = [[0] * len(GUIContainerObservation.ITEM_ATTRS)\n for _ in range(self.num_slots)]\n for k in keys:\n normal_k = k.split(self.container_name + \"_\")[-1]\n sid, attr = normal_k.split(\"_\")\n\n # Parse the attribute.\n if attr == \"item\":\n val = mc.get_item_id(obs[k])\n elif attr == \"variant\":\n val = 0 # Todo: Implement variants\n elif attr == \"size\":\n val = int(obs[k])\n else:\n # Unknown type is not supported!\n # Todo: Investigate unknown types.\n break\n\n # Add it to the vector.\n attr_id = GUIContainerObservation.ITEM_ATTRS.index(attr)\n hotbar_vec[int(sid)][int(attr_id)] = val\n\n return hotbar_vec\n\n def __or__(self, other):\n \"\"\"\n Combines two gui container observations into one.\n The new observable has the max of self and other's num_slots.\n Container names must match.\n \"\"\"\n if isinstance(other, GUIContainerObservation):\n if self.container_name != other.container_name:\n raise ValueError(\"Observations can only be combined if they share a container name.\")\n return GUIContainerObservation(self.container_name, max(self.num_slots, other.num_slots))\n else:\n raise ValueError('Observations can only be combined with gui container observations')\n\n def __eq__(self, other):\n return (\n isinstance(other, GUIContainerObservation)\n and self.container_name == other.container_name\n and self.num_slots == other.num_slots)\n\nclass FlatInventoryObservation(AgentHandler):\n \"\"\"\n Handles GUI Container Observations for selected items\n \"\"\"\n\n def to_string(self):\n return 'inventory'\n\n def to_hero(self, x) -> str:\n raise 
NotImplementedError('FlatInventoryObservation must implement to_hero')\n\n logger = logging.getLogger(__name__ + \".FlatInventoryObservation\")\n\n def __init__(self, item_list):\n item_list = sorted(item_list)\n super().__init__(spaces.Dict(spaces={\n k: spaces.Box(low=0, high=2304, shape=(), dtype=np.int32, normalizer_scale='log')\n for k in item_list\n }))\n self.num_items = len(item_list)\n self.items = item_list\n\n def add_to_mission_spec(self, mission_spec):\n pass\n # Flat obs not supported by API for some reason - should be mission_spec.observeFullInventory(flat=True)\n\n def from_hero(self, obs):\n \"\"\"\n Converts the Hero observation into a one-hot of the inventory items\n for a given inventory container. Ignores variant / color\n :param obs:\n :return:\n \"\"\"\n item_dict = self.space.no_op()\n if 'inventory' in obs:\n # TODO change to map\n for stack in obs['inventory']:\n if 'type' in stack and 'quantity' in stack:\n try:\n i = self.items.index(stack['type'])\n item_dict[stack['type']] += stack['quantity']\n except ValueError:\n continue\n else:\n self.logger.warning(\"No inventory found in malmo observation! Yielding empty inventory.\")\n self.logger.warning(obs)\n\n # TODO: ADD LOGG\n return item_dict\n\n def from_universal(self, obs):\n \n item_dict = self.space.no_op()\n\n try:\n if obs['slots']['gui']['type'] == 'class net.minecraft.inventory.ContainerPlayer' or \\\n obs['slots']['gui']['type'] == 'class net.minecraft.inventory.ContainerWorkbench':\n slots = obs['slots']['gui']['slots'][1:]\n elif obs['slots']['gui']['type'] == 'class net.minecraft.inventory.ContainerFurnace':\n slots = obs['slots']['gui']['slots'][0:2] + obs['slots']['gui']['slots'][3:]\n else:\n slots = obs['slots']['gui']['slots']\n\n # Add in the cursor item tracking if present\n try:\n slots.append(obs['slots']['gui']['cursor_item'])\n except KeyError:\n pass\n\n # Add from all slots\n for stack in slots:\n try:\n name = strip_of_prefix(stack['name'])\n name = 'log' if name == 'log2' else name\n item_dict[name] += stack['count']\n except (KeyError, ValueError):\n continue\n\n except KeyError as e:\n self.logger.warning(\"KeyError found in universal observation! 
Yielding empty inventory.\")\n self.logger.error(e)\n return item_dict\n\n return item_dict\n\n def __or__(self, other):\n \"\"\"\n Combines two flat inventory observations into one by taking the\n union of their items.\n Asserts that other is also a flat observation.\n \"\"\"\n assert isinstance(other, FlatInventoryObservation)\n return FlatInventoryObservation(list(set(self.items) | (set(other.items))))\n\n def __eq__(self, other):\n return isinstance(other, FlatInventoryObservation) and \\\n (self.items) == (other.items)\n\nclass DeathObservation(AgentHandler):\n\n def to_string(self):\n return 'alive'\n\n def from_hero(self, obs_dict):\n return obs_dict[\"IsAlive\"] if \"IsAlive\" in obs_dict else True\n\nclass HotbarObservation(GUIContainerObservation):\n \"\"\"\n Handles hotbar observation.\n \"\"\"\n\n def to_string(self):\n return 'hotbar'\n\n def __init__(self):\n super().__init__(\"Hotbar\", 9)\n\n def add_to_mission_spec(self, mission_spec):\n mission_spec.observeHotBar()\n\nclass TypeObservation(AgentHandler):\n \"\"\"\n Returns the item list index of the tool in the given hand\n List must start with 'none' as 0th element and end with 'other' as wildcard element\n \"\"\"\n\n def __init__(self, hand: str, items: list):\n \"\"\"\n Initializes the space of the handler with a spaces.Dict\n of all of the spaces for each individual command.\n \"\"\"\n self._items = sorted(items)\n self._hand = hand\n self._univ_items = ['minecraft:' + item for item in items]\n self._default = 'none' # 'none'\n self._other = 'other' # 'othe\n assert 'other' in items\n assert 'none' in items\n super().__init__(spaces.Enum(*self._items, default='none'))\n\n @property\n def items(self):\n return self._items\n\n @property\n def universal_items(self):\n return self._univ_items\n\n @property\n def hand(self):\n return self._hand\n\n def proc(self, hero_obs):\n minerl_obs = {}\n for o in self.task.observation_handlers:\n minerl_obs[o.to_string()] = o.from_hero(hero_obs)\n\n\n\n @property\n def default(self):\n return self._default\n\n def to_string(self):\n return 'equipped_items.{}.type'.format(self._hand)\n\n def from_hero(self, obs_dict):\n return obs_dict['equipped_item']['mainhand']['type']\n\n def from_universal(self, obs):\n try:\n if self._hand == 'mainhand':\n offset = -9\n hotbar_index = obs['hotbar']\n if obs['slots']['gui']['type'] == 'class net.minecraft.inventory.ContainerPlayer':\n offset -= 1\n\n item_name = (\n obs['slots']['gui']['slots'][offset + hotbar_index]['name'].split(\"minecraft:\")[-1])\n if not item_name in self._items:\n raise ValueError()\n if item_name == 'air':\n raise KeyError()\n\n return item_name\n else: \n raise NotImplementedError('type not implemented for hand type' + self._hand)\n except KeyError:\n # No item in hotbar slot - return 'none'\n return 'none'\n except ValueError:\n return 'other'\n\n def add_to_mission_spec(self, mission_spec):\n raise NotImplementedError('add_to_mission_spec not implemented for TypeObservation')\n # mission_spec.observeEquipedDurrability()\n\n def __or__(self, other):\n \"\"\"\n Combines two TypeObservation's (self and other) into one by \n taking the union of self.items and other.items\n \"\"\"\n if isinstance(other, TypeObservation):\n return TypeObservation(self.hand, list(set(self.items + other.items)))\n else:\n raise TypeError('Operands have to be of type TypeObservation')\n\n def __eq__(self, other):\n return self.hand == other.hand and self.items == other.items\n\n\nclass DamageObservation(AgentHandler):\n \"\"\"\n Returns the 
item list index of the tool in the given hand\n List must start with 'none' as 0th element and end with 'other' as wildcard element\n \"\"\"\n\n def __init__(self, hand: str):\n \"\"\"\n Initializes the space of the handler with a spaces.Dict\n of all of the spaces for each individual command.\n \"\"\"\n\n self._hand = hand\n self._default = 0 # 'none'\n super().__init__(spaces.Box(low=-1, high=1562, shape=(), dtype=np.int))\n\n @property\n def hand(self):\n return self._hand\n\n @property\n def default(self):\n return self._default\n\n def to_string(self):\n return 'equipped_items.{}.damage'.format(self._hand)\n\n def from_universal(self, obs):\n try:\n if self._hand == 'mainhand':\n offset = -9\n hotbar_index = obs['hotbar']\n if obs['slots']['gui']['type'] == 'class net.minecraft.inventory.ContainerPlayer':\n offset -= 1\n if obs['slots']['gui']['slots'][offset + hotbar_index]['maxDamage'] > 0:\n return np.array(obs['slots']['gui']['slots'][offset + hotbar_index]['damage'], dtype=np.int32)\n else:\n return np.array(self._default, dtype=np.int32)\n else:\n raise NotImplementedError('damage not implemented for hand type' + self._hand)\n except KeyError:\n return np.array(self._default, dtype=np.int32)\n\n def add_to_mission_spec(self, mission_spec):\n raise NotImplementedError('add_to_mission_spec not implemented for TypeObservation')\n\n def __eq__(self, other):\n return isinstance(other, self.__class__) and self._hand == other._hand\n\n\nclass MaxDamageObservation(AgentHandler):\n \"\"\"\n Returns the item list index of the tool in the given hand\n List must start with 'none' as 0th element and end with 'other' as wildcard element\n \"\"\"\n\n def __init__(self, hand: str):\n \"\"\"\n Initializes the space of the handler with a spaces.Dict\n of all of the spaces for each individual command.\n \"\"\"\n\n self._hand = hand\n self._default = 0 # 'none'\n super().__init__(spaces.Box(low=-1, high=1562, shape=(), dtype=np.int))\n\n @property\n def hand(self):\n return self._hand\n\n @property\n def default(self):\n return self._default\n\n def to_string(self):\n return 'equipped_items.{}.maxDamage'.format(self._hand)\n\n def from_universal(self, obs):\n try:\n if self._hand == 'mainhand':\n offset = -9\n hotbar_index = obs['hotbar']\n if obs['slots']['gui']['type'] == 'class net.minecraft.inventory.ContainerPlayer':\n offset -= 1\n return np.array(obs['slots']['gui']['slots'][offset + hotbar_index]['maxDamage'], dtype=np.int32)\n else:\n raise NotImplementedError('damage not implemented for hand type' + self._hand)\n except KeyError:\n return np.array(self._default, dtype=np.int32)\n\n def add_to_mission_spec(self, mission_spec):\n raise NotImplementedError('add_to_mission_spec not implemented for TypeObservation')\n\n def __eq__(self, other):\n return isinstance(other, self.__class__) and self._hand == other._hand\n\n\nclass PlayerInventoryObservation(GUIContainerObservation):\n \"\"\"\n Handles player inventory observations.\n \"\"\"\n\n def to_string(self):\n return 'normal_inventory'\n\n def __init__(self):\n super().__init__(\"InventorySlot\", 41)\n\n def add_to_mission_spec(self, mission_spec):\n mission_spec.observeFullInventory()\n\n def from_universal(self, x):\n # Todo: Universal\n pass\n\n\nclass CompassObservation(AgentHandler):\n \"\"\"\n Handles compass observations.\n \"\"\"\n logger = logging.getLogger(__name__ + \".CompassObservation\")\n\n def to_string(self):\n return 'compassAngle'\n\n def __init__(self):\n\n super().__init__(spaces.Box(low=-180.0, high=180.0, shape=(), 
dtype=np.float32))\n\n def add_to_mission_spec(self, mission_spec):\n mission_spec.observeCompass()\n\n def from_universal(self, obs):\n if \"compass\" in obs and \"angle\" in obs[\"compass\"]:\n y = np.array(((obs[\"compass\"][\"angle\"] * 360.0 + 180) % 360.0) - 180)\n return y\n else:\n self.logger.warning(\"No compass angle found in universal observation! Yielding random angle.\")\n return self.space.sample()\n\n def from_hero(self, obs):\n # TODO np datatype parameter support for compressed replay buffers\n # process the compass handler\n if \"angle\" in obs:\n t = np.array((obs['angle'] + 0.5) % 1.0);\n return t\n else:\n self.logger.warning(\"No compass found in observation! Yielding random angle.\")\n return self.space.sample()\n\n\nclass CompassDistanceObservation(AgentHandler):\n \"\"\"\n Handles compass observations.\n \"\"\"\n logger = logging.getLogger(__name__ + \".CompassDistanceObservation\")\n\n def to_string(self):\n return 'compass_distance'\n\n def __init__(self):\n\n super().__init__(spaces.Box(low=0, high=128, shape=(1,), dtype=np.uint8))\n\n def add_to_mission_spec(self, mission_spec):\n mission_spec.observeCompass()\n\n def from_universal(self, obs):\n if \"compass\" in obs and \"distance\" in obs[\"compass\"]:\n return [obs['compass']['distance']]\n else:\n self.logger.warning(\"No compass angle found in universal observation! Yielding random distance.\")\n return self.space.sample()\n\n def from_hero(self, obs):\n # process the compass handler\n if \"distance\" in obs:\n return np.array([obs['distance']])\n else:\n print(obs)\n self.logger.warning(\"No compass found in observation! Yielding random distance.\")\n return self.space.sample()\n\n\nclass ChatObservation(AgentHandler):\n \"\"\"\n Handles chat observations.\n \"\"\"\n\n def to_string(self):\n return 'chat'\n\n def __init__(self):\n super().__init__(spaces.Text([1]))\n\n def add_to_mission_spec(self, mission_spec):\n mission_spec.observeChat()\n\n def from_hero(self, x):\n # Todo: From Hero\n pass\n\n\nclass RecentCommandsObservation(AgentHandler):\n \"\"\"\n Handles recent command observations\n \"\"\"\n\n def to_string(self):\n return 'recent_commands'\n\n def __init__(self):\n super().__init__(spaces.Text([1]))\n\n def add_to_mission_spec(self, mission_spec):\n mission_spec.observeRecentCommands()\n\n def from_hero(self, x):\n # Todo: From Heri\n\n pass\n", "import os\nimport csv\nimport time\nimport wandb\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport pytorch_lightning as pl\nimport matplotlib.pyplot as plt\n\nfrom os.path import join\nfrom pathlib import Path\nfrom pprint import pprint\nfrom config import setSeed, getConfig\nfrom collections import Counter, defaultdict\nfrom main.utils import *\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\n\nfrom torch.utils.data import DataLoader\nfrom torchvision.utils import make_grid\nfrom customLoader import *\nfrom torchvision.transforms import transforms\n\nfrom models.CustomVQVAE import VQVAE_PL\n\nfrom pytorch_lightning.loggers import WandbLogger\n\nfrom mod.q_functions import parse_arch\nfrom sklearn.cluster import KMeans\n\nclass VQVAE(VQVAE_PL):\n def __init__(self, conf):\n super(VQVAE, self).__init__(conf['data_type'], **conf['vqvae'])\n\n self.experiment = conf['experiment']\n self.batch_size = conf['batch_size']\n self.lr = conf['lr']\n self.split = conf['split']\n self.num_clusters = conf['vqvae']['num_embeddings']\n\n self.delay = conf['delay']\n self.trajectories = 
conf['trajectories']\n self.trajectories_train, self.trajectories_val = get_train_val_split(self.trajectories, self.split)\n\n self.conf = {\n 'k_std': conf['k_std'], \n 'k_mean': conf['k_mean'],\n 'data_type': conf['data_type']\n }\n\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5), (1.0,1.0,1.0))\n ])\n\n self.test = conf['test']\n self.type = self.test['type']\n self.shuffle = self.test['shuffle']\n self.limit = self.test['limit']\n\n\n def on_train_start(self):\n embeddings = []\n\n print(\"Computing embeddings...\")\n for batch in self.trainer.train_dataloader:\n z_1 = self.model.compute_embedding(batch, self.device)\n embeddings.append(z_1.detach().cpu().numpy())\n\n e = np.concatenate(np.array(embeddings))\n\n print(\"Computing kmeans...\")\n kmeans = KMeans(n_clusters=self.num_clusters, random_state=0).fit(e)\n\n kmeans_tensor = torch.from_numpy(kmeans.cluster_centers_).to(self.device)\n self.model._vq_vae._embedding.weight = nn.Parameter(kmeans_tensor)\n self.model._vq_vae._ema_w = nn.Parameter(kmeans_tensor)\n \n def training_step(self, batch, batch_idx):\n\n loss = self.model(batch, batch_idx, self.logger, \"train\")\n\n return loss\n\n def validation_step(self, batch, batch_idx):\n\n\n loss = self.model(batch, batch_idx, self.logger, \"val\")\n\n return loss\n\n def on_epoch_end(self):\n self.model.log_reconstructions(self.trainer.train_dataloader, self.logger)\n\n\n def configure_optimizers(self):\n return torch.optim.Adam(params=self.parameters(), lr=self.lr, weight_decay=1e-5)\n\n def train_dataloader(self):\n train_dataset = CustomMinecraftData(self.trajectories_train, transform=self.transform, delay=self.delay, **self.conf)\n train_dataloader = DataLoader(train_dataset, batch_size=self.batch_size, shuffle=True, num_workers=2)\n return train_dataloader\n\n def val_dataloader(self):\n val_dataset = CustomMinecraftData(self.trajectories_val, transform=self.transform, delay=self.delay, **self.conf)\n val_dataloader = DataLoader(val_dataset, batch_size=self.batch_size, shuffle=False, num_workers=2)\n return val_dataloader\n\n def _construct_map(self):\n construct_map(self)\n" ]
[ [ "numpy.clip", "numpy.asarray", "numpy.random.shuffle", "numpy.random.rand", "numpy.array" ], [ "numpy.array", "numpy.sum" ], [ "torch.nn.Parameter", "sklearn.cluster.KMeans", "torch.utils.data.DataLoader", "torch.from_numpy", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CN-TU/remy
[ "0c0887322b0cbf6e3497e3aeb95c979907f03623" ]
[ "async_deep_reinforce/a3c_visualize.py" ]
[ "# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport sys\nimport matplotlib.pyplot as plt\nimport random\n\nfrom game_state import GameState\nfrom game_ac_network import GameACFFNetwork, GameACLSTMNetwork\nfrom a3c_training_thread import A3CTrainingThread\nfrom rmsprop_applier import RMSPropApplier\n\nfrom constants import ACTION_SIZE\nfrom constants import PARALLEL_SIZE\nfrom constants import MAX_TIME_STEP\nfrom constants import CHECKPOINT_DIR\nfrom constants import RMSP_EPSILON\nfrom constants import RMSP_ALPHA\nfrom constants import GRAD_NORM_CLIP\nfrom constants import USE_GPU\nfrom constants import USE_LSTM\n\n# use CPU for weight visualize tool\ndevice = \"/cpu:0\"\n\nif USE_LSTM:\n global_network = GameACLSTMNetwork(ACTION_SIZE, -1, device)\nelse:\n global_network = GameACFFNetwork(ACTION_SIZE, -1, device)\n\ntraining_threads = []\n\nlearning_rate_input = tf.placeholder(PRECISION)\n\ngrad_applier = RMSPropApplier(learning_rate = learning_rate_input,\n decay = RMSP_ALPHA,\n momentum = 0.0,\n epsilon = RMSP_EPSILON,\n clip_norm = GRAD_NORM_CLIP,\n device = device)\n\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nsaver = tf.train.Saver()\ncheckpoint = tf.train.get_checkpoint_state(CHECKPOINT_DIR)\nif checkpoint and checkpoint.model_checkpoint_path:\n saver.restore(sess, checkpoint.model_checkpoint_path)\n print(\"checkpoint loaded:\", checkpoint.model_checkpoint_path)\nelse:\n print(\"Could not find old checkpoint\")\n \nW_conv1 = sess.run(global_network.W_conv1)\n\n# show graph of W_conv1\nfig, axes = plt.subplots(4, 16, figsize=(12, 6),\n subplot_kw={'xticks': [], 'yticks': []})\nfig.subplots_adjust(hspace=0.1, wspace=0.1)\n\nfor ax,i in zip(axes.flat, range(4*16)):\n inch = i//16\n outch = i%16\n img = W_conv1[:,:,inch,outch]\n ax.imshow(img, cmap=plt.cm.gray, interpolation='nearest')\n ax.set_title(str(inch) + \",\" + str(outch))\n\nplt.show()\n\n" ]
[ [ "tensorflow.train.get_checkpoint_state", "tensorflow.placeholder", "matplotlib.pyplot.subplots", "tensorflow.global_variables_initializer", "tensorflow.Session", "tensorflow.train.Saver", "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
00sapo/MMSP2021-Audio2ScoreAlignment
[ "192919efd1d4f8fe8c435d09b0063b5b3406123a" ]
[ "alignment/alignment_tafe.py" ]
[ "import numpy as np\n# from dtw import dtw\nimport fastdtw\n\nfrom . import utils, cdist\n\n# from scipy.spatial.distance import cosine\n\nSTART_NOTE = 21\nEPS = np.finfo(np.float64).eps\n#: how many realignment do\nNUM_REALIGNMENT = 3\n#: how many seconds for each hop size in fine alignment\nFINE_HOP = [5, 2.5, 0.5]\n# FINE_HOP = [90 / (2**i) for i in range(NUM_REALIGNMENT)]\n#: how many seconds for each window in fine alignment\nFINE_WIN = [10, 5, 1]\n\n\ndef _my_prep_inputs(x, y, dist):\n \"\"\"\n Fastdtw sucks too and convicts you to use float64...\n \"\"\"\n return x, y\n\n\ndef dtw_align(pianoroll, audio_features, misaligned, res: float, radius: int,\n # dist: str, step: str):\n dist: str):\n \"\"\"\n perform alignment and return new times\n \"\"\"\n\n # parameters for dtw were chosen with midi2midi on musicnet (see dtw_tuning)\n # hack to let fastdtw accept float32\n fastdtw._fastdtw.__prep_inputs = _my_prep_inputs\n _D, path = fastdtw.fastdtw(pianoroll.astype(np.float32).T,\n audio_features.astype(np.float32).T,\n dist=getattr(cdist, dist),\n radius=radius)\n\n # result = dtw(x=cdist.cdist(pianoroll.T, audio_features.T,\n # metric=dist).astype(np.float64),\n # result = dtw(x=pianoroll.T, y=audio_features.T,\n # dist_method=dist,\n # step_pattern=step,\n # window_type='slantedband',\n # window_args=dict(window_size=radius))\n # path = np.stack([result.index1, result.index2], axis=1)\n\n path = np.array(path) * res\n new_ons = np.interp(misaligned[:, 1], path[:, 0], path[:, 1])\n new_offs = np.interp(misaligned[:, 2], path[:, 0], path[:, 1])\n\n return new_ons, new_offs\n\n\ndef get_usable_features(matscore, matperfm, res):\n \"\"\"\n compute pianoroll and remove extra columns\n \"\"\"\n utils.mat_prestretch(matscore, matperfm)\n score_pr = utils.make_pianoroll(\n matscore, res=res, velocities=False) + utils.make_pianoroll(\n matscore, res=res, velocities=False, only_onsets=True)\n perfm_pr = utils.make_pianoroll(\n matperfm, res=res, velocities=False) + utils.make_pianoroll(\n matperfm, res=res, velocities=False, only_onsets=True)\n\n return score_pr, perfm_pr\n\n\ndef tafe_align(matscore, matperfm, res=0.02, radius=178, dist='cosine',\n # step='symmetric2'):\n ):\n \"\"\"\n Returns new onsets and offsets\n\n Works in-place modifying matscore\n \"\"\"\n\n score_pr, perfm_pr = get_usable_features(matscore, matperfm, res)\n # first alignment\n new_ons, new_offs = dtw_align(score_pr, perfm_pr, matscore, res, radius,\n # dist, step)\n dist)\n matscore[:, 1] = new_ons\n matscore[:, 2] = new_offs\n\n # # realign segment by segment\n # for j in range(NUM_REALIGNMENT):\n # score_pr, perfm_pr = get_usable_features(matscore, matperfm, res)\n # hop_size = int(FINE_HOP[j] // res)\n # win_size = int(FINE_WIN[j] // res)\n # num_win = int(score_pr.shape[1] // hop_size)\n # for i in range(num_win):\n # start = i * hop_size\n # end = min(i * hop_size + win_size, score_pr.shape[1])\n # indices_of_notes_in_win = np.argwhere(\n # np.logical_and(matscore[:, 1] >= start * res,\n # matscore[:, 2] <= end * res))\n # if indices_of_notes_in_win.shape[0] > 1:\n # indices_of_notes_in_win = indices_of_notes_in_win[0]\n # else:\n # continue\n # score_win = score_pr[:, start:end]\n # perfm_win = perfm_pr[:, start:end]\n # ons_win, offs_win = dtw_align(score_win,\n # perfm_win,\n # matscore[indices_of_notes_in_win],\n # res,\n # radius=1,\n # dist=dist)\n # matscore[indices_of_notes_in_win, 1] = ons_win\n # matscore[indices_of_notes_in_win, 2] = offs_win\n\n return matscore[:, 1], matscore[:, 2]\n" ]
[ [ "numpy.array", "numpy.interp", "numpy.finfo" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ompugao/cosypose
[ "4e471c16f19d5ee632668cd52eaa57b562f287d6", "4e471c16f19d5ee632668cd52eaa57b562f287d6", "4e471c16f19d5ee632668cd52eaa57b562f287d6", "4e471c16f19d5ee632668cd52eaa57b562f287d6" ]
[ "cosypose/integrated/detector.py", "cosypose/utils/distributed.py", "cosypose/lib3d/symmetries.py", "cosypose/scripts/make_ycbv_compat_models.py" ]
[ "import torch\nimport numpy as np\nimport pandas as pd\n\nimport cosypose.utils.tensor_collection as tc\n\n\nclass Detector:\n def __init__(self, model):\n model.eval()\n self.model = model\n self.config = model.config\n self.category_id_to_label = {v: k for k, v in self.config.label_to_category_id.items()}\n\n def cast(self, obj):\n return obj.cuda()\n\n @torch.no_grad()\n def get_detections(self, images, detection_th=None,\n output_masks=False, mask_th=0.8,\n one_instance_per_class=False):\n images = self.cast(images).float()\n if images.shape[-1] == 3:\n images = images.permute(0, 3, 1, 2)\n if images.max() > 1:\n images = images / 255.\n images = images.float().cuda()\n outputs_ = self.model([image_n for image_n in images])\n\n infos = []\n bboxes = []\n masks = []\n for n, outputs_n in enumerate(outputs_):\n outputs_n['labels'] = [self.category_id_to_label[category_id.item()] \\\n for category_id in outputs_n['labels']]\n for obj_id in range(len(outputs_n['boxes'])):\n bbox = outputs_n['boxes'][obj_id]\n info = dict(\n batch_im_id=n,\n label=outputs_n['labels'][obj_id],\n score=outputs_n['scores'][obj_id].item(),\n )\n mask = outputs_n['masks'][obj_id, 0] > mask_th\n bboxes.append(torch.as_tensor(bbox))\n masks.append(torch.as_tensor(mask))\n infos.append(info)\n\n if len(bboxes) > 0:\n bboxes = torch.stack(bboxes).cuda().float()\n masks = torch.stack(masks).cuda()\n else:\n infos = dict(score=[], label=[], batch_im_id=[])\n bboxes = torch.empty(0, 4).cuda().float()\n masks = torch.empty(0, images.shape[1], images.shape[2], dtype=torch.bool).cuda()\n\n outputs = tc.PandasTensorCollection(\n infos=pd.DataFrame(infos),\n bboxes=bboxes,\n )\n if output_masks:\n outputs.register_tensor('masks', masks)\n if detection_th is not None:\n keep = np.where(outputs.infos['score'] > detection_th)[0]\n outputs = outputs[keep]\n\n if one_instance_per_class:\n infos = outputs.infos\n infos['det_idx'] = np.arange(len(infos))\n keep_ids = infos.sort_values('score', ascending=False).drop_duplicates('label')['det_idx'].values\n outputs = outputs[keep_ids]\n outputs.infos = outputs.infos.drop('det_idx', axis=1)\n return outputs\n\n def __call__(self, *args, **kwargs):\n return self.get_detections(*args, **kwargs)\n", "import sys\nimport os\nimport torch.distributed as dist\nimport torch\nfrom pathlib import Path\n\n\ndef get_tmp_dir():\n if 'JOB_DIR' in os.environ:\n tmp_dir = Path(os.environ['JOB_DIR']) / 'tmp'\n else:\n tmp_dir = Path('/tmp/cosypose_job')\n tmp_dir.mkdir(exist_ok=True)\n return tmp_dir\n\n\ndef sync_model(model):\n sync_dir = get_tmp_dir() / 'models'\n sync_dir.mkdir(exist_ok=True)\n sync_ckpt = sync_dir / 'sync.checkpoint'\n if get_rank() == 0 and get_world_size() > 1:\n torch.save(model.state_dict(), sync_ckpt)\n dist.barrier()\n if get_rank() > 0:\n model.load_state_dict(torch.load(sync_ckpt))\n dist.barrier()\n return model\n\n\ndef redirect_output():\n if 'JOB_DIR' in os.environ:\n rank = get_rank()\n output_file = Path(os.environ['JOB_DIR']) / f'stdout{rank}.out'\n sys.stdout = open(output_file, 'w')\n sys.stderr = open(output_file, 'w')\n return\n\n\ndef get_rank():\n if not torch.distributed.is_initialized():\n rank = 0\n else:\n rank = torch.distributed.get_rank()\n return rank\n\n\ndef get_world_size():\n if not torch.distributed.is_initialized():\n world_size = 1\n else:\n world_size = torch.distributed.get_world_size()\n return world_size\n\n\ndef init_distributed_mode(initfile=None):\n assert torch.cuda.device_count() == 1\n rank = int(os.environ.get('SLURM_PROCID', 0))\n 
world_size = int(os.environ.get('SLURM_NTASKS', 1))\n if initfile is None:\n initfile = get_tmp_dir() / 'initfile'\n if initfile.exists() and world_size == 1:\n initfile.unlink()\n initfile = Path(initfile)\n assert initfile.parent.exists()\n torch.distributed.init_process_group(\n backend='nccl', rank=rank, world_size=world_size,\n init_method=f'file://{initfile.as_posix()}'\n )\n torch.distributed.barrier()\n\n\ndef reduce_dict(input_dict, average=True):\n \"\"\"\n https://github.com/pytorch/vision/blob/master/references/detection/utils.py\n Args:\n input_dict (dict): all the values will be reduced\n average (bool): whether to do average or sum\n Reduce the values in the dictionary from all processes so that all processes\n have the averaged results. Returns a dict with the same fields as\n input_dict, after reduction.\n \"\"\"\n world_size = dist.get_world_size()\n if world_size < 2:\n return input_dict\n with torch.no_grad():\n names = []\n values = []\n reduced_dict = []\n # sort the keys so that they are consistent across processes\n for k in sorted(input_dict.keys()):\n names.append(k)\n values.append(input_dict[k])\n values = torch.tensor(values).float().cuda()\n dist.all_reduce(values)\n if average:\n values /= world_size\n reduced_dict = {k: v.item() for k, v in zip(names, values)}\n return reduced_dict\n", "import numpy as np\n\nfrom .transform import Transform\nfrom .rotations import euler2quat\n\n\ndef make_bop_symmetries(dict_symmetries, n_symmetries_continuous=8, scale=0.001):\n # Note: See https://github.com/thodan/bop_toolkit/blob/master/bop_toolkit_lib/misc.py\n sym_discrete = dict_symmetries.get('symmetries_discrete', [])\n sym_continous = dict_symmetries.get('symmetries_continuous', [])\n all_M_discrete = [Transform((0, 0, 0, 1), (0, 0, 0))]\n all_M_continuous = []\n all_M = []\n for sym_n in sym_discrete:\n M = np.array(sym_n).reshape(4, 4)\n M[:3, -1] *= scale\n M = Transform(M)\n all_M_discrete.append(M)\n for sym_n in sym_continous:\n assert np.allclose(sym_n['offset'], 0)\n axis = np.array(sym_n['axis'])\n assert axis.sum() == 1\n for n in range(n_symmetries_continuous):\n euler = axis * 2 * np.pi * n / n_symmetries_continuous\n q = euler2quat(euler)\n M = Transform(q, (0, 0, 0))\n all_M_continuous.append(M)\n for sym_d in all_M_discrete:\n if len(all_M_continuous) > 0:\n for sym_c in all_M_continuous:\n M = sym_c * sym_d\n all_M.append(M.toHomogeneousMatrix())\n else:\n all_M.append(sym_d.toHomogeneousMatrix())\n return np.array(all_M)\n", "import trimesh\nimport shutil\nfrom copy import deepcopy\nimport numpy as np\nimport json\nfrom cosypose.config import LOCAL_DATA_DIR\n\n\nif __name__ == '__main__':\n ds_dir = LOCAL_DATA_DIR / 'bop_datasets/ycbv'\n models_dir = ds_dir / 'models'\n\n orig_names = (ds_dir / 'ycbv_friendly_names.txt').read_text()\n orig_names = {str(int(l.split(' ')[0])): l.split(' ')[1] for l in orig_names.split('\\n')[:-1]}\n\n infos = json.loads((models_dir / 'models_info.json').read_text())\n compat_infos = deepcopy(infos)\n\n # Consider these 2 objects asymmetric\n for str_obj_id, orig_name in orig_names.items():\n if orig_name == '002_master_chef_can' or orig_name == '040_large_marker':\n compat_infos[str_obj_id]['symmetries_discrete'] = []\n compat_infos[str_obj_id]['symmetries_continuous'] = []\n\n bop_compat_dir = ds_dir / 'models_bop-compat'\n bop_compat_dir.mkdir(exist_ok=True)\n for file_path in models_dir.iterdir():\n shutil.copy(file_path, bop_compat_dir / file_path.name)\n (bop_compat_dir / 
'models_info.json').write_text(json.dumps(compat_infos))\n\n l_offsets = (ds_dir / 'offsets.txt').read_text().split('\\n')[:-1]\n offsets = dict()\n for l_n in l_offsets:\n obj_id, offset = l_n[:2], l_n[3:]\n obj_id = int(obj_id)\n offset = np.array(json.loads(offset))\n offsets[str(obj_id)] = offset\n\n # Models used in the original evaluation\n bop_compat_eval_dir = ds_dir / 'models_bop-compat_eval'\n bop_compat_eval_dir.mkdir(exist_ok=True)\n (bop_compat_eval_dir / 'models_info.json').write_text(json.dumps(compat_infos))\n for obj_id, orig_name in orig_names.items():\n xyz = (ds_dir / 'models_original' / orig_name / 'points.xyz').read_text()\n xyz = xyz.split('\\n')[:-1]\n xyz = [list(map(float, xyz_n.split(' '))) for xyz_n in xyz]\n vertices = np.array(xyz) * 1000 + offsets[obj_id]\n mesh = trimesh.Trimesh(vertices=vertices)\n mesh.export(bop_compat_eval_dir / f'obj_{int(obj_id):06d}.ply')\n" ]
[ [ "torch.empty", "pandas.DataFrame", "torch.no_grad", "torch.stack", "numpy.where", "torch.as_tensor" ], [ "torch.load", "torch.cuda.device_count", "torch.distributed.is_initialized", "torch.distributed.barrier", "torch.tensor", "torch.no_grad", "torch.distributed.get_rank", "torch.distributed.get_world_size", "torch.distributed.all_reduce" ], [ "numpy.array", "numpy.allclose" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
nmarticorena/CenterNet
[ "f08477224fa14bae3fe3e4e8db26fb6ca638db2a" ]
[ "src/latency_test_images.py" ]
[ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom PIL import Image\nimport torchvision.transforms.functional as TF\nimport _init_paths\n\nimport os\n\nimport torch\nimport torch.utils.data\nfrom opts import opts\nfrom models.model import create_model, load_model, save_model\nfrom models.data_parallel import DataParallel\nfrom logger import Logger\nfrom datasets.dataset_factory import get_dataset\nfrom trains.train_factory import train_factory\nimport time\nimport numpy as np\nfrom torchsummary import summary\n\nfactor = 8\n\ndef computeTime(model, device='cuda'):\n inputs = torch.randn(1, 3, 640,480)\n if device == 'cuda':\n model = model.cuda()\n inputs = inputs.cuda()\n\n #module = torch.jit.trace(model, inputs)\n #m = torch.jit.script(model)\n #torch.jit.save(m,'test.pt')\n model.eval()\n\n i = 0\n time_spent = []\n lista=[]\n\n for x in os.listdir('../data/coco/test2017/'):\n lista.append(x)\n\n while i < 2:\n #if device == 'cuda':\n #image= Image.open('../data/coco/test2017/{}'.format(lista[7]))\n #inputs=TF.to_tensor(image)\n #inputs.unsqueeze_(0)\n #inputs=inputs.cuda()\n start_time = time.time()\n with torch.no_grad():\n _ = model(inputs)\n\n if device == 'cuda':\n torch.cuda.synchronize() # wait for cuda to finish (cuda is asynchronous!)\n if i != 0:\n time_spent.append(time.time() - start_time)\n i += 1\n print('Avg execution time (ms): {:.3f}'.format(np.mean(time_spent)))\n\n\n\ndef main(opt):\n torch.manual_seed(opt.seed)\n torch.backends.cudnn.benchmark = not opt.not_cuda_benchmark and not opt.test\n Dataset = get_dataset(opt.dataset, opt.task)\n opt = opts().update_dataset_info_and_set_heads(opt, Dataset)\n print(opt)\n\n logger = Logger(opt)\n\n os.environ['CUDA_VISIBLE_DEVICES'] = opt.gpus_str\n opt.device = torch.device('cuda' if opt.gpus[0] >= 0 else 'cpu')\n \n print('Creating model...')\n model = create_model(opt.arch, opt.heads, opt.head_conv)\n print(next(model.parameters()).device)\n model.to(\"cpu\")\n #summary(model, (3, factor*224, 224*factor),device=\"cpu\")\n\n computeTime(model)\n\nif __name__ == '__main__':\n opt = opts().parse()\n main(opt)" ]
[ [ "torch.cuda.synchronize", "torch.randn", "torch.manual_seed", "torch.no_grad", "numpy.mean", "torch.device" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rizalgowandy/petastorm
[ "f7aad8cf5ef6878231b335911e3e95541b388d40", "f7aad8cf5ef6878231b335911e3e95541b388d40", "f7aad8cf5ef6878231b335911e3e95541b388d40" ]
[ "petastorm/tests/test_unischema.py", "petastorm/tests/test_ngram_end_to_end.py", "examples/hello_world/petastorm_dataset/tensorflow_hello_world.py" ]
[ "# Copyright (c) 2017-2018 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import division\n\nfrom decimal import Decimal\n\nimport numpy as np\nimport pyarrow as pa\nimport pyarrow.parquet as pq\nimport pytest\nfrom pyspark import Row\nfrom pyspark.sql.types import StringType, IntegerType, DecimalType, ShortType, LongType\n\nfrom petastorm.codecs import ScalarCodec, NdarrayCodec\nfrom petastorm.unischema import Unischema, UnischemaField, dict_to_spark_row, \\\n insert_explicit_nulls, match_unischema_fields, _new_gt_255_compatible_namedtuple, _fullmatch\n\nfrom unittest import mock\n\n\ndef _mock_parquet_dataset(partitions, arrow_schema):\n \"\"\"Creates a pyarrow.ParquetDataset mock capable of returning:\n\n parquet_dataset.pieces[0].get_metadata(parquet_dataset.fs.open).schema.to_arrow_schema() == schema\n parquet_dataset.partitions = partitions\n\n :param partitions: expected to be a list of pa.parquet.PartitionSet\n :param arrow_schema: an instance of pa.arrow_schema to be assumed by the mock parquet dataset object.\n :return:\n \"\"\"\n piece_mock = mock.Mock()\n piece_mock.get_metadata().schema.to_arrow_schema.return_value = arrow_schema\n\n dataset_mock = mock.Mock()\n type(dataset_mock).pieces = mock.PropertyMock(return_value=[piece_mock])\n type(dataset_mock).partitions = partitions\n\n return dataset_mock\n\n\ndef test_fields():\n \"\"\"Try using 'fields' getter\"\"\"\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int_field', np.int8, (), ScalarCodec(IntegerType()), False),\n UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), False),\n ])\n\n assert len(TestSchema.fields) == 2\n assert TestSchema.fields['int_field'].name == 'int_field'\n assert TestSchema.fields['string_field'].name == 'string_field'\n\n\ndef test_as_spark_schema():\n \"\"\"Try using 'as_spark_schema' function\"\"\"\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int_field', np.int8, (), ScalarCodec(IntegerType()), False),\n UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), False),\n UnischemaField('string_field_implicit', np.string_, ()),\n ])\n\n spark_schema = TestSchema.as_spark_schema()\n assert spark_schema.fields[0].name == 'int_field'\n\n assert spark_schema.fields[1].name == 'string_field'\n assert spark_schema.fields[1].dataType == StringType()\n\n assert spark_schema.fields[2].name == 'string_field_implicit'\n assert spark_schema.fields[2].dataType == StringType()\n\n assert TestSchema.fields['int_field'].name == 'int_field'\n assert TestSchema.fields['string_field'].name == 'string_field'\n\n\ndef test_as_spark_schema_unspecified_codec_type_for_non_scalars_raises():\n \"\"\"Do not currently support choosing spark type automatically for non-scalar types.\"\"\"\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int_vector_unspecified_codec', np.int8, (1,)),\n ])\n\n with pytest.raises(ValueError, match='has codec set to None'):\n TestSchema.as_spark_schema()\n\n\ndef 
test_as_spark_schema_unspecified_codec_type_unknown_scalar_type_raises():\n \"\"\"We have a limited list of scalar types we can automatically map from numpy (+Decimal) types to spark types.\n Make sure that a ValueError is raised if an unknown type is used.\"\"\"\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int_vector_unspecified_codec', object, ()),\n ])\n\n with pytest.raises(ValueError, match='Was not able to map type'):\n TestSchema.as_spark_schema()\n\n\ndef test_dict_to_spark_row_field_validation_scalar_types():\n \"\"\"Test various validations done on data types when converting a dictionary to a spark row\"\"\"\n TestSchema = Unischema('TestSchema', [\n UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), False),\n ])\n\n assert isinstance(dict_to_spark_row(TestSchema, {'string_field': 'abc'}), Row)\n\n # Not a nullable field\n with pytest.raises(ValueError):\n isinstance(dict_to_spark_row(TestSchema, {'string_field': None}), Row)\n\n # Wrong field type\n with pytest.raises(TypeError):\n isinstance(dict_to_spark_row(TestSchema, {'string_field': []}), Row)\n\n\ndef test_dict_to_spark_row_field_validation_scalar_nullable():\n \"\"\"Test various validations done on data types when converting a dictionary to a spark row\"\"\"\n TestSchema = Unischema('TestSchema', [\n UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), True),\n UnischemaField('nullable_implicitly_set', np.string_, (), ScalarCodec(StringType()), True),\n ])\n\n assert isinstance(dict_to_spark_row(TestSchema, {'string_field': None}), Row)\n\n\ndef test_dict_to_spark_row_field_validation_ndarrays():\n \"\"\"Test various validations done on data types when converting a dictionary to a spark row\"\"\"\n TestSchema = Unischema('TestSchema', [\n UnischemaField('tensor3d', np.float32, (10, 20, 30), NdarrayCodec(), False),\n ])\n\n assert isinstance(dict_to_spark_row(TestSchema, {'tensor3d': np.zeros((10, 20, 30), dtype=np.float32)}), Row)\n\n # Null value into not nullable field\n with pytest.raises(ValueError):\n isinstance(dict_to_spark_row(TestSchema, {'string_field': None}), Row)\n\n # Wrong dimensions\n with pytest.raises(ValueError):\n isinstance(dict_to_spark_row(TestSchema, {'string_field': np.zeros((1, 2, 3), dtype=np.float32)}), Row)\n\n\ndef test_dict_to_spark_row_order():\n TestSchema = Unischema('TestSchema', [\n UnischemaField('float_col', np.float64, ()),\n UnischemaField('int_col', np.int64, ()),\n ])\n row_dict = {\n TestSchema.int_col.name: 3,\n TestSchema.float_col.name: 2.0,\n }\n spark_row = dict_to_spark_row(TestSchema, row_dict)\n schema_field_names = list(TestSchema.fields)\n assert spark_row[0] == row_dict[schema_field_names[0]]\n assert spark_row[1] == row_dict[schema_field_names[1]]\n\n\ndef test_make_named_tuple():\n TestSchema = Unischema('TestSchema', [\n UnischemaField('string_scalar', np.string_, (), ScalarCodec(StringType()), True),\n UnischemaField('int32_scalar', np.int32, (), ScalarCodec(ShortType()), False),\n UnischemaField('uint8_scalar', np.uint8, (), ScalarCodec(ShortType()), False),\n UnischemaField('int32_matrix', np.float32, (10, 20, 3), NdarrayCodec(), True),\n UnischemaField('decimal_scalar', Decimal, (10, 20, 3), ScalarCodec(DecimalType(10, 9)), False),\n ])\n\n TestSchema.make_namedtuple(string_scalar='abc', int32_scalar=10, uint8_scalar=20,\n int32_matrix=np.int32((10, 20, 3)), decimal_scalar=Decimal(123) / Decimal(10))\n\n TestSchema.make_namedtuple(string_scalar=None, int32_scalar=10, uint8_scalar=20,\n int32_matrix=None, 
decimal_scalar=Decimal(123) / Decimal(10))\n\n\ndef test_insert_explicit_nulls():\n TestSchema = Unischema('TestSchema', [\n UnischemaField('nullable', np.int32, (), ScalarCodec(StringType()), True),\n UnischemaField('not_nullable', np.int32, (), ScalarCodec(ShortType()), False),\n ])\n\n # Insert_explicit_nulls to leave the dictionary as is.\n row_dict = {'nullable': 0, 'not_nullable': 1}\n insert_explicit_nulls(TestSchema, row_dict)\n assert len(row_dict) == 2\n assert row_dict['nullable'] == 0\n assert row_dict['not_nullable'] == 1\n\n # Insert_explicit_nulls to leave the dictionary as is.\n row_dict = {'nullable': None, 'not_nullable': 1}\n insert_explicit_nulls(TestSchema, row_dict)\n assert len(row_dict) == 2\n assert row_dict['nullable'] is None\n assert row_dict['not_nullable'] == 1\n\n # We are missing a nullable field here. insert_explicit_nulls should add a None entry.\n row_dict = {'not_nullable': 1}\n insert_explicit_nulls(TestSchema, row_dict)\n assert len(row_dict) == 2\n assert row_dict['nullable'] is None\n assert row_dict['not_nullable'] == 1\n\n # We are missing a not_nullable field here. Should raise an ValueError.\n row_dict = {'nullable': 0}\n with pytest.raises(ValueError):\n insert_explicit_nulls(TestSchema, row_dict)\n\n\ndef test_create_schema_view_fails_validate():\n \"\"\" Exercises code paths unischema.create_schema_view ValueError, and unischema.__str__.\"\"\"\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int_field', np.int8, (), ScalarCodec(IntegerType()), False),\n UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), False),\n ])\n with pytest.raises(ValueError, match='does not belong to the schema'):\n TestSchema.create_schema_view([UnischemaField('id', np.int64, (), ScalarCodec(LongType()), False)])\n\n\ndef test_create_schema_view_using_invalid_type():\n \"\"\" Exercises code paths unischema.create_schema_view ValueError, and unischema.__str__.\"\"\"\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int_field', np.int8, (), ScalarCodec(IntegerType()), False),\n UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), False),\n ])\n with pytest.raises(ValueError, match='must be either a string'):\n TestSchema.create_schema_view([42])\n\n\ndef test_create_schema_view_using_unischema_fields():\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int_field', np.int8, (), ScalarCodec(IntegerType()), False),\n UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), False),\n ])\n view = TestSchema.create_schema_view([TestSchema.int_field])\n assert set(view.fields.keys()) == {'int_field'}\n\n\ndef test_create_schema_view_using_regex():\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int_field', np.int8, (), ScalarCodec(IntegerType()), False),\n UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), False),\n ])\n view = TestSchema.create_schema_view(['int.*$'])\n assert set(view.fields.keys()) == {'int_field'}\n\n view = TestSchema.create_schema_view([u'int.*$'])\n assert set(view.fields.keys()) == {'int_field'}\n\n\ndef test_create_schema_view_using_regex_and_unischema_fields():\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int_field', np.int8, (), ScalarCodec(IntegerType()), False),\n UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), False),\n UnischemaField('other_string_field', np.string_, (), ScalarCodec(StringType()), False),\n ])\n view = TestSchema.create_schema_view(['int.*$', TestSchema.string_field])\n 
assert set(view.fields.keys()) == {'int_field', 'string_field'}\n\n\ndef test_create_schema_view_using_regex_and_unischema_fields_with_duplicates():\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int_field', np.int8, (), ScalarCodec(IntegerType()), False),\n UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), False),\n UnischemaField('other_string_field', np.string_, (), ScalarCodec(StringType()), False),\n ])\n view = TestSchema.create_schema_view(['int.*$', TestSchema.int_field])\n assert set(view.fields.keys()) == {'int_field'}\n\n\ndef test_create_schema_view_no_field_matches_regex():\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int_field', np.int8, (), ScalarCodec(IntegerType()), False),\n UnischemaField('string_field', np.string_, (), ScalarCodec(StringType()), False),\n ])\n view = TestSchema.create_schema_view(['bogus'])\n assert not view.fields\n\n\ndef test_name_property():\n TestSchema = Unischema('TestSchema', [\n UnischemaField('nullable', np.int32, (), ScalarCodec(StringType()), True),\n ])\n\n assert 'TestSchema' == TestSchema._name\n\n\ndef test_field_name_conflict_with_unischema_attribute():\n # fields is an existing attribute of Unischema\n with pytest.warns(UserWarning, match='Can not create dynamic property'):\n Unischema('TestSchema', [UnischemaField('fields', np.int32, (), ScalarCodec(StringType()), True)])\n\n\ndef test_match_unischema_fields():\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int32', np.int32, (), None, False),\n UnischemaField('uint8', np.uint8, (), None, False),\n UnischemaField('uint16', np.uint16, (), None, False),\n ])\n\n assert match_unischema_fields(TestSchema, ['.*nt.*6']) == [TestSchema.uint16]\n assert match_unischema_fields(TestSchema, ['nomatch']) == []\n assert set(match_unischema_fields(TestSchema, ['.*'])) == set(TestSchema.fields.values())\n assert set(match_unischema_fields(TestSchema, ['int32', 'uint8'])) == {TestSchema.int32, TestSchema.uint8}\n\n\ndef test_match_unischema_fields_legacy_warning():\n TestSchema = Unischema('TestSchema', [\n UnischemaField('int32', np.int32, (), None, False),\n UnischemaField('uint8', np.uint8, (), None, False),\n UnischemaField('uint16', np.uint16, (), None, False),\n ])\n\n # Check that no warnings are shown if the legacy and the new way of filtering produce the same results.\n with pytest.warns(None) as unexpected_warnings:\n match_unischema_fields(TestSchema, ['uint8'])\n assert not unexpected_warnings\n\n # uint8 and uint16 would have been matched using the old method, but not the new one\n with pytest.warns(UserWarning, match=r'schema_fields behavior has changed.*uint16, uint8'):\n assert match_unischema_fields(TestSchema, ['uint']) == []\n\n # Now, all fields will be matched, but in different order (legacy vs current). 
Make sure we don't issue a warning.\n with pytest.warns(None) as unexpected_warnings:\n match_unischema_fields(TestSchema, ['int', 'uint8', 'uint16', 'int32'])\n assert not unexpected_warnings\n\n\ndef test_arrow_schema_convertion():\n fields = [\n pa.field('string', pa.string()),\n pa.field('int8', pa.int8()),\n pa.field('int16', pa.int16()),\n pa.field('int32', pa.int32()),\n pa.field('int64', pa.int64()),\n pa.field('float', pa.float32()),\n pa.field('double', pa.float64()),\n pa.field('bool', pa.bool_(), False),\n pa.field('fixed_size_binary', pa.binary(10)),\n pa.field('variable_size_binary', pa.binary()),\n pa.field('decimal', pa.decimal128(3, 4)),\n pa.field('timestamp_s', pa.timestamp('s')),\n pa.field('timestamp_ns', pa.timestamp('ns')),\n pa.field('date_32', pa.date32()),\n pa.field('date_64', pa.date64())\n ]\n arrow_schema = pa.schema(fields)\n\n mock_dataset = _mock_parquet_dataset([], arrow_schema)\n\n unischema = Unischema.from_arrow_schema(mock_dataset)\n for name in arrow_schema.names:\n assert getattr(unischema, name).name == name\n assert getattr(unischema, name).codec is None\n\n if name == 'bool':\n assert not getattr(unischema, name).nullable\n else:\n assert getattr(unischema, name).nullable\n\n # Test schema preserve fields order\n field_name_list = [f.name for f in fields]\n assert list(unischema.fields.keys()) == field_name_list\n\n\ndef test_arrow_schema_convertion_with_string_partitions():\n arrow_schema = pa.schema([\n pa.field('int8', pa.int8()),\n ])\n\n mock_dataset = _mock_parquet_dataset([pq.PartitionSet('part_name', ['a', 'b'])], arrow_schema)\n\n unischema = Unischema.from_arrow_schema(mock_dataset)\n assert unischema.part_name.numpy_dtype == np.str_\n\n\ndef test_arrow_schema_convertion_with_int_partitions():\n arrow_schema = pa.schema([\n pa.field('int8', pa.int8()),\n ])\n\n mock_dataset = _mock_parquet_dataset([pq.PartitionSet('part_name', ['0', '1', '2'])], arrow_schema)\n\n unischema = Unischema.from_arrow_schema(mock_dataset)\n assert unischema.part_name.numpy_dtype == np.int64\n\n\ndef test_arrow_schema_convertion_fail():\n arrow_schema = pa.schema([\n pa.field('list_of_int', pa.float16()),\n ])\n\n mock_dataset = _mock_parquet_dataset([], arrow_schema)\n\n with pytest.raises(ValueError, match='Cannot auto-create unischema due to unsupported column type'):\n Unischema.from_arrow_schema(mock_dataset, omit_unsupported_fields=False)\n\n\ndef test_arrow_schema_arrow_1644_list_of_struct():\n arrow_schema = pa.schema([\n pa.field('id', pa.string()),\n pa.field('list_of_struct', pa.list_(pa.struct([pa.field('a', pa.string()), pa.field('b', pa.int32())])))\n ])\n\n mock_dataset = _mock_parquet_dataset([], arrow_schema)\n\n unischema = Unischema.from_arrow_schema(mock_dataset)\n assert getattr(unischema, 'id').name == 'id'\n assert not hasattr(unischema, 'list_of_struct')\n\n\ndef test_arrow_schema_arrow_1644_list_of_list():\n arrow_schema = pa.schema([\n pa.field('id', pa.string()),\n pa.field('list_of_list',\n pa.list_(pa.list_(pa.struct([pa.field('a', pa.string()), pa.field('b', pa.int32())]))))\n ])\n\n mock_dataset = _mock_parquet_dataset([], arrow_schema)\n\n unischema = Unischema.from_arrow_schema(mock_dataset)\n assert getattr(unischema, 'id').name == 'id'\n assert not hasattr(unischema, 'list_of_list')\n\n\ndef test_arrow_schema_convertion_ignore():\n arrow_schema = pa.schema([\n pa.field('list_of_int', pa.float16()),\n pa.field('struct', pa.struct([pa.field('a', pa.string()), pa.field('b', pa.int32())])),\n ])\n\n mock_dataset = 
_mock_parquet_dataset([], arrow_schema)\n\n unischema = Unischema.from_arrow_schema(mock_dataset, omit_unsupported_fields=True)\n assert not hasattr(unischema, 'list_of_int')\n\n\[email protected]()\ndef equality_fields():\n class Fixture(object):\n string1 = UnischemaField('random', np.string_, (), ScalarCodec(StringType()), False)\n string2 = UnischemaField('random', np.string_, (), ScalarCodec(StringType()), False)\n string_implicit = UnischemaField('random', np.string_, ())\n string_nullable = UnischemaField('random', np.string_, (), nullable=True)\n other_string = UnischemaField('Random', np.string_, (), ScalarCodec(StringType()), False)\n int1 = UnischemaField('id', np.int32, (), ScalarCodec(ShortType()), False)\n int2 = UnischemaField('id', np.int32, (), ScalarCodec(ShortType()), False)\n other_int = UnischemaField('ID', np.int32, (), ScalarCodec(ShortType()), False)\n\n return Fixture()\n\n\ndef test_equality(equality_fields):\n # Use assertTrue instead of assertEqual/assertNotEqual so we don't depend on which operator (__eq__ or __ne__)\n # actual implementation of assert uses\n assert equality_fields.string1 == equality_fields.string2\n assert equality_fields.string1 == equality_fields.string_implicit\n assert equality_fields.int1 == equality_fields.int2\n assert equality_fields.string1 != equality_fields.other_string\n assert equality_fields.other_string != equality_fields.string_implicit\n assert equality_fields.int1 != equality_fields.other_int\n assert equality_fields.string_nullable != equality_fields.string_implicit\n\n\ndef test_hash(equality_fields):\n assert hash(equality_fields.string1) == hash(equality_fields.string2)\n assert hash(equality_fields.int1) == hash(equality_fields.int2)\n assert hash(equality_fields.string1) != hash(equality_fields.other_string)\n assert hash(equality_fields.int1) != hash(equality_fields.other_int)\n\n\ndef test_new_gt_255_compatible_namedtuple():\n fields_count = 1000\n field_names = ['f{}'.format(i) for i in range(fields_count)]\n values = list(range(1000))\n huge_tuple = _new_gt_255_compatible_namedtuple('HUGE_TUPLE', field_names)\n huge_tuple_instance = huge_tuple(**dict(zip(field_names, values)))\n assert len(huge_tuple_instance) == fields_count\n assert huge_tuple_instance.f764 == 764\n\n\ndef test_fullmatch():\n assert _fullmatch('abc', 'abc')\n assert _fullmatch('^abc', 'abc')\n assert _fullmatch('abc$', 'abc')\n assert _fullmatch('a.c', 'abc')\n assert _fullmatch('.*abcdef', 'abcdef')\n assert _fullmatch('abc.*', 'abcdef')\n assert _fullmatch('.*c.*', 'abcdef')\n assert _fullmatch('', '')\n assert not _fullmatch('abc', 'xyz')\n assert not _fullmatch('abc', 'abcx')\n assert not _fullmatch('abc', 'xabc')\n", "# Copyright (c) 2017-2018 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom decimal import Decimal\n\nimport numpy as np\nimport pytest\nimport tensorflow.compat.v1 as tf # pylint: disable=import-error\n\nfrom unittest import mock\n\nfrom petastorm import make_reader\nfrom petastorm.ngram import 
NGram\nfrom petastorm.tests.conftest import SyntheticDataset, maybe_cached_dataset\nfrom petastorm.tests.test_common import create_test_dataset, TestSchema\nfrom petastorm.tests.test_tf_utils import create_tf_graph\nfrom petastorm.tf_utils import tf_tensors\n\n# Tests in this module will run once for each entry in the READER_FACTORIES\n# pylint: disable=unnecessary-lambda\nREADER_FACTORIES = [\n lambda url, **kwargs: make_reader(url, reader_pool_type='dummy', **kwargs),\n lambda url, **kwargs: make_reader(url, reader_pool_type='process', workers_count=1, **kwargs),\n]\n\n\[email protected](scope=\"session\")\ndef dataset_num_files_1(request, tmpdir_factory):\n def _dataset_generator():\n path = tmpdir_factory.mktemp(\"data\").strpath\n url = 'file://' + path\n data = create_test_dataset(url, range(99), num_files=1)\n return SyntheticDataset(url=url, path=path, data=data)\n\n return maybe_cached_dataset(request.config, 'dataset_num_files_1', _dataset_generator)\n\n\[email protected](scope=\"session\")\ndef dataset_0_3_8_10_11_20_23(request, tmpdir_factory):\n def _dataset_generator():\n path = tmpdir_factory.mktemp(\"data\").strpath\n url = 'file://' + path\n ids = [0, 3, 8, 10, 11, 20, 23]\n data = create_test_dataset(url, ids, num_files=1)\n return SyntheticDataset(url=url, path=path, data=data)\n\n return maybe_cached_dataset(request.config, 'dataset_0_3_8_10_11_20_23', _dataset_generator)\n\n\[email protected](scope=\"session\")\ndef dataset_range_0_99_5(request, tmpdir_factory):\n def _dataset_generator():\n path = tmpdir_factory.mktemp(\"data\").strpath\n url = 'file://' + path\n ids = range(0, 99, 5)\n data = create_test_dataset(url, ids)\n return SyntheticDataset(url=url, path=path, data=data)\n\n return maybe_cached_dataset(request.config, 'dataset_range_0_99_5', _dataset_generator)\n\n\ndef _assert_equal_ngram(actual_ngram, expected_ngram):\n np.testing.assert_equal(sorted(actual_ngram.keys()), sorted(expected_ngram.keys()))\n for timestep in actual_ngram:\n actual_dict = actual_ngram[timestep]._asdict()\n expected_dict = expected_ngram[timestep]._asdict()\n np.testing.assert_equal(sorted(list(actual_dict.keys())), sorted(list(expected_dict.keys())))\n for field_name in actual_dict:\n actual_field = actual_dict[field_name]\n expected_field = expected_dict[field_name]\n\n if isinstance(expected_field, Decimal) or isinstance(expected_field, str):\n # Tensorflow returns all strings as bytes in python3. 
So we will need to decode it\n actual_field = actual_field.decode()\n elif isinstance(expected_field, np.ndarray) and expected_field.dtype.type == np.unicode_:\n actual_field = np.array([item.decode() for item in actual_field])\n\n if isinstance(expected_field, Decimal):\n np.testing.assert_equal(expected_field, Decimal(actual_field),\n '{0} field is different'.format(field_name))\n else:\n np.testing.assert_equal(expected_field, actual_field, '{0} field is different'.format(field_name))\n\n\ndef _get_named_tuple_from_ngram(ngram, dataset_dicts, starting_index):\n expected_ngram = {}\n for index, key in enumerate(range(min(ngram.fields.keys()), max(ngram.fields.keys()) + 1)):\n if key in ngram.fields:\n current_field_names = [field.name for field in ngram.fields[key]]\n else:\n current_field_names = []\n new_schema = TestSchema.create_schema_view([\n TestSchema.fields.get(field) for field in TestSchema.fields if field in current_field_names])\n current_dict = dataset_dicts[starting_index + index]\n new_dict = {k: current_dict[k] for k in current_dict if k in current_field_names}\n expected_ngram[key] = new_schema.make_namedtuple(**new_dict)\n return expected_ngram\n\n\n@create_tf_graph\ndef _test_continuous_ngram_tf(ngram_fields, dataset_num_files_1, reader_factory):\n \"\"\"Tests continuous ngram in tf of a certain length. Continuous here refers to\n that this reader will always return consecutive ngrams due to shuffle being false\n and partition being 1.\n \"\"\"\n\n ngram = NGram(fields=ngram_fields, delta_threshold=10, timestamp_field=TestSchema.id)\n with reader_factory(dataset_num_files_1.url,\n schema_fields=ngram,\n shuffle_row_groups=False) as reader:\n\n readout_examples = tf_tensors(reader)\n\n # Make sure we have static shape info for all fields\n for timestep in readout_examples:\n for field in readout_examples[timestep]:\n assert field.get_shape().dims is not None\n\n # Read a bunch of entries from the dataset and compare the data to reference\n expected_id = 0\n with tf.Session() as sess:\n for _ in range(5):\n actual = sess.run(readout_examples)\n expected_ngram = _get_named_tuple_from_ngram(ngram, dataset_num_files_1.data, expected_id)\n _assert_equal_ngram(actual, expected_ngram)\n expected_id = expected_id + 1\n\n\ndef _test_continuous_ngram(ngram_fields, dataset_num_files_1, reader_factory):\n \"\"\"Test continuous ngram of a certain length. Continuous here refers to\n that this reader will always return consecutive ngrams due to shuffle being false\n and partition being 1.\"\"\"\n\n ngram = NGram(fields=ngram_fields, delta_threshold=10, timestamp_field=TestSchema.id)\n with reader_factory(dataset_num_files_1.url, schema_fields=ngram, shuffle_row_groups=False) as reader:\n expected_id = 0\n\n for _ in range(ngram.length):\n actual = next(reader)\n expected_ngram = _get_named_tuple_from_ngram(ngram, dataset_num_files_1.data, expected_id)\n np.testing.assert_equal(actual, expected_ngram)\n expected_id = expected_id + 1\n\n\n@create_tf_graph\ndef _test_noncontinuous_ngram_tf(ngram_fields, synthetic_dataset, reader_factory):\n \"\"\"Test non continuous ngram in tf of a certain length. 
Non continuous here refers\n to that the reader will not necessarily return consecutive ngrams because partition is more\n than one and false is true.\"\"\"\n\n dataset_dicts = synthetic_dataset.data\n ngram = NGram(fields=ngram_fields, delta_threshold=10, timestamp_field=TestSchema.id)\n reader = reader_factory(synthetic_dataset.url, schema_fields=ngram)\n\n readout_examples = tf_tensors(reader)\n\n # Make sure we have static shape info for all fields\n for timestep in readout_examples:\n for field in readout_examples[timestep]:\n assert field.get_shape().dims is not None\n\n # Read a bunch of entries from the dataset and compare the data to reference\n with tf.Session() as sess:\n for _ in range(5):\n actual = sess.run(readout_examples)\n expected_ngram = _get_named_tuple_from_ngram(ngram, dataset_dicts, actual[min(actual.keys())].id)\n _assert_equal_ngram(actual, expected_ngram)\n\n reader.stop()\n reader.join()\n\n\ndef _test_noncontinuous_ngram(ngram_fields, synthetic_dataset, reader_factory):\n \"\"\"Test noncontinuous ngram of a certain length. Non continuous here refers\n to that the reader will not necessarily return consecutive ngrams because partition is more\n than one and false is true.\"\"\"\n\n dataset_dicts = synthetic_dataset.data\n ngram = NGram(fields=ngram_fields, delta_threshold=10, timestamp_field=TestSchema.id)\n with reader_factory(synthetic_dataset.url,\n schema_fields=ngram,\n shuffle_row_groups=True,\n shuffle_row_drop_partitions=5) as reader:\n for _ in range(10):\n actual = next(reader)\n expected_ngram = _get_named_tuple_from_ngram(ngram, dataset_dicts, actual[min(actual.keys())].id)\n np.testing.assert_equal(actual, expected_ngram)\n\n\[email protected]\[email protected]('reader_factory', READER_FACTORIES)\ndef test_ngram_basic_tf(dataset_num_files_1, reader_factory):\n \"\"\"Tests basic ngram with no delta threshold with no shuffle and in the same partition.\"\"\"\n fields = {\n -1: [TestSchema.id, TestSchema.id2, TestSchema.image_png, TestSchema.matrix],\n 0: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n }\n _test_continuous_ngram_tf(fields, dataset_num_files_1, reader_factory)\n\n\[email protected]('reader_factory', READER_FACTORIES)\[email protected]('petastorm.unischema._UNISCHEMA_FIELD_ORDER', 'alphabetical')\ndef test_ngram_basic(dataset_num_files_1, reader_factory):\n \"\"\"Tests basic ngram with no delta threshold with no shuffle and in the same partition.\"\"\"\n fields = {\n -1: [TestSchema.id, TestSchema.id2, TestSchema.image_png, TestSchema.matrix],\n 0: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n }\n _test_continuous_ngram(fields, dataset_num_files_1, reader_factory)\n\n\[email protected]\[email protected]('reader_factory', READER_FACTORIES)\ndef test_ngram_basic_longer_tf(dataset_num_files_1, reader_factory):\n \"\"\"Tests basic ngram with no delta threshold with no shuffle and in the same partition.\"\"\"\n fields = {\n -2: [TestSchema.id, TestSchema.id2, TestSchema.matrix],\n -1: [TestSchema.id, TestSchema.id2, TestSchema.image_png],\n 0: [TestSchema.id, TestSchema.id2, TestSchema.decimal],\n 1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n 2: [TestSchema.id, TestSchema.id2]\n }\n _test_continuous_ngram_tf(fields, dataset_num_files_1, reader_factory)\n\n\[email protected]('reader_factory', READER_FACTORIES)\[email protected]('petastorm.unischema._UNISCHEMA_FIELD_ORDER', 'alphabetical')\ndef test_ngram_basic_longer(dataset_num_files_1, reader_factory):\n \"\"\"Tests basic ngram with no delta threshold 
with no shuffle and in the same partition.\"\"\"\n fields = {\n -2: [TestSchema.id, TestSchema.id2, TestSchema.matrix],\n -1: [TestSchema.id, TestSchema.id2, TestSchema.image_png],\n 0: [TestSchema.id, TestSchema.id2, TestSchema.decimal],\n 1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n 2: [TestSchema.id, TestSchema.id2]\n }\n _test_continuous_ngram(fields, dataset_num_files_1, reader_factory)\n\n\[email protected]\[email protected]('reader_factory', READER_FACTORIES)\ndef test_ngram_basic_shuffle_multi_partition_tf(synthetic_dataset, reader_factory):\n \"\"\"Tests basic ngram with no delta threshold with shuffle and in many partitions.\"\"\"\n fields = {\n -1: [TestSchema.id, TestSchema.id2, TestSchema.image_png, TestSchema.matrix],\n 0: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n }\n _test_noncontinuous_ngram_tf(fields, synthetic_dataset, reader_factory)\n\n\[email protected]('reader_factory', READER_FACTORIES)\[email protected]('petastorm.unischema._UNISCHEMA_FIELD_ORDER', 'alphabetical')\ndef test_ngram_basic_shuffle_multi_partition(synthetic_dataset, reader_factory):\n \"\"\"Tests basic ngram with no delta threshold with shuffle and in many partitions.\"\"\"\n fields = {\n 0: [TestSchema.id, TestSchema.id2, TestSchema.image_png, TestSchema.matrix],\n 1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n }\n _test_noncontinuous_ngram(fields, synthetic_dataset, reader_factory)\n\n\[email protected]\[email protected]('reader_factory', READER_FACTORIES)\ndef test_ngram_basic_longer_shuffle_multi_partition_tf(synthetic_dataset, reader_factory):\n \"\"\"Tests basic ngram with no delta threshold with shuffle and in many partitions.\"\"\"\n fields = {\n -2: [TestSchema.id, TestSchema.id2, TestSchema.matrix],\n -1: [TestSchema.id, TestSchema.id2, TestSchema.image_png],\n 0: [TestSchema.id, TestSchema.id2, TestSchema.decimal],\n 1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n 2: [TestSchema.id, TestSchema.id2]\n }\n _test_noncontinuous_ngram_tf(fields, synthetic_dataset, reader_factory)\n\n\[email protected]('reader_factory', READER_FACTORIES)\[email protected]('petastorm.unischema._UNISCHEMA_FIELD_ORDER', 'alphabetical')\ndef test_ngram_basic_longer_shuffle_multi_partition(synthetic_dataset, reader_factory):\n \"\"\"Tests basic ngram with no delta threshold with shuffle and in many partitions.\"\"\"\n fields = {\n -5: [TestSchema.id, TestSchema.id2, TestSchema.matrix],\n -4: [TestSchema.id, TestSchema.id2, TestSchema.image_png],\n -3: [TestSchema.id, TestSchema.id2, TestSchema.decimal],\n -2: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n -1: [TestSchema.id, TestSchema.id2]\n }\n _test_noncontinuous_ngram(fields, synthetic_dataset, reader_factory)\n\n\[email protected]('reader_factory', READER_FACTORIES)\[email protected]('petastorm.unischema._UNISCHEMA_FIELD_ORDER', 'alphabetical')\ndef test_ngram_basic_longer_no_overlap(synthetic_dataset, reader_factory):\n \"\"\"Tests basic ngram with no delta threshold with no overlaps of timestamps.\"\"\"\n fields = {\n -5: [TestSchema.id, TestSchema.id2, TestSchema.matrix],\n -4: [TestSchema.id, TestSchema.id2, TestSchema.image_png],\n -3: [TestSchema.id, TestSchema.id2, TestSchema.decimal],\n -2: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n -1: [TestSchema.id, TestSchema.id2]\n }\n\n dataset_dicts = synthetic_dataset.data\n ngram = NGram(fields=fields, delta_threshold=10, timestamp_field=TestSchema.id, timestamp_overlap=False)\n with reader_factory(synthetic_dataset.url, 
schema_fields=ngram, shuffle_row_groups=False) as reader:\n timestamps_seen = set()\n for actual in reader:\n expected_ngram = _get_named_tuple_from_ngram(ngram, dataset_dicts, actual[min(actual.keys())].id)\n np.testing.assert_equal(actual, expected_ngram)\n for step in actual.values():\n timestamp = step.id\n assert timestamp not in timestamps_seen\n timestamps_seen.add(timestamp)\n\n\[email protected]\[email protected]('reader_factory', READER_FACTORIES)\n@create_tf_graph\ndef test_ngram_delta_threshold_tf(dataset_0_3_8_10_11_20_23, reader_factory):\n \"\"\"Test to verify that delta threshold work as expected in one partition in the same ngram\n and between consecutive ngrams. delta threshold here refers that each ngram must not be\n more than delta threshold apart for the field specified by timestamp_field.\"\"\"\n\n fields = {\n 0: [TestSchema.id, TestSchema.id2, TestSchema.image_png, TestSchema.matrix],\n 1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n }\n ngram = NGram(fields=fields, delta_threshold=4, timestamp_field=TestSchema.id)\n with reader_factory(\n dataset_0_3_8_10_11_20_23.url,\n schema_fields=ngram,\n shuffle_row_groups=False) as reader:\n\n # Ngrams expected: (0, 3), (8, 10), (10, 11)\n\n with tf.Session() as sess:\n readout = tf_tensors(reader)\n for timestep in readout:\n for field in readout[timestep]:\n assert field.get_shape().dims is not None\n first_item = sess.run(readout)\n expected_item = _get_named_tuple_from_ngram(ngram, dataset_0_3_8_10_11_20_23.data, 0)\n _assert_equal_ngram(first_item, expected_item)\n\n readout = tf_tensors(reader)\n for timestep in readout:\n for field in readout[timestep]:\n assert field.get_shape().dims is not None\n second_item = sess.run(readout)\n expected_item = _get_named_tuple_from_ngram(ngram, dataset_0_3_8_10_11_20_23.data, 3)\n _assert_equal_ngram(second_item, expected_item)\n\n readout = tf_tensors(reader)\n for timestep in readout:\n for field in readout[timestep]:\n assert field.get_shape().dims is not None\n third_item = sess.run(readout)\n expected_item = _get_named_tuple_from_ngram(ngram, dataset_0_3_8_10_11_20_23.data, 5)\n _assert_equal_ngram(third_item, expected_item)\n\n with pytest.raises(tf.errors.OutOfRangeError):\n sess.run(tf_tensors(reader))\n\n\[email protected]('reader_factory', READER_FACTORIES)\[email protected]('petastorm.unischema._UNISCHEMA_FIELD_ORDER', 'alphabetical')\ndef test_ngram_delta_threshold(dataset_0_3_8_10_11_20_23, reader_factory):\n \"\"\"Test to verify that delta threshold work as expected in one partition in the same ngram\n and between consecutive ngrams. 
delta threshold here refers that each ngram must not be\n more than delta threshold apart for the field specified by timestamp_field.\"\"\"\n\n fields = {\n 0: [TestSchema.id, TestSchema.id2, TestSchema.image_png, TestSchema.matrix],\n 1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n }\n ngram = NGram(fields=fields, delta_threshold=4, timestamp_field=TestSchema.id)\n with reader_factory(dataset_0_3_8_10_11_20_23.url, schema_fields=ngram,\n shuffle_row_groups=False) as reader:\n # NGrams expected: (0, 3), (8, 10), (10, 11)\n\n first_item = next(reader)\n expected_ngram = _get_named_tuple_from_ngram(ngram, dataset_0_3_8_10_11_20_23.data, 0)\n np.testing.assert_equal(first_item, expected_ngram)\n\n second_item = next(reader)\n expected_ngram = _get_named_tuple_from_ngram(ngram, dataset_0_3_8_10_11_20_23.data, 3)\n np.testing.assert_equal(second_item, expected_ngram)\n\n third_item = next(reader)\n expected_ngram = _get_named_tuple_from_ngram(ngram, dataset_0_3_8_10_11_20_23.data, 5)\n np.testing.assert_equal(third_item, expected_ngram)\n\n with pytest.raises(StopIteration):\n next(reader)\n\n\[email protected]\[email protected]('reader_factory', READER_FACTORIES)\n@create_tf_graph\ndef test_ngram_delta_small_threshold_tf(reader_factory, dataset_range_0_99_5):\n \"\"\"Test to verify that a small threshold work in ngrams.\"\"\"\n\n fields = {\n 0: [TestSchema.id, TestSchema.id2, TestSchema.image_png, TestSchema.matrix],\n 1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n }\n ngram = NGram(fields=fields, delta_threshold=1, timestamp_field=TestSchema.id)\n with reader_factory(dataset_range_0_99_5.url, schema_fields=ngram) as reader:\n with tf.Session() as sess:\n with pytest.raises(tf.errors.OutOfRangeError):\n sess.run(tf_tensors(reader))\n\n\[email protected]('reader_factory', READER_FACTORIES)\ndef test_ngram_delta_small_threshold(reader_factory, dataset_range_0_99_5):\n \"\"\"Test to verify that a small threshold work in ngrams.\"\"\"\n\n fields = {\n 0: [TestSchema.id, TestSchema.id2, TestSchema.image_png, TestSchema.matrix],\n 1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n }\n ngram = NGram(fields=fields, delta_threshold=1, timestamp_field=TestSchema.id)\n with reader_factory(dataset_range_0_99_5.url, schema_fields=ngram) as reader:\n with pytest.raises(StopIteration):\n next(reader)\n\n\ndef test_ngram_validation():\n \"\"\"Test to verify that ngram validation work as expected.\"\"\"\n\n fields = {\n 0: [TestSchema.id, TestSchema.id2, TestSchema.image_png, TestSchema.matrix],\n 1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n }\n\n with pytest.raises(ValueError):\n # delta threshold must be an int\n NGram(fields=fields, delta_threshold='abc', timestamp_field=TestSchema.id)\n\n with pytest.raises(ValueError):\n # timestamp_field must be a field\n NGram(fields=fields, delta_threshold=5, timestamp_field=5)\n\n with pytest.raises(ValueError):\n # Fields must be a dict\n NGram(fields=[], delta_threshold=5, timestamp_field=TestSchema.id)\n\n with pytest.raises(ValueError):\n # Each value in fields must be an array\n NGram(fields={0: 'test'}, delta_threshold=5, timestamp_field=TestSchema.id)\n\n with pytest.raises(ValueError):\n # timestamp_overlap must be bool\n NGram(fields=fields, delta_threshold=0.5, timestamp_field=TestSchema.id, timestamp_overlap=2)\n\n # Check some positive cases\n NGram(fields=fields, delta_threshold=0.5, timestamp_field=TestSchema.id)\n NGram(fields=fields, delta_threshold=Decimal('0.5'), 
timestamp_field=TestSchema.id)\n\n\[email protected]\[email protected]('reader_factory', READER_FACTORIES)\n@create_tf_graph\ndef test_ngram_length_1_tf(synthetic_dataset, reader_factory):\n \"\"\"Test to verify that ngram generalize to support length 1\"\"\"\n dataset_dicts = synthetic_dataset.data\n fields = {0: [TestSchema.id, TestSchema.id2]}\n ngram = NGram(fields=fields, delta_threshold=0.012, timestamp_field=TestSchema.id)\n reader = reader_factory(synthetic_dataset.url, schema_fields=ngram,\n shuffle_row_groups=True, shuffle_row_drop_partitions=5)\n with tf.Session() as sess:\n for _ in range(10):\n actual = sess.run(tf_tensors(reader))\n expected_ngram = _get_named_tuple_from_ngram(ngram, dataset_dicts, actual[min(actual.keys())].id)\n _assert_equal_ngram(actual, expected_ngram)\n\n reader.stop()\n reader.join()\n\n\[email protected]('reader_factory', READER_FACTORIES)\ndef test_ngram_length_1(synthetic_dataset, reader_factory):\n \"\"\"Test to verify that ngram generalize to support length 1\"\"\"\n dataset_dicts = synthetic_dataset.data\n fields = {0: [TestSchema.id, TestSchema.id2]}\n ngram = NGram(fields=fields, delta_threshold=0.012, timestamp_field=TestSchema.id)\n with reader_factory(synthetic_dataset.url, schema_fields=ngram,\n shuffle_row_groups=True, shuffle_row_drop_partitions=3) as reader:\n for _ in range(10):\n actual = next(reader)\n expected_ngram = _get_named_tuple_from_ngram(ngram, dataset_dicts, actual[min(actual.keys())].id)\n _assert_equal_ngram(actual, expected_ngram)\n\n\[email protected]\[email protected]('reader_factory', READER_FACTORIES)\ndef test_non_consecutive_ngram(dataset_num_files_1, reader_factory):\n \"\"\"Test to verify that non consecutive keys for fields argument in ngrams work.\"\"\"\n fields = {\n -1: [TestSchema.id, TestSchema.id2, TestSchema.image_png, TestSchema.matrix],\n 1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n }\n _test_continuous_ngram_tf(fields, dataset_num_files_1, reader_factory)\n\n\[email protected]\[email protected]('reader_factory', READER_FACTORIES)\ndef test_shuffled_fields(dataset_num_files_1, reader_factory):\n \"\"\"Test to verify not sorted keys for fields argument in ngrams work.\"\"\"\n fields = {\n 2: [TestSchema.id, TestSchema.id2, TestSchema.image_png, TestSchema.matrix],\n -1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n }\n _test_continuous_ngram_tf(fields, dataset_num_files_1, reader_factory)\n\n\[email protected]('reader_factory', READER_FACTORIES)\ndef test_ngram_shuffle_drop_ratio(synthetic_dataset, reader_factory):\n \"\"\"Test to verify the shuffle drop ratio work as expected.\"\"\"\n fields = {\n -2: [TestSchema.id, TestSchema.id2, TestSchema.matrix],\n -1: [TestSchema.id, TestSchema.id2, TestSchema.image_png],\n 0: [TestSchema.id, TestSchema.id2, TestSchema.decimal],\n 1: [TestSchema.id, TestSchema.id2, TestSchema.sensor_name],\n 2: [TestSchema.id, TestSchema.id2]\n }\n ngram = NGram(fields=fields, delta_threshold=10, timestamp_field=TestSchema.id)\n with reader_factory(synthetic_dataset.url,\n schema_fields=ngram,\n shuffle_row_groups=False) as reader:\n unshuffled = [row[0].id for row in reader]\n with reader_factory(synthetic_dataset.url,\n schema_fields=ngram,\n shuffle_row_groups=True,\n shuffle_row_drop_partitions=6) as reader:\n shuffled = [row[0].id for row in reader]\n assert len(unshuffled) == len(shuffled)\n assert unshuffled != shuffled\n\n\ndef _test_continuous_ngram_returns(ngram_fields, ts_field, dataset_num_files_1, reader_factory):\n \"\"\"Test continuous 
ngram of a certain length. Continuous here refers to\n that this reader will always return consecutive ngrams due to shuffle being false\n and partition being 1. Returns the ngram object\"\"\"\n\n ngram = NGram(fields=ngram_fields, delta_threshold=10, timestamp_field=ts_field)\n with reader_factory(dataset_num_files_1.url, schema_fields=ngram, shuffle_row_groups=False) as reader:\n expected_id = 0\n\n for _ in range(ngram.length):\n actual = next(reader)\n expected_ngram = _get_named_tuple_from_ngram(ngram, dataset_num_files_1.data, expected_id)\n np.testing.assert_equal(actual, expected_ngram)\n expected_id = expected_id + 1\n\n return ngram\n\n\[email protected]('reader_factory', READER_FACTORIES)\[email protected]('petastorm.unischema._UNISCHEMA_FIELD_ORDER', 'alphabetical')\ndef test_ngram_with_regex_fields(dataset_num_files_1, reader_factory):\n \"\"\"Tests to verify fields and timestamp field can be regular expressions and work with a reader\n \"\"\"\n fields = {\n -1: [\"^id.*$\", \"sensor_name\", TestSchema.partition_key],\n 0: [\"^id.*$\", \"sensor_name\", TestSchema.partition_key],\n 1: [\"^id.*$\", \"sensor_name\", TestSchema.partition_key]\n }\n\n ts_field = '^id$'\n\n expected_fields = [TestSchema.id, TestSchema.id2, TestSchema.id_float, TestSchema.id_odd,\n TestSchema.sensor_name, TestSchema.partition_key]\n\n ngram = _test_continuous_ngram_returns(fields, ts_field, dataset_num_files_1, reader_factory)\n\n # fields should get resolved after call to a reader\n ngram_fields = ngram.fields\n\n # Can't do direct set equality between expected fields and ngram.fields b/c of issue\n # with `Collections.UnischemaField` (see unischema.py for more information). __hash__\n # and __eq__ is implemented correctly for UnischemaField. However, a collections.UnischemaField\n # object will not use the __hash__ definied in `petastorm.unischema.py`\n for k in ngram_fields.keys():\n assert len(expected_fields) == len(ngram_fields[k])\n\n for curr_field in expected_fields:\n assert curr_field in ngram_fields[k]\n\n assert TestSchema.id == ngram._timestamp_field\n\n\[email protected]('reader_factory', READER_FACTORIES)\ndef test_ngram_regex_field_resolve(dataset_num_files_1, reader_factory):\n \"\"\"Tests ngram.resolve_regex_field_names function\n \"\"\"\n fields = {\n -1: [\"^id.*\", \"sensor_name\", TestSchema.partition_key],\n 0: [\"^id.*\", \"sensor_name\", TestSchema.partition_key],\n 1: [\"^id.*\", \"sensor_name\", TestSchema.partition_key]\n }\n\n ts_field = '^id$'\n\n ngram = NGram(fields=fields, delta_threshold=10, timestamp_field=ts_field)\n\n expected_fields = {TestSchema.id, TestSchema.id2, TestSchema.id_float, TestSchema.id_odd,\n TestSchema.sensor_name, TestSchema.partition_key}\n\n ngram.resolve_regex_field_names(TestSchema)\n\n ngram_fields = ngram.fields\n\n # Can't do direct set equality between expected fields and ngram.fields b/c of issue\n # with `Collections.UnischemaField` (see unischema.py for more information). __hash__\n # and __eq__ is implemented correctly for UnischemaField. 
However, a collections.UnischemaField\n # object will not use the __hash__ definied in `petastorm.unischema.py`\n for k in ngram_fields.keys():\n assert len(expected_fields) == len(ngram_fields[k])\n\n for curr_field in expected_fields:\n assert curr_field in ngram_fields[k]\n\n assert TestSchema.id == ngram._timestamp_field\n", "# Copyright (c) 2017-2018 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Minimal example of how to read samples from a dataset generated by `generate_hello_world_dataset.py`\nusing tensorflow.\"\"\"\n\nfrom __future__ import print_function\n\nimport tensorflow.compat.v1 as tf # pylint: disable=import-error\n\nfrom petastorm import make_reader\nfrom petastorm.tf_utils import tf_tensors, make_petastorm_dataset\n\n\ndef tensorflow_hello_world(dataset_url='file:///tmp/hello_world_dataset'):\n # Example: tf_tensors will return tensors with dataset data\n with make_reader(dataset_url) as reader:\n tensor = tf_tensors(reader)\n with tf.Session() as sess:\n sample = sess.run(tensor)\n print(sample.id)\n\n # Example: use tf.data.Dataset API\n with make_reader(dataset_url) as reader:\n dataset = make_petastorm_dataset(reader)\n iterator = dataset.make_one_shot_iterator()\n tensor = iterator.get_next()\n with tf.Session() as sess:\n sample = sess.run(tensor)\n print(sample.id)\n\n\nif __name__ == '__main__':\n tensorflow_hello_world()\n" ]
[ [ "numpy.int32", "numpy.zeros" ], [ "tensorflow.compat.v1.Session", "numpy.testing.assert_equal" ], [ "tensorflow.compat.v1.Session" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
parkjh80/studio
[ "48852c4f097f773ce3d408b59f79fda2e2d60470", "48852c4f097f773ce3d408b59f79fda2e2d60470", "6d8d8384272e5e1b2838b12e5557272a19408e89", "48852c4f097f773ce3d408b59f79fda2e2d60470", "48852c4f097f773ce3d408b59f79fda2e2d60470", "48852c4f097f773ce3d408b59f79fda2e2d60470" ]
[ "function/python/brightics/function/manipulation/test/timeseries_distance_test.py", "function/python/brightics/function/statistics/cross_table.py", "function/python/brightics/function/classification/ada_boost_classification.py", "function/python/brightics/function/statistics/test/duncan_test.py", "function/python/brightics/function/transform/split_data.py", "function/python/brightics/common/data/table_data_reader.py" ]
[ "\"\"\"\n Copyright 2019 Samsung SDS\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom brightics.function.manipulation.timeseries_distance import timeseries_distance\nimport unittest\nimport pandas as pd\nimport HtmlTestRunner\nimport numpy as np\nimport os\n\n\nclass TimeseriesDistanceTest(unittest.TestCase):\n\n def setUp(self):\n print(\"*** Timeseries distance UnitTest Start ***\")\n self.test_data = pd.DataFrame({'col1': [], 'col2': []})\n self.test_data['col1'] = [[6, 8, 8, 4], [3, 4, 6, 9], [7, 6, 6, 1], [2, 0, 8, 1], [6, 9, 2, 8]]\n self.test_data['col2'] = [[7, 6, 2, 2], [3, 2, 9, 2], [0, 0, 7, 8], [0, 0, 2, 7], [7, 3, 3, 7]]\n self.test_result = np.array([6.708203932499369, 7.874007874011811, 11.61895003862225, 8.717797887081348, 6.244997998398398])\n\n def tearDown(self):\n print(\"*** Timeseries distance UnitTest End ***\")\n\n def test(self):\n test_result = timeseries_distance(self.test_data, input_col_1='col1', input_col_2='col2', distance_type='Euclidean')['out_table']\n test_result = test_result['distance'].values\n np.testing.assert_array_almost_equal(test_result, self.test_result, 7, 'Incorrect distance')\n\n\nif __name__ == '__main__':\n filepath = os.path.dirname(os.path.abspath(__file__))\n reportFoler = filepath + \"/../../../../../../../reports\"\n unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(\n combine_reports=True, output=reportFoler))\n", "\"\"\"\n Copyright 2019 Samsung SDS\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \n http://www.apache.org/licenses/LICENSE-2.0\n \n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom brightics.common.repr import BrtcReprBuilder, strip_margin, pandasDF2MD\nfrom brightics.function.utils import _model_dict\nfrom brightics.common.groupby import _function_by_group\nfrom brightics.common.utils import check_required_parameters\nfrom brightics.common.validation import raise_runtime_error\n\nimport numpy as np\nimport pandas as pd\n\n\ndef cross_table(table, group_by=None, **params):\n check_required_parameters(_cross_table, params, ['table'])\n if group_by is not None:\n return _function_by_group(_cross_table, table, group_by=group_by, **params)\n else:\n return _cross_table(table, **params)\n\n\ndef _cross_table(table, input_cols_1, input_cols_2, result='N', margins=False):\n \n df1 = [table[col] for col in input_cols_1]\n df2 = [table[col] for col in input_cols_2]\n \n # cross table \n if result == 'N':\n result_table = pd.crosstab(df1, df2, margins=margins) \n elif result == 'N / Row Total':\n result_table = pd.crosstab(df1, df2, margins=margins, normalize='index')\n elif result == 'N / Column Total': \n 
result_table = pd.crosstab(df1, df2, margins=margins, normalize='columns')\n elif result == 'N / Total': \n result_table = pd.crosstab(df1, df2, margins=margins, normalize='all')\n else:\n raise_runtime_error(\"Please check 'result'.\")\n \n # each row and column name \n row_names = list(result_table.index)[:] \n if len(input_cols_1) == 1:\n joined_row_name = [str(i) for i in row_names]\n else:\n if margins == False:\n joined_row_name = ['_'.join(str(s) for s in row_names[i]) for i in range(len(row_names))]\n elif margins == True:\n joined_row_name = ['_'.join(str(s) for s in row_names[i]) for i in range(len(row_names) - 1)] + [row_names[-1][0]]\n \n column_names = list(result_table.columns)[:]\n if len(input_cols_2) == 1:\n joined_column_name = [str(i) for i in column_names]\n else:\n if margins == False:\n joined_column_name = ['_'.join(str(s) for s in column_names[i]) for i in range(len(column_names))]\n elif margins == True:\n joined_column_name = ['_'.join(str(s) for s in column_names[i]) for i in range(len(column_names) - 1)] + [column_names[-1][0]]\n\n # cross table\n if result == 'N':\n result_table.insert(loc=0, column=' ', value=joined_row_name)\n result_table.columns = np.append('N', joined_column_name) \n # cross table normalize by row \n elif result == 'N / Row Total':\n result_table.insert(loc=0, column=' ', value=joined_row_name)\n result_table.columns = np.append('N / Row Total', joined_column_name)\n # cross table normalize by column\n elif result == 'N / Column Total': \n result_table.insert(loc=0, column=' ', value=joined_row_name)\n result_table.columns = np.append('N / Column Total', joined_column_name) \n # cross table normalize by all values \n elif result == 'N / Total': \n result_table.insert(loc=0, column=' ', value=joined_row_name)\n result_table.columns = np.append('N / Total', joined_column_name) \n else:\n raise_runtime_error(\"Please check 'result'.\")\n \n rb = BrtcReprBuilder()\n rb.addMD(strip_margin(\"\"\"\n | ## Cross Table Result\n | ### Result Type : {result}\n |\n | #### Result Table\n |\n | {result_table}\n |\n \"\"\".format(result=result, result_table=pandasDF2MD(result_table, num_rows=len(result_table.index) + 1))))\n\n model = _model_dict('cross_table')\n model['result'] = result\n model['result_table'] = result_table\n model['_repr_brtc_'] = rb.get()\n \n return {'model': model}\n", "\"\"\"\n Copyright 2019 Samsung SDS\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom brightics.common.repr import BrtcReprBuilder\nfrom brightics.common.repr import strip_margin\nfrom brightics.common.repr import plt2MD\nfrom brightics.common.repr import dict2MD\nfrom brightics.common.groupby import _function_by_group\nfrom brightics.common.utils import check_required_parameters\nfrom brightics.common.validation import validate, greater_than_or_equal_to, greater_than\nfrom brightics.common.utils 
import get_default_from_parameters_if_required\nfrom brightics.function.utils import _model_dict\nfrom brightics.common.classify_input_type import check_col_type\n\n\ndef ada_boost_classification_train(table, group_by=None, **params):\n check_required_parameters(_ada_boost_classification_train, params, ['table'])\n params = get_default_from_parameters_if_required(params, _ada_boost_classification_train)\n param_validation_check = [greater_than_or_equal_to(params, 1, 'max_depth'),\n greater_than_or_equal_to(params, 1, 'n_estimators'),\n greater_than(params, 0, 'learning_rate')]\n validate(*param_validation_check)\n if group_by is not None:\n return _function_by_group(_ada_boost_classification_train, table, group_by=group_by, **params)\n else:\n return _ada_boost_classification_train(table, **params)\n\n\ndef _plot_feature_importance(feature_cols, classifier):\n \n feature_importance = classifier.feature_importances_\n indices = np.argsort(feature_importance)\n sorted_feature_cols = np.array(feature_cols)[indices]\n \n n_features = len(feature_cols)\n plt.barh(range(n_features), feature_importance[indices], color='b', align='center')\n for i, v in enumerate(feature_importance[indices]):\n plt.text(v, i, \" {:.2f}\".format(v), color='b', va='center', fontweight='bold')\n \n plt.yticks(np.arange(n_features), sorted_feature_cols)\n plt.xlabel(\"Feature importance\")\n plt.ylabel(\"Feature\")\n plt.tight_layout()\n fig_feature_importance = plt2MD(plt)\n plt.close()\n return fig_feature_importance\n \n \ndef _ada_boost_classification_train(table, feature_cols, label_col, max_depth=1,\n n_estimators=50, learning_rate=1.0, algorithm='SAMME.R', random_state=None):\n \n feature_names, x_train = check_col_type(table, feature_cols)\n y_train = table[label_col]\n\n base_estimator = DecisionTreeClassifier(max_depth=max_depth)\n\n classifier = AdaBoostClassifier(base_estimator, n_estimators, learning_rate, algorithm, random_state)\n\n classifier.fit(x_train, y_train)\n\n params = {'feature_cols': feature_cols,\n 'label_col': label_col,\n 'feature_importance': classifier.feature_importances_,\n 'n_estimators': n_estimators,\n 'learning_rate': learning_rate,\n 'algorithm': algorithm,\n 'random_state': random_state}\n \n model = _model_dict('ada_boost_classification_model')\n get_param = classifier.get_params()\n model['parameters'] = get_param\n model['classifier'] = classifier\n model['params'] = params\n\n fig_feature_importance = _plot_feature_importance(feature_names, classifier)\n params = dict2MD(get_param)\n\n rb = BrtcReprBuilder()\n rb.addMD(strip_margin(\"\"\"\n | ## AdaBoost Classification Train Result\n |\n | ### Feature Importance\n | {fig_feature_importance}\n |\n | ### Parameters\n | {list_parameters}\n |\n \"\"\".format(fig_feature_importance=fig_feature_importance,\n list_parameters=params\n )))\n\n model['_repr_brtc_'] = rb.get()\n feature_importance = classifier.feature_importances_\n feature_importance_table = pd.DataFrame([[feature_names[i], feature_importance[i]] for i in range(len(feature_names))], columns=['feature_name', 'importance'])\n model['feature_importance_table'] = feature_importance_table\n return {'model': model}\n\n\ndef ada_boost_classification_predict(table, model, **params):\n check_required_parameters(_ada_boost_classification_predict, params, ['table', 'model'])\n if '_grouped_data' in model:\n return _function_by_group(_ada_boost_classification_predict, table, model, **params)\n else:\n return _ada_boost_classification_predict(table, model, **params)\n\n\ndef 
_ada_boost_classification_predict(table, model, pred_col_name='prediction', prob_col_prefix='probability', suffix='index'):\n out_table = table.copy()\n classifier = model['classifier']\n _, test_data = check_col_type(table, model['params']['feature_cols'])\n \n out_table[pred_col_name] = classifier.predict(test_data) \n \n classes = classifier.classes_\n if suffix == 'index':\n suffixes = [i for i, _ in enumerate(classes)]\n else:\n suffixes = classes\n \n prob = classifier.predict_proba(test_data)\n prob_col_name = ['{prob_col_prefix}_{suffix}'.format(prob_col_prefix=prob_col_prefix, suffix=suffix) for suffix in suffixes] \n out_col_prob = pd.DataFrame(data=prob, columns=prob_col_name)\n\n out_table = pd.concat([out_table, out_col_prob], axis=1)\n return {'out_table': out_table}\n", "\"\"\"\n Copyright 2019 Samsung SDS\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom brightics.function.statistics.duncan_test import duncan_test\nfrom brightics.common.datasets import load_iris\nimport unittest\nimport pandas as pd\nimport numpy as np\nimport HtmlTestRunner\nimport os\n\n\nclass Duncan(unittest.TestCase):\n \n def setUp(self):\n print(\"*** Duncan UnitTest Start ***\")\n self.testdata = load_iris()\n\n def tearDown(self):\n print(\"*** Duncan UnitTest End ***\")\n \n def test(self):\n duncan_res = duncan_test(self.testdata, response_cols=['sepal_length'], factor_col='species')\n res = duncan_res['result']\n np.testing.assert_array_equal(res['sepal_length_species']['critical_val']['critical_value'], [0.20346880603871212, 0.21415681109106452])\n np.testing.assert_array_equal(res['sepal_length_species']['mean_by_factor']['sepal_length'], [6.587999999999998, 5.936, 5.005999999999999])\n np.testing.assert_array_equal(res['sepal_length_species']['comp_by_factor']['difference'], [0.6519999999999984, 1.581999999999999, 0.9300000000000006])\n np.testing.assert_array_equal(res['sepal_length_species']['comp_by_factor']['significant'], ['YES', 'YES', 'YES'])\n\n\nif __name__ == '__main__':\n filepath = os.path.dirname(os.path.abspath(__file__))\n reportFoler = filepath + \"/../../../../../../../reports\"\n unittest.main(testRunner=HtmlTestRunner.HTMLTestRunner(combine_reports=True, output=reportFoler))\n", "\"\"\"\n Copyright 2019 Samsung SDS\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \n http://www.apache.org/licenses/LICENSE-2.0\n \n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom sklearn.model_selection import train_test_split as sktrain_test_split\nfrom brightics.common.validation import validate, greater_than, from_to\nfrom brightics.common.groupby import _function_by_group\nfrom 
brightics.common.utils import check_required_parameters\nfrom brightics.common.utils import get_default_from_parameters_if_required\n\n\ndef split_data(table, group_by=None, **params):\n params = get_default_from_parameters_if_required(params, _split_data)\n param_validation_check = [greater_than(params, 0.0, 'train_ratio'),\n greater_than(params, 0.0, 'test_ratio'),\n from_to(params, 0, 2**30, 'random_state')]\n \n validate(*param_validation_check)\n check_required_parameters(_split_data, params, ['table'])\n if group_by is not None:\n return _function_by_group(_split_data, table, group_by=group_by, **params)\n else:\n return _split_data(table, **params)\n\n\ndef _split_data(table, train_ratio=7.0, test_ratio=3.0, random_state=None, shuffle=True, stratify=None):\n \n ratio = test_ratio / (train_ratio + test_ratio)\n out_table_train, out_table_test = sktrain_test_split(table, test_size=ratio, random_state=random_state, shuffle=shuffle, stratify=stratify)\n \n return {'train_table' : out_table_train.reset_index(drop=True), 'test_table' : out_table_test.reset_index(drop=True)}\n", "\"\"\"\n Copyright 2019 Samsung SDS\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n \n http://www.apache.org/licenses/LICENSE-2.0\n \n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport pandas as pd\nimport pyarrow\n\nimport brightics.common.data.utils as data_util\n\n\ndef read_parquet_or_csv(path):\n try:\n # try parquet data storage first using path as key\n df = pd.read_parquet(path=data_util.make_data_path_from_key(path),\n engine='pyarrow')\n except pyarrow.lib.ArrowIOError:\n df = read_csv(path)\n\n data_util.validate_column_name(df)\n\n return df\n\n\ndef read_csv(path, engine, delimiter, na_filter, strip_col, quoting, encoding):\n try:\n result = pd.read_csv(path, engine=engine, quoting=quoting, encoding=encoding, na_filter = na_filter, sep = delimiter)\n except Exception:\n result = pd.read_csv(path, engine=engine, quoting=quoting, na_filter = na_filter, sep = delimiter)\n if strip_col:\n result.columns = result.columns.str.strip()\n return result\n\n\ndef read_parquet(path):\n df = pd.read_parquet(path=path, engine='pyarrow')\n data_util.validate_column_name(df)\n return df\n" ]
[ [ "numpy.array", "pandas.DataFrame", "numpy.testing.assert_array_almost_equal" ], [ "pandas.crosstab", "numpy.append" ], [ "pandas.concat", "matplotlib.pyplot.tight_layout", "numpy.arange", "pandas.DataFrame", "sklearn.tree.DecisionTreeClassifier", "sklearn.ensemble.AdaBoostClassifier", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.close", "numpy.argsort", "numpy.array", "matplotlib.pyplot.ylabel" ], [ "numpy.testing.assert_array_equal" ], [ "sklearn.model_selection.train_test_split" ], [ "pandas.read_parquet", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.3", "1.1", "1.5", "1.2" ], "scipy": [], "tensorflow": [] } ]
sujeet-ap/keras-idiomatic-programmer
[ "4db490afea8acf9381cbf3d607583451a2f40a3a", "4db490afea8acf9381cbf3d607583451a2f40a3a" ]
[ "zoo/models_c.py", "zoo/senet/se_resnet.py" ]
[ "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport tensorflow as tf\nfrom tensorflow.keras import Sequential, Model, Input\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.layers import ReLU, Dense, Conv2D, Conv2DTranspose\nfrom tensorflow.keras.layers import DepthwiseConv2D, SeparableConv2D, Dropout\nfrom tensorflow.keras.layers import GlobalAveragePooling2D, Activation, BatchNormalization\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.optimizers import Adam, SGD\nfrom tensorflow.compat.v1.keras.initializers import glorot_uniform, he_normal\nfrom tensorflow.keras.callbacks import LearningRateScheduler\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow.keras.utils import to_categorical\nimport tensorflow_datasets as tfds\nimport tensorflow.keras.backend as K\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\nimport random\nimport math\nimport sys\n\nfrom layers_c import Layers\nfrom preprocess_c import Preprocess\nfrom pretraining_c import Pretraining\nfrom hypertune_c import HyperTune\n\nclass Composable(Layers, Preprocess, Pretraining, HyperTune):\n ''' Composable base (super) class for Models '''\n\n def __init__(self, init_weights=None, reg=None, relu=None, bias=True):\n \"\"\" Constructor\n init_weights : kernel initializer\n reg : kernel regularizer\n relu : clip value for ReLU\n bias : whether to use bias\n \"\"\"\n Layers.__init__(self, init_weights, reg, relu, bias)\n Preprocess.__init__(self)\n Pretraining.__init__(self)\n HyperTune.__init__(self)\n\n # Feature maps encoding at the bottleneck layer in classifier (high dimensionality)\n self._encoding = None\n # Pooled and flattened encodings at the bottleneck layer (low dimensionality)\n self._embedding = None\n # Pre-activation conditional probabilities for classifier\n self._probabilities = None\n # Post-activation conditional probabilities for classifier\n self._softmax = None\n\n self._model = None\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, _model):\n self._model = _model\n\n @property\n def encoding(self):\n return self._encoding\n\n @encoding.setter\n def encoding(self, layer):\n self._encoding = layer\n\n @property\n def embedding(self):\n return self._embedding\n\n @embedding.setter\n def embedding(self, layer):\n self._embedding = layer\n\n @property\n def probabilities(self):\n return self._probabilities\n\n @probabilities.setter\n def probabilities(self, layer):\n self._probabilities = layer\n\n ###\n # Training\n ###\n\n def compile(self, loss='categorical_crossentropy', optimizer=Adam(lr=0.001, decay=1e-5), metrics=['acc']):\n \"\"\" Compile the model for training\n loss : the loss function\n optimizer: the optimizer\n metrics : metrics to report\n \"\"\"\n self.model.compile(loss=loss, optimizer=optimizer, metrics=metrics)\n\n # training variables\n hidden_dropout = None # hidden dropout in classifier\n i_lr = 0 # initial rate during full 
training\n e_decay = 0 # weight decay rate during full training\n e_steps = 0 # number of steps (batches) in an epoch\n t_steps = 0 # total number of steps in training job\n\n def time_decay(self, epoch, lr):\n \"\"\" Time-based Decay\n \"\"\"\n return lr * (1. / (1. + self.e_decay[1] * epoch))\n\n def step_decay(self, epoch, lr):\n \"\"\" Step-based decay\n \"\"\"\n return self.i_lr * self.e_decay[1]**(epoch)\n\n def exp_decay(self, epoch, lr):\n \"\"\" Exponential Decay\n \"\"\"\n return self.i_lr * math.exp(-self.e_decay[1] * epoch)\n\n def cosine_decay(self, epoch, lr, alpha=0.0):\n \"\"\" Cosine Decay\n \"\"\"\n cosine_decay = 0.5 * (1 + np.cos(np.pi * (self.e_steps * epoch) / self.t_steps))\n decayed = (1 - alpha) * cosine_decay + alpha\n return lr * decayed\n\n def training_scheduler(self, epoch, lr):\n \"\"\" Learning Rate scheduler for full-training\n epoch : epoch number\n lr : current learning rate\n \"\"\"\n # First epoch (not started) - do nothing\n if epoch == 0:\n return lr\n\n # Hidden dropout unit in classifier\n if self.hidden_dropout is not None:\n # If training accuracy and validation accuracy more than 3% apart\n if self.model.history.history['acc'][epoch-1] > self.model.history.history['val_acc'][epoch-1] + 0.03:\n if self.hidden_dropout.rate == 0.0:\n self.hidden_dropout.rate = 0.5\n elif self.hidden_dropout.rate < 0.75:\n self.hidden_dropout.rate *= 1.1\n print(\"*** Overfitting, set dropout to\", self.hidden_dropout.rate)\n else:\n if self.hidden_dropout.rate != 0.0:\n print(\"*** Turning off dropout\")\n self.hidden_dropout.rate = 0.0\n\n if self.e_decay[0] is None:\n return lr\n\n # Decay the learning rate\n if self.e_decay[0] == 'time':\n lr = self.time_decay(epoch, lr)\n elif self.e_decay[0] == 'step':\n lr = self.step_decay(epoch, lr)\n elif self.e_decay[0] == 'exp':\n lr = self.exp_decay(epoch, lr)\n else:\n lr = self.cosine_decay(epoch, lr)\n return lr\n\n def training(self, x_train, y_train, epochs=10, batch_size=32, lr=0.001, decay=(None, 0)):\n \"\"\" Full Training of the Model\n x_train : training images\n y_train : training labels\n epochs : number of epochs\n batch_size : size of batch\n lr : learning rate\n decay : step-wise learning rate decay\n \"\"\"\n\n print(\"*** Full Training\")\n\n # Check for hidden dropout layer in classifier\n for layer in self.model.layers:\n if isinstance(layer, Dropout):\n self.hidden_dropout = layer\n break \n\n if decay is None or 0:\n decay = (None, 0)\n elif isinstance(decay, float):\n decay = ('time', decay)\n elif not isinstance(decay, tuple):\n raise Exception(\"Training: decay must be (time, value)\")\n elif decay[0] not in [None, 'time', 'step', 'exp', 'cosine']:\n raise Exception(\"Training: invalid method for decay\")\n\n self.i_lr = lr\n self.e_decay = decay\n self.e_steps = x_train.shape[0] // batch_size\n self.t_steps = self.e_steps * epochs\n self.compile(optimizer=Adam(lr=lr, decay=decay[1]))\n\n lrate = LearningRateScheduler(self.training_scheduler, verbose=1)\n self.model.fit(x_train, y_train, epochs=epochs, batch_size=batch_size, validation_split=0.1, verbose=1,\n callbacks=[lrate])\n\n def evaluate(self, x_test, y_test):\n \"\"\" Call underlying evaluate() method\n \"\"\"\n return self._model.evaluate(x_test, y_test)\n\n def cifar10(self, epochs=10, decay=('cosine', 0)):\n \"\"\" Train on CIFAR-10\n epochs : number of epochs for full training\n \"\"\"\n from tensorflow.keras.datasets import cifar10\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n x_train, x_test = 
self.standardization(x_train, x_test)\n y_train = to_categorical(y_train, 10)\n y_test = to_categorical(y_test, 10)\n y_train = self.label_smoothing(y_train, 10, 0.1)\n\n # compile the model\n self.compile(loss='categorical_crossentropy', metrics=['acc'])\n\n self.warmup(x_train, y_train)\n\n lr, batch_size = self.random_search(x_train, y_train, x_test, y_test)\n\n self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,\n lr=lr, decay=decay)\n self.evaluate(x_test, y_test)\n\n def cifar100(self, epochs=20, decay=('cosine', 0)):\n \"\"\" Train on CIFAR-100\n epochs : number of epochs for full training\n \"\"\"\n from tensorflow.keras.datasets import cifar100\n (x_train, y_train), (x_test, y_test) = cifar100.load_data()\n x_train, x_test = self.normalization(x_train, x_test)\n y_train = to_categorical(y_train, 100)\n y_test = to_categorical(y_test, 100)\n y_train = self.label_smoothing(y_train, 10, 0.1)\n self.compile(loss='categorical_crossentropy', metrics=['acc'])\n\n self.warmup(x_train, y_train)\n\n lr, batch_size = self.grid_search(x_train, y_train, x_test, y_test)\n\n self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,\n lr=lr, decay=decay)\n self.evaluate(x_test, y_test)\n\n def coil100(self, epochs=20, decay=('cosine', 0)):\n \"\"\"\n \"\"\"\n # Get TF.dataset generator for COIL100\n train, info = tfds.load('coil100', split='train', shuffle_files=True, with_info=True, as_supervised=True)\n n_classes = info.features['label'].num_classes\n n_images = info.splits['train'].num_examples\n input_shape = info.features['image'].shape\n\n # Get the dataset into memory\n train = train.shuffle(n_images).batch(n_images)\n for images, labels in train.take(1):\n pass\n \n images = np.asarray(images)\n images, _ = self.standardization(images, None)\n labels = to_categorical(np.asarray(labels), n_classes)\n\n # split the dataset into train/test\n x_train, x_test, y_train, y_test = train_test_split(images, labels, test_size=0.2)\n\n self.compile(loss='categorical_crossentropy', metrics=['acc'])\n\n self.warmup(x_train, y_train)\n\n lr, batch_size = self.grid_search(x_train, y_train, x_test, y_test)\n\n self.training(x_train, y_train, epochs=epochs, batch_size=batch_size,\n lr=lr, decay=decay)\n self.evaluate(x_test, y_test)\n\n", "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# SE-ResNet (50/101/152) v1.0\n# Paper: https://arxiv.org/pdf/1709.01507.pdf\n\nimport tensorflow as tf\nfrom tensorflow.keras import Input, Model\nfrom tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\nfrom tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add\n\ndef stem(inputs):\n \"\"\" Construct the Stem Convolutional Group \n inputs : the input vector\n \"\"\"\n # The 224x224 images are zero padded (black - no signal) to be 230x230 images prior to the first convolution\n x = ZeroPadding2D(padding=(3, 3))(inputs)\n \n # First Convolutional layer which uses a large (coarse) filter \n x 
= Conv2D(64, (7, 7), strides=(2, 2), padding='valid', use_bias=False, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = ReLU()(x)\n \n # Pooled feature maps will be reduced by 75%\n x = ZeroPadding2D(padding=(1, 1))(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n return x\n\ndef learner(x, groups, ratio):\n \"\"\" Construct the Learner\n x : input to the learner\n\tgroups: list of groups: number of filters and blocks\n ratio : amount of filter reduction in squeeze\n \"\"\"\n # First Residual Block Group (not strided)\n n_filters, n_blocks = groups.pop(0)\n x = group(x, n_filters, n_blocks, ratio, strides=(1, 1))\n\n # Remaining Residual Block Groups (strided)\n for n_filters, n_blocks in groups:\n \tx = group(x, n_filters, n_blocks, ratio)\n return x\t\n\ndef group(x, n_filters, n_blocks, ratio, strides=(2, 2)):\n \"\"\" Construct the Squeeze-Excite Group\n\tx : input to the group\n n_blocks : number of blocks\n\tn_filters: number of filters\n ratio : amount of filter reduction during squeeze\n strides : whether projection block is strided\n \"\"\"\n # first block uses linear projection to match the doubling of filters between groups\n x = projection_block(x, n_filters, strides=strides, ratio=ratio)\n\n # remaining blocks use identity link\n for _ in range(n_blocks-1):\n x = identity_block(x, n_filters, ratio=ratio)\n return x\n\ndef squeeze_excite_block(x, ratio=16):\n \"\"\" Create a Squeeze and Excite block\n x : input to the block\n ratio : amount of filter reduction during squeeze\n \"\"\" \n # Remember the input\n shortcut = x\n \n # Get the number of filters on the input\n filters = x.shape[-1]\n\n # Squeeze (dimensionality reduction)\n # Do global average pooling across the filters, which will output a 1D vector\n x = GlobalAveragePooling2D()(x)\n \n # Reshape into 1x1 feature maps (1x1xC)\n x = Reshape((1, 1, filters))(x)\n \n # Reduce the number of filters (1x1xC/r)\n x = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(x)\n\n # Excitation (dimensionality restoration)\n # Restore the number of filters (1x1xC)\n x = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(x)\n\n # Scale - multiply the squeeze/excitation output with the input (WxHxC)\n x = Multiply()([shortcut, x])\n return x\n \ndef identity_block(x, n_filters, ratio=16):\n \"\"\" Create a Bottleneck Residual Block with Identity Link\n x : input into the block\n n_filters: number of filters\n ratio : amount of filter reduction during squeeze\n \"\"\"\n # Save input vector (feature maps) for the identity link\n shortcut = x\n \n ## Construct the 1x1, 3x3, 1x1 residual block (fig 3c)\n\n # Dimensionality reduction\n x = Conv2D(n_filters, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = ReLU()(x)\n\n # Bottleneck layer\n x = Conv2D(n_filters, (3, 3), strides=(1, 1), padding=\"same\", use_bias=False, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = ReLU()(x)\n\n # Dimensionality restoration - increase the number of output filters by 4X\n x = Conv2D(n_filters * 4, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n \n # Pass the output through the squeeze and excitation block\n x = squeeze_excite_block(x, ratio)\n \n # Add the identity link (input) to the output of the residual block\n x = Add()([shortcut, x])\n x = ReLU()(x)\n return x\n\ndef projection_block(x, n_filters, strides=(2,2), 
ratio=16):\n \"\"\" Create Bottleneck Residual Block with Projection Shortcut\n Increase the number of filters by 4X\n x : input into the block\n n_filters: number of filters\n strides : whether entry convolution is strided (i.e., (2, 2) vs (1, 1))\n ratio : amount of filter reduction during squeeze\n \"\"\"\n # Construct the projection shortcut\n # Increase filters by 4X to match shape when added to output of block\n shortcut = Conv2D(4 * n_filters, (1, 1), strides=strides, use_bias=False, kernel_initializer='he_normal')(x)\n shortcut = BatchNormalization()(shortcut)\n\n ## Construct the 1x1, 3x3, 1x1 residual block (fig 3c)\n\n # Dimensionality reduction\n # Feature pooling when strides=(2, 2)\n x = Conv2D(n_filters, (1, 1), strides=strides, use_bias=False, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = ReLU()(x)\n\n # Bottleneck layer\n x = Conv2D(n_filters, (3, 3), strides=(1, 1), padding='same', use_bias=False, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n x = ReLU()(x)\n\n # Dimensionality restoration - increase the number of filters by 4X\n x = Conv2D(4 * n_filters, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer='he_normal')(x)\n x = BatchNormalization()(x)\n\n # Pass the output through the squeeze and excitation block\n x = squeeze_excite_block(x, ratio)\n\n # Add the projection shortcut link to the output of the residual block\n x = Add()([x, shortcut])\n x = ReLU()(x)\n return x\n\ndef classifier(x, n_classes):\n \"\"\" Create the Classifier Group \n x : input to the classifier\n n_classes : number of output classes\n \"\"\"\n # Pool at the end of all the convolutional residual blocks\n x = GlobalAveragePooling2D()(x)\n\n # Final Dense Outputting Layer for the outputs\n outputs = Dense(n_classes, activation='softmax', kernel_initializer='he_normal')(x)\n return outputs\n\n# Meta-parameter: # Meta-parameter: list of groups: filter size and number of blocks\ngroups = { 50 : [ (64, 3), (128, 4), (256, 6), (512, 3) ],\t\t# SE-ResNet50\n 101: [ (64, 3), (128, 4), (256, 23), (512, 3) ],\t\t# SE-ResNet101\n 152: [ (64, 3), (128, 8), (256, 36), (512, 3) ]\t\t# SE-ResNet152\n }\n\n# Meta-parameter: Amount of filter reduction in squeeze operation\nratio = 16\n \n# The input tensor\ninputs = Input(shape=(224, 224, 3))\n\n# The Stem Group\nx = stem(inputs)\n\n# The Learnet\nx = learner(x, groups[50], ratio)\n\n# The Classifier for 1000 classes\noutputs = classifier(x, 1000)\n\n# Instantiate the Model\nmodel = Model(inputs, outputs)\n" ]
[ [ "numpy.asarray", "tensorflow.keras.datasets.cifar100.load_data", "tensorflow.keras.callbacks.LearningRateScheduler", "tensorflow.keras.datasets.cifar10.load_data", "sklearn.model_selection.train_test_split", "numpy.cos", "tensorflow.keras.optimizers.Adam", "tensorflow.keras.utils.to_categorical" ], [ "tensorflow.keras.layers.ReLU", "tensorflow.keras.Input", "tensorflow.keras.layers.GlobalAveragePooling2D", "tensorflow.keras.layers.Dense", "tensorflow.keras.layers.Conv2D", "tensorflow.keras.Model", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.BatchNormalization", "tensorflow.keras.layers.Multiply", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.layers.Reshape", "tensorflow.keras.layers.MaxPooling2D" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] } ]
Forest75/Open3D
[ "61b90a6e06a2c209ad2b9f1c57fbd5f21e879dae", "61b90a6e06a2c209ad2b9f1c57fbd5f21e879dae" ]
[ "examples/Python/Basic/working_with_numpy.py", "examples/Python/Basic/icp_registration.py" ]
[ "# Open3D: www.open3d.org\n# The MIT License (MIT)\n# See license file or visit www.open3d.org for details\n\n# examples/Python/Basic/working_with_numpy.py\n\nimport copy\nimport numpy as np\nimport open3d as o3d\n\nif __name__ == \"__main__\":\n\n # generate some neat n times 3 matrix using a variant of sync function\n x = np.linspace(-3, 3, 401)\n mesh_x, mesh_y = np.meshgrid(x, x)\n z = np.sinc((np.power(mesh_x, 2) + np.power(mesh_y, 2)))\n z_norm = (z - z.min()) / (z.max() - z.min())\n xyz = np.zeros((np.size(mesh_x), 3))\n xyz[:, 0] = np.reshape(mesh_x, -1)\n xyz[:, 1] = np.reshape(mesh_y, -1)\n xyz[:, 2] = np.reshape(z_norm, -1)\n print('xyz')\n print(xyz)\n\n # Pass xyz to Open3D.o3d.geometry.PointCloud and visualize\n pcd = o3d.geometry.PointCloud()\n pcd.points = o3d.utility.Vector3dVector(xyz)\n o3d.io.write_point_cloud(\"../../TestData/sync.ply\", pcd)\n\n # Load saved point cloud and visualize it\n pcd_load = o3d.io.read_point_cloud(\"../../TestData/sync.ply\")\n o3d.visualization.draw_geometries([pcd_load])\n\n # convert Open3D.o3d.geometry.PointCloud to numpy array\n xyz_load = np.asarray(pcd_load.points)\n print('xyz_load')\n print(xyz_load)\n\n # save z_norm as an image (change [0,1] range to [0,255] range with uint8 type)\n img = o3d.geometry.Image((z_norm * 255).astype(np.uint8))\n o3d.io.write_image(\"../../TestData/sync.png\", img)\n o3d.visualization.draw_geometries([img])\n", "# Open3D: www.open3d.org\n# The MIT License (MIT)\n# See license file or visit www.open3d.org for details\n\n# examples/Python/Basic/icp_registration.py\n\nimport open3d as o3d\nimport numpy as np\nimport copy\n\n\ndef draw_registration_result(source, target, transformation):\n source_temp = copy.deepcopy(source)\n target_temp = copy.deepcopy(target)\n source_temp.paint_uniform_color([1, 0.706, 0])\n target_temp.paint_uniform_color([0, 0.651, 0.929])\n source_temp.transform(transformation)\n o3d.visualization.draw_geometries([source_temp, target_temp])\n\n\nif __name__ == \"__main__\":\n source = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_0.pcd\")\n target = o3d.io.read_point_cloud(\"../../TestData/ICP/cloud_bin_1.pcd\")\n threshold = 0.02\n trans_init = np.asarray([[0.862, 0.011, -0.507, 0.5],\n [-0.139, 0.967, -0.215, 0.7],\n [0.487, 0.255, 0.835, -1.4], [0.0, 0.0, 0.0, 1.0]])\n draw_registration_result(source, target, trans_init)\n print(\"Initial alignment\")\n evaluation = o3d.registration.evaluate_registration(source, target,\n threshold, trans_init)\n print(evaluation)\n\n print(\"Apply point-to-point ICP\")\n reg_p2p = o3d.registration.registration_icp(\n source, target, threshold, trans_init,\n o3d.registration.TransformationEstimationPointToPoint())\n print(reg_p2p)\n print(\"Transformation is:\")\n print(reg_p2p.transformation)\n print(\"\")\n draw_registration_result(source, target, reg_p2p.transformation)\n\n print(\"Apply point-to-plane ICP\")\n reg_p2l = o3d.registration.registration_icp(\n source, target, threshold, trans_init,\n o3d.registration.TransformationEstimationPointToPlane())\n print(reg_p2l)\n print(\"Transformation is:\")\n print(reg_p2l.transformation)\n print(\"\")\n draw_registration_result(source, target, reg_p2l.transformation)\n" ]
[ [ "numpy.linspace", "numpy.power", "numpy.reshape", "numpy.asarray", "numpy.size", "numpy.meshgrid" ], [ "numpy.asarray" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
hellpoethero/FootballPython
[ "afb67446f01348f723c29bc753fb0366e341bfb7" ]
[ "ReadFBRefData.py" ]
[ "import pandas as pd\n\n\ndef read_csv(filename):\n df = pd.read_csv(filename, header=[0, 1])\n df.columns = rename_columns(df)\n return df\n\n\ndef rename_columns(df):\n new_cols = []\n for col in df.columns:\n if \"Unnamed:\" in col[0]:\n new_cols.append(col[1])\n else:\n new_cols.append(col[0] + \" \" + col[1])\n return new_cols\n" ]
[ [ "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
Shreyaansh-darkside/arviz
[ "b2c4687ddd4fdfe21c259b7d6c64269050596d41", "b2c4687ddd4fdfe21c259b7d6c64269050596d41", "b2c4687ddd4fdfe21c259b7d6c64269050596d41", "b2c4687ddd4fdfe21c259b7d6c64269050596d41" ]
[ "arviz/stats/stats.py", "arviz/plots/backends/matplotlib/jointplot.py", "arviz/plots/backends/matplotlib/ppcplot.py", "arviz/data/inference_data.py" ]
[ "# pylint: disable=too-many-lines\n\"\"\"Statistical functions in ArviZ.\"\"\"\nimport warnings\nimport logging\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom typing import Optional, List, Union\n\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as st\nfrom scipy.optimize import minimize\nimport xarray as xr\n\nfrom ..data import convert_to_inference_data, convert_to_dataset, InferenceData, CoordSpec, DimSpec\nfrom .diagnostics import _multichain_statistics, _mc_error, ess\nfrom .stats_utils import (\n make_ufunc as _make_ufunc,\n wrap_xarray_ufunc as _wrap_xarray_ufunc,\n logsumexp as _logsumexp,\n ELPDData,\n stats_variance_2d as svar,\n _circular_standard_deviation,\n get_log_likelihood as _get_log_likelihood,\n)\nfrom ..numeric_utils import _fast_kde, histogram, get_bins\nfrom ..utils import _var_names, Numba, _numba_var, get_coords, credible_interval_warning\nfrom ..rcparams import rcParams\n\n_log = logging.getLogger(__name__)\n\n__all__ = [\n \"apply_test_function\",\n \"compare\",\n \"hdi\",\n \"hpd\",\n \"loo\",\n \"loo_pit\",\n \"psislw\",\n \"r2_score\",\n \"summary\",\n \"waic\",\n]\n\n\ndef compare(\n dataset_dict, ic=None, method=\"BB-pseudo-BMA\", b_samples=1000, alpha=1, seed=None, scale=None\n):\n r\"\"\"Compare models based on PSIS-LOO `loo` or WAIC `waic` cross-validation.\n\n LOO is leave-one-out (PSIS-LOO `loo`) cross-validation and\n WAIC is the widely applicable information criterion.\n Read more theory here - in a paper by some of the leading authorities\n on model selection dx.doi.org/10.1111/1467-9868.00353\n\n Parameters\n ----------\n dataset_dict: dict[str] -> InferenceData\n A dictionary of model names and InferenceData objects\n ic: str\n Information Criterion (PSIS-LOO `loo` or WAIC `waic`) used to compare models. Defaults to\n ``rcParams[\"stats.information_criterion\"]``.\n method: str\n Method used to estimate the weights for each model. Available options are:\n\n - 'stacking' : stacking of predictive distributions.\n - 'BB-pseudo-BMA' : (default) pseudo-Bayesian Model averaging using Akaike-type\n weighting. The weights are stabilized using the Bayesian bootstrap.\n - 'pseudo-BMA': pseudo-Bayesian Model averaging using Akaike-type\n weighting, without Bootstrap stabilization (not recommended).\n\n For more information read https://arxiv.org/abs/1704.02030\n b_samples: int\n Number of samples taken by the Bayesian bootstrap estimation.\n Only useful when method = 'BB-pseudo-BMA'.\n alpha: float\n The shape parameter in the Dirichlet distribution used for the Bayesian bootstrap. Only\n useful when method = 'BB-pseudo-BMA'. When alpha=1 (default), the distribution is uniform\n on the simplex. A smaller alpha will keeps the final weights more away from 0 and 1.\n seed: int or np.random.RandomState instance\n If int or RandomState, use it for seeding Bayesian bootstrap. Only\n useful when method = 'BB-pseudo-BMA'. Default None the global\n np.random state is used.\n scale: str\n Output scale for IC. Available options are:\n\n - `log` : (default) log-score (after Vehtari et al. (2017))\n - `negative_log` : -1 * (log-score)\n - `deviance` : -2 * (log-score)\n\n A higher log-score (or a lower deviance) indicates a model with better predictive\n accuracy.\n\n Returns\n -------\n A DataFrame, ordered from best to worst model (measured by information criteria).\n The index reflects the key with which the models are passed to this function. The columns are:\n rank: The rank-order of the models. 
0 is the best.\n IC: Information Criteria (PSIS-LOO `loo` or WAIC `waic`).\n Higher IC indicates higher out-of-sample predictive fit (\"better\" model). Default LOO.\n If `scale` is `deviance` or `negative_log` smaller IC indicates\n higher out-of-sample predictive fit (\"better\" model).\n pIC: Estimated effective number of parameters.\n dIC: Relative difference between each IC (PSIS-LOO `loo` or WAIC `waic`)\n and the lowest IC (PSIS-LOO `loo` or WAIC `waic`).\n The top-ranked model is always 0.\n weight: Relative weight for each model.\n This can be loosely interpreted as the probability of each model (among the compared model)\n given the data. By default the uncertainty in the weights estimation is considered using\n Bayesian bootstrap.\n SE: Standard error of the IC estimate.\n If method = BB-pseudo-BMA these values are estimated using Bayesian bootstrap.\n dSE: Standard error of the difference in IC between each model and the top-ranked model.\n It's always 0 for the top-ranked model.\n warning: A value of 1 indicates that the computation of the IC may not be reliable.\n This could be indication of WAIC/LOO starting to fail see\n http://arxiv.org/abs/1507.04544 for details.\n scale: Scale used for the IC.\n\n Examples\n --------\n Compare the centered and non centered models of the eight school problem:\n\n .. ipython::\n\n In [1]: import arviz as az\n ...: data1 = az.load_arviz_data(\"non_centered_eight\")\n ...: data2 = az.load_arviz_data(\"centered_eight\")\n ...: compare_dict = {\"non centered\": data1, \"centered\": data2}\n ...: az.compare(compare_dict)\n\n Compare the models using LOO-CV, returning the IC in log scale and calculating the\n weights using the stacking method.\n\n .. ipython::\n\n In [1]: az.compare(compare_dict, ic=\"loo\", method=\"stacking\", scale=\"log\")\n\n See Also\n --------\n loo : Compute the Pareto Smoothed importance sampling Leave One Out cross-validation.\n waic : Compute the widely applicable information criterion.\n\n \"\"\"\n names = list(dataset_dict.keys())\n scale = rcParams[\"stats.ic_scale\"] if scale is None else scale.lower()\n if scale == \"log\":\n scale_value = 1\n ascending = False\n warnings.warn(\n \"\\nThe scale is now log by default. 
Use 'scale' argument or \"\n \"'stats.ic_scale' rcParam if you rely on a specific value.\\nA higher \"\n \"log-score (or a lower deviance) indicates a model with better predictive \"\n \"accuracy.\"\n )\n else:\n if scale == \"negative_log\":\n scale_value = -1\n else:\n scale_value = -2\n ascending = True\n\n ic = rcParams[\"stats.information_criterion\"] if ic is None else ic.lower()\n if ic == \"loo\":\n ic_func = loo\n df_comp = pd.DataFrame(\n index=names,\n columns=[\n \"rank\",\n \"loo\",\n \"p_loo\",\n \"d_loo\",\n \"weight\",\n \"se\",\n \"dse\",\n \"warning\",\n \"loo_scale\",\n ],\n )\n scale_col = \"loo_scale\"\n elif ic == \"waic\":\n ic_func = waic\n df_comp = pd.DataFrame(\n index=names,\n columns=[\n \"rank\",\n \"waic\",\n \"p_waic\",\n \"d_waic\",\n \"weight\",\n \"se\",\n \"dse\",\n \"warning\",\n \"waic_scale\",\n ],\n )\n scale_col = \"waic_scale\"\n else:\n raise NotImplementedError(\"The information criterion {} is not supported.\".format(ic))\n\n if method.lower() not in [\"stacking\", \"bb-pseudo-bma\", \"pseudo-bma\"]:\n raise ValueError(\"The method {}, to compute weights, is not supported.\".format(method))\n\n ic_se = \"{}_se\".format(ic)\n p_ic = \"p_{}\".format(ic)\n ic_i = \"{}_i\".format(ic)\n\n ics = pd.DataFrame()\n names = []\n for name, dataset in dataset_dict.items():\n names.append(name)\n ics = ics.append([ic_func(dataset, pointwise=True, scale=scale)])\n ics.index = names\n ics.sort_values(by=ic, inplace=True, ascending=ascending)\n ics[ic_i] = ics[ic_i].apply(lambda x: x.values.flatten())\n\n if method.lower() == \"stacking\":\n rows, cols, ic_i_val = _ic_matrix(ics, ic_i)\n exp_ic_i = np.exp(ic_i_val / scale_value)\n last_col = cols - 1\n\n def w_fuller(weights):\n return np.concatenate((weights, [max(1.0 - np.sum(weights), 0.0)]))\n\n def log_score(weights):\n w_full = w_fuller(weights)\n score = 0.0\n for i in range(rows):\n score += np.log(np.dot(exp_ic_i[i], w_full))\n return -score\n\n def gradient(weights):\n w_full = w_fuller(weights)\n grad = np.zeros(last_col)\n for k in range(last_col - 1):\n for i in range(rows):\n grad[k] += (exp_ic_i[i, k] - exp_ic_i[i, last_col]) / np.dot(\n exp_ic_i[i], w_full\n )\n return -grad\n\n theta = np.full(last_col, 1.0 / cols)\n bounds = [(0.0, 1.0) for _ in range(last_col)]\n constraints = [\n {\"type\": \"ineq\", \"fun\": lambda x: 1.0 - np.sum(x)},\n {\"type\": \"ineq\", \"fun\": np.sum},\n ]\n\n weights = minimize(\n fun=log_score, x0=theta, jac=gradient, bounds=bounds, constraints=constraints\n )\n\n weights = w_fuller(weights[\"x\"])\n ses = ics[ic_se]\n\n elif method.lower() == \"bb-pseudo-bma\":\n rows, cols, ic_i_val = _ic_matrix(ics, ic_i)\n ic_i_val = ic_i_val * rows\n\n b_weighting = st.dirichlet.rvs(alpha=[alpha] * rows, size=b_samples, random_state=seed)\n weights = np.zeros((b_samples, cols))\n z_bs = np.zeros_like(weights)\n for i in range(b_samples):\n z_b = np.dot(b_weighting[i], ic_i_val)\n u_weights = np.exp((z_b - np.min(z_b)) / scale_value)\n z_bs[i] = z_b # pylint: disable=unsupported-assignment-operation\n weights[i] = u_weights / np.sum(u_weights)\n\n weights = weights.mean(axis=0)\n ses = pd.Series(z_bs.std(axis=0), index=names) # pylint: disable=no-member\n\n elif method.lower() == \"pseudo-bma\":\n min_ic = ics.iloc[0][ic]\n z_rv = np.exp((ics[ic] - min_ic) / scale_value)\n weights = z_rv / np.sum(z_rv)\n ses = ics[ic_se]\n\n if np.any(weights):\n min_ic_i_val = ics[ic_i].iloc[0]\n for idx, val in enumerate(ics.index):\n res = ics.loc[val]\n if scale_value < 0:\n diff = 
res[ic_i] - min_ic_i_val\n else:\n diff = min_ic_i_val - res[ic_i]\n d_ic = np.sum(diff)\n d_std_err = np.sqrt(len(diff) * np.var(diff))\n std_err = ses.loc[val]\n weight = weights[idx]\n df_comp.at[val] = (\n idx,\n res[ic],\n res[p_ic],\n d_ic,\n weight,\n std_err,\n d_std_err,\n res[\"warning\"],\n res[scale_col],\n )\n\n return df_comp.sort_values(by=ic, ascending=ascending)\n\n\ndef _ic_matrix(ics, ic_i):\n \"\"\"Store the previously computed pointwise predictive accuracy values (ics) in a 2D matrix.\"\"\"\n cols, _ = ics.shape\n rows = len(ics[ic_i].iloc[0])\n ic_i_val = np.zeros((rows, cols))\n\n for idx, val in enumerate(ics.index):\n ic = ics.loc[val][ic_i]\n\n if len(ic) != rows:\n raise ValueError(\"The number of observations should be the same across all models\")\n\n ic_i_val[:, idx] = ic\n\n return rows, cols, ic_i_val\n\n\ndef hpd(\n # pylint: disable=unused-argument\n ary,\n hdi_prob=None,\n circular=False,\n multimodal=False,\n skipna=False,\n group=\"posterior\",\n var_names=None,\n filter_vars=None,\n coords=None,\n max_modes=10,\n **kwargs,\n):\n \"\"\"Pending deprecation. Please refer to :func:`~arviz.hdi`.\"\"\"\n # pylint: enable=unused-argument\n warnings.warn((\"hpd will be deprecated \" \"Please replace hdi\"),)\n return hdi(\n ary,\n hdi_prob,\n circular,\n multimodal,\n skipna,\n group,\n var_names,\n filter_vars,\n coords,\n max_modes,\n **kwargs,\n )\n\n\ndef hdi(\n ary,\n hdi_prob=None,\n circular=False,\n multimodal=False,\n skipna=False,\n group=\"posterior\",\n var_names=None,\n filter_vars=None,\n coords=None,\n max_modes=10,\n **kwargs,\n):\n \"\"\"\n Calculate highest density interval (HDI) of array for given probability.\n\n The HDI is the minimum width Bayesian credible interval (BCI).\n\n Parameters\n ----------\n ary: obj\n object containing posterior samples.\n Any object that can be converted to an az.InferenceData object.\n Refer to documentation of az.convert_to_dataset for details.\n hdi_prob: float, optional\n HDI prob for which interval will be computed. Defaults to ``stats.hdi_prob`` rcParam.\n circular: bool, optional\n Whether to compute the hdi taking into account `x` is a circular variable\n (in the range [-np.pi, np.pi]) or not. Defaults to False (i.e non-circular variables).\n Only works if multimodal is False.\n multimodal: bool, optional\n If true it may compute more than one hdi interval if the distribution is multimodal and the\n modes are well separated.\n skipna: bool, optional\n If true ignores nan values when computing the hdi interval. Defaults to false.\n group: str, optional\n Specifies which InferenceData group should be used to calculate hdi.\n Defaults to 'posterior'\n var_names: list, optional\n Names of variables to include in the hdi report. Prefix the variables by `~`\n when you want to exclude them from the report: `[\"~beta\"]` instead of `[\"beta\"]`\n (see `az.summary` for more details).\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. If \"like\",\n interpret var_names as substrings of the real variables names. If \"regex\",\n interpret var_names as regular expressions on the real variables names. 
A la\n `pandas.filter`.\n coords: mapping, optional\n Specifies the subset over to calculate hdi.\n max_modes: int, optional\n Specifies the maximum number of modes for multimodal case.\n kwargs: dict, optional\n Additional keywords passed to :func:`~arviz.wrap_xarray_ufunc`.\n\n Returns\n -------\n np.ndarray or xarray.Dataset, depending upon input\n lower(s) and upper(s) values of the interval(s).\n\n See Also\n --------\n plot_hdi : Plot HDI intervals for regression data.\n xarray.Dataset.quantile : Calculate quantiles of array for given probabilities.\n\n Examples\n --------\n Calculate the HDI of a Normal random variable:\n\n .. ipython::\n\n In [1]: import arviz as az\n ...: import numpy as np\n ...: data = np.random.normal(size=2000)\n ...: az.hdi(data, hdi_prob=.68)\n\n Calculate the HDI of a dataset:\n\n .. ipython::\n\n In [1]: import arviz as az\n ...: data = az.load_arviz_data('centered_eight')\n ...: az.hdi(data)\n\n We can also calculate the HDI of some of the variables of dataset:\n\n .. ipython::\n\n In [1]: az.hdi(data, var_names=[\"mu\", \"theta\"])\n\n If we want to calculate the HDI over specified dimension of dataset,\n we can pass `input_core_dims` by kwargs:\n\n .. ipython::\n\n In [1]: az.hdi(data, input_core_dims = [[\"chain\"]])\n\n We can also calculate the hdi over a particular selection over all groups:\n\n .. ipython::\n\n In [1]: az.hdi(data, coords={\"chain\":[0, 1, 3]}, input_core_dims = [[\"draw\"]])\n\n \"\"\"\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n\n func_kwargs = {\n \"hdi_prob\": hdi_prob,\n \"skipna\": skipna,\n \"out_shape\": (max_modes, 2) if multimodal else (2,),\n }\n kwargs.setdefault(\"output_core_dims\", [[\"hdi\", \"mode\"] if multimodal else [\"hdi\"]])\n if not multimodal:\n func_kwargs[\"circular\"] = circular\n else:\n func_kwargs[\"max_modes\"] = max_modes\n\n func = _hdi_multimodal if multimodal else _hdi\n\n isarray = isinstance(ary, np.ndarray)\n if isarray and ary.ndim <= 1:\n func_kwargs.pop(\"out_shape\")\n hdi_data = func(ary, **func_kwargs) # pylint: disable=unexpected-keyword-arg\n return hdi_data[~np.isnan(hdi_data).all(axis=1), :] if multimodal else hdi_data\n\n if isarray and ary.ndim == 2:\n warnings.warn(\n \"hdi currently interprets 2d data as (draw, shape) but this will change in \"\n \"a future release to (chain, draw) for coherence with other functions\",\n FutureWarning,\n )\n ary = np.expand_dims(ary, 0)\n\n ary = convert_to_dataset(ary, group=group)\n if coords is not None:\n ary = get_coords(ary, coords)\n var_names = _var_names(var_names, ary, filter_vars)\n ary = ary[var_names] if var_names else ary\n\n hdi_coord = xr.DataArray([\"lower\", \"higher\"], dims=[\"hdi\"], attrs=dict(hdi_prob=hdi_prob))\n hdi_data = _wrap_xarray_ufunc(func, ary, func_kwargs=func_kwargs, **kwargs).assign_coords(\n {\"hdi\": hdi_coord}\n )\n hdi_data = hdi_data.dropna(\"mode\", how=\"all\") if multimodal else hdi_data\n return hdi_data.x.values if isarray else hdi_data\n\n\ndef _hdi(ary, hdi_prob, circular, skipna):\n \"\"\"Compute hpi over the flattened array.\"\"\"\n ary = ary.flatten()\n if skipna:\n nans = np.isnan(ary)\n if not nans.all():\n ary = ary[~nans]\n n = len(ary)\n\n if circular:\n mean = st.circmean(ary, high=np.pi, low=-np.pi)\n ary = ary - mean\n ary = np.arctan2(np.sin(ary), np.cos(ary))\n\n ary = np.sort(ary)\n interval_idx_inc = int(np.floor(hdi_prob * n))\n n_intervals = n - 
interval_idx_inc\n interval_width = ary[interval_idx_inc:] - ary[:n_intervals]\n\n if len(interval_width) == 0:\n raise ValueError(\"Too few elements for interval calculation. \")\n\n min_idx = np.argmin(interval_width)\n hdi_min = ary[min_idx]\n hdi_max = ary[min_idx + interval_idx_inc]\n\n if circular:\n hdi_min = hdi_min + mean\n hdi_max = hdi_max + mean\n hdi_min = np.arctan2(np.sin(hdi_min), np.cos(hdi_min))\n hdi_max = np.arctan2(np.sin(hdi_max), np.cos(hdi_max))\n\n hdi_interval = np.array([hdi_min, hdi_max])\n\n return hdi_interval\n\n\ndef _hdi_multimodal(ary, hdi_prob, skipna, max_modes):\n \"\"\"Compute HDI if the distribution is multimodal.\"\"\"\n ary = ary.flatten()\n if skipna:\n ary = ary[~np.isnan(ary)]\n\n if ary.dtype.kind == \"f\":\n density, lower, upper = _fast_kde(ary)\n range_x = upper - lower\n dx = range_x / len(density)\n bins = np.linspace(lower, upper, len(density))\n else:\n bins = get_bins(ary)\n _, density, _ = histogram(ary, bins=bins)\n dx = np.diff(bins)[0]\n\n density *= dx\n\n idx = np.argsort(-density)\n intervals = bins[idx][density[idx].cumsum() <= hdi_prob]\n intervals.sort()\n\n intervals_splitted = np.split(intervals, np.where(np.diff(intervals) >= dx * 1.1)[0] + 1)\n\n hdi_intervals = np.full((max_modes, 2), np.nan)\n for i, interval in enumerate(intervals_splitted):\n if i == max_modes:\n warnings.warn(\n \"found more modes than {0}, returning only the first {0} modes\", max_modes\n )\n break\n if interval.size == 0:\n hdi_intervals[i] = np.asarray([bins[0], bins[0]])\n else:\n hdi_intervals[i] = np.asarray([interval[0], interval[-1]])\n\n return np.array(hdi_intervals)\n\n\ndef loo(data, pointwise=None, var_name=None, reff=None, scale=None):\n \"\"\"Compute Pareto-smoothed importance sampling leave-one-out cross-validation (PSIS-LOO-CV).\n\n Estimates the expected log pointwise predictive density (elpd) using Pareto-smoothed\n importance sampling leave-one-out cross-validation (PSIS-LOO-CV). Also calculates LOO's\n standard error and the effective number of parameters. Read more theory here\n https://arxiv.org/abs/1507.04544 and here https://arxiv.org/abs/1507.02646\n\n Parameters\n ----------\n data: obj\n Any object that can be converted to an az.InferenceData object. Refer to documentation of\n az.convert_to_inference_data for details\n pointwise: bool, optional\n If True the pointwise predictive accuracy will be returned. Defaults to\n ``stats.ic_pointwise`` rcParam.\n var_name : str, optional\n The name of the variable in log_likelihood groups storing the pointwise log\n likelihood data to use for loo computation.\n reff: float, optional\n Relative MCMC efficiency, `ess / n` i.e. number of effective samples divided by the number\n of actual samples. Computed from trace by default.\n scale: str\n Output scale for loo. 
Available options are:\n\n - `log` : (default) log-score\n - `negative_log` : -1 * log-score\n - `deviance` : -2 * log-score\n\n A higher log-score (or a lower deviance or negative log_score) indicates a model with\n better predictive accuracy.\n\n Returns\n -------\n ELPDData object (inherits from panda.Series) with the following row/attributes:\n loo: approximated expected log pointwise predictive density (elpd)\n loo_se: standard error of loo\n p_loo: effective number of parameters\n shape_warn: bool\n True if the estimated shape parameter of\n Pareto distribution is greater than 0.7 for one or more samples\n loo_i: array of pointwise predictive accuracy, only if pointwise True\n pareto_k: array of Pareto shape values, only if pointwise True\n loo_scale: scale of the loo results\n\n The returned object has a custom print method that overrides pd.Series method.\n\n Examples\n --------\n Calculate LOO of a model:\n\n .. ipython::\n\n In [1]: import arviz as az\n ...: data = az.load_arviz_data(\"centered_eight\")\n ...: az.loo(data)\n\n Calculate LOO of a model and return the pointwise values:\n\n .. ipython::\n\n In [2]: data_loo = az.loo(data, pointwise=True)\n ...: data_loo.loo_i\n \"\"\"\n inference_data = convert_to_inference_data(data)\n log_likelihood = _get_log_likelihood(inference_data, var_name=var_name)\n pointwise = rcParams[\"stats.ic_pointwise\"] if pointwise is None else pointwise\n\n log_likelihood = log_likelihood.stack(sample=(\"chain\", \"draw\"))\n shape = log_likelihood.shape\n n_samples = shape[-1]\n n_data_points = np.product(shape[:-1])\n scale = rcParams[\"stats.ic_scale\"] if scale is None else scale.lower()\n\n if scale == \"deviance\":\n scale_value = -2\n elif scale == \"log\":\n scale_value = 1\n elif scale == \"negative_log\":\n scale_value = -1\n else:\n raise TypeError('Valid scale values are \"deviance\", \"log\", \"negative_log\"')\n\n if reff is None:\n if not hasattr(inference_data, \"posterior\"):\n raise TypeError(\"Must be able to extract a posterior group from data.\")\n posterior = inference_data.posterior\n n_chains = len(posterior.chain)\n if n_chains == 1:\n reff = 1.0\n else:\n ess_p = ess(posterior, method=\"mean\")\n # this mean is over all data variables\n reff = (\n np.hstack([ess_p[v].values.flatten() for v in ess_p.data_vars]).mean() / n_samples\n )\n\n log_weights, pareto_shape = psislw(-log_likelihood, reff)\n log_weights += log_likelihood\n\n warn_mg = False\n if np.any(pareto_shape > 0.7):\n warnings.warn(\n \"Estimated shape parameter of Pareto distribution is greater than 0.7 for \"\n \"one or more samples. You should consider using a more robust model, this is because \"\n \"importance sampling is less likely to work well if the marginal posterior and \"\n \"LOO posterior are very different. 
This is more likely to happen with a non-robust \"\n \"model and highly influential observations.\"\n )\n warn_mg = True\n\n ufunc_kwargs = {\"n_dims\": 1, \"ravel\": False}\n kwargs = {\"input_core_dims\": [[\"sample\"]]}\n loo_lppd_i = scale_value * _wrap_xarray_ufunc(\n _logsumexp, log_weights, ufunc_kwargs=ufunc_kwargs, **kwargs\n )\n loo_lppd = loo_lppd_i.values.sum()\n loo_lppd_se = (n_data_points * np.var(loo_lppd_i.values)) ** 0.5\n\n lppd = np.sum(\n _wrap_xarray_ufunc(\n _logsumexp,\n log_likelihood,\n func_kwargs={\"b_inv\": n_samples},\n ufunc_kwargs=ufunc_kwargs,\n **kwargs,\n ).values\n )\n p_loo = lppd - loo_lppd / scale_value\n\n if pointwise:\n if np.equal(loo_lppd, loo_lppd_i).all(): # pylint: disable=no-member\n warnings.warn(\n \"The point-wise LOO is the same with the sum LOO, please double check \"\n \"the Observed RV in your model to make sure it returns element-wise logp.\"\n )\n return ELPDData(\n data=[\n loo_lppd,\n loo_lppd_se,\n p_loo,\n n_samples,\n n_data_points,\n warn_mg,\n loo_lppd_i.rename(\"loo_i\"),\n pareto_shape,\n scale,\n ],\n index=[\n \"loo\",\n \"loo_se\",\n \"p_loo\",\n \"n_samples\",\n \"n_data_points\",\n \"warning\",\n \"loo_i\",\n \"pareto_k\",\n \"loo_scale\",\n ],\n )\n\n else:\n return ELPDData(\n data=[loo_lppd, loo_lppd_se, p_loo, n_samples, n_data_points, warn_mg, scale],\n index=[\"loo\", \"loo_se\", \"p_loo\", \"n_samples\", \"n_data_points\", \"warning\", \"loo_scale\"],\n )\n\n\ndef psislw(log_weights, reff=1.0):\n \"\"\"\n Pareto smoothed importance sampling (PSIS).\n\n Parameters\n ----------\n log_weights: array\n Array of size (n_observations, n_samples)\n reff: float\n relative MCMC efficiency, `ess / n`\n\n Returns\n -------\n lw_out: array\n Smoothed log weights\n kss: array\n Pareto tail indices\n\n References\n ----------\n * Vehtari et al. (2015) see https://arxiv.org/abs/1507.02646\n\n Examples\n --------\n Get Pareto smoothed importance sampling (PSIS) log weights:\n\n .. 
ipython::\n\n In [1]: import arviz as az\n ...: data = az.load_arviz_data(\"centered_eight\")\n ...: log_likelihood = data.sample_stats.log_likelihood.stack(sample=(\"chain\", \"draw\"))\n ...: az.psislw(-log_likelihood, reff=0.8)\n\n \"\"\"\n if hasattr(log_weights, \"sample\"):\n n_samples = len(log_weights.sample)\n shape = [size for size, dim in zip(log_weights.shape, log_weights.dims) if dim != \"sample\"]\n else:\n n_samples = log_weights.shape[-1]\n shape = log_weights.shape[:-1]\n # precalculate constants\n cutoff_ind = -int(np.ceil(min(n_samples / 5.0, 3 * (n_samples / reff) ** 0.5))) - 1\n cutoffmin = np.log(np.finfo(float).tiny) # pylint: disable=no-member, assignment-from-no-return\n k_min = 1.0 / 3\n\n # create output array with proper dimensions\n out = tuple([np.empty_like(log_weights), np.empty(shape)])\n\n # define kwargs\n func_kwargs = {\"cutoff_ind\": cutoff_ind, \"cutoffmin\": cutoffmin, \"k_min\": k_min, \"out\": out}\n ufunc_kwargs = {\"n_dims\": 1, \"n_output\": 2, \"ravel\": False, \"check_shape\": False}\n kwargs = {\"input_core_dims\": [[\"sample\"]], \"output_core_dims\": [[\"sample\"], []]}\n log_weights, pareto_shape = _wrap_xarray_ufunc(\n _psislw, log_weights, ufunc_kwargs=ufunc_kwargs, func_kwargs=func_kwargs, **kwargs\n )\n if isinstance(log_weights, xr.DataArray):\n log_weights = log_weights.rename(\"log_weights\").rename(sample=\"sample\")\n if isinstance(pareto_shape, xr.DataArray):\n pareto_shape = pareto_shape.rename(\"pareto_shape\")\n return log_weights, pareto_shape\n\n\ndef _psislw(log_weights, cutoff_ind, cutoffmin, k_min=1.0 / 3):\n \"\"\"\n Pareto smoothed importance sampling (PSIS) for a 1D vector.\n\n Parameters\n ----------\n log_weights: array\n Array of length n_observations\n cutoff_ind: int\n cutoffmin: float\n k_min: float\n\n Returns\n -------\n lw_out: array\n Smoothed log weights\n kss: float\n Pareto tail index\n \"\"\"\n x = np.asarray(log_weights)\n\n # improve numerical accuracy\n x -= np.max(x)\n # sort the array\n x_sort_ind = np.argsort(x)\n # divide log weights into body and right tail\n xcutoff = max(x[x_sort_ind[cutoff_ind]], cutoffmin)\n\n expxcutoff = np.exp(xcutoff)\n (tailinds,) = np.where(x > xcutoff) # pylint: disable=unbalanced-tuple-unpacking\n x_tail = x[tailinds]\n tail_len = len(x_tail)\n if tail_len <= 4:\n # not enough tail samples for gpdfit\n k = np.inf\n else:\n # order of tail samples\n x_tail_si = np.argsort(x_tail)\n # fit generalized Pareto distribution to the right tail samples\n x_tail = np.exp(x_tail) - expxcutoff\n k, sigma = _gpdfit(x_tail[x_tail_si])\n\n if k >= k_min:\n # no smoothing if short tail or GPD fit failed\n # compute ordered statistic for the fit\n sti = np.arange(0.5, tail_len) / tail_len\n smoothed_tail = _gpinv(sti, k, sigma)\n smoothed_tail = np.log( # pylint: disable=assignment-from-no-return\n smoothed_tail + expxcutoff\n )\n # place the smoothed tail into the output array\n x[tailinds[x_tail_si]] = smoothed_tail\n # truncate smoothed values to the largest raw weight 0\n x[x > 0] = 0\n # renormalize weights\n x -= _logsumexp(x)\n\n return x, k\n\n\ndef _gpdfit(ary):\n \"\"\"Estimate the parameters for the Generalized Pareto Distribution (GPD).\n\n Empirical Bayes estimate for the parameters of the generalized Pareto\n distribution given the data.\n\n Parameters\n ----------\n ary: array\n sorted 1D data array\n\n Returns\n -------\n k: float\n estimated shape parameter\n sigma: float\n estimated scale parameter\n \"\"\"\n prior_bs = 3\n prior_k = 10\n n = len(ary)\n m_est = 30 + 
int(n ** 0.5)\n\n b_ary = 1 - np.sqrt(m_est / (np.arange(1, m_est + 1, dtype=float) - 0.5))\n b_ary /= prior_bs * ary[int(n / 4 + 0.5) - 1]\n b_ary += 1 / ary[-1]\n\n k_ary = np.log1p(-b_ary[:, None] * ary).mean(axis=1) # pylint: disable=no-member\n len_scale = n * (np.log(-(b_ary / k_ary)) - k_ary - 1)\n weights = 1 / np.exp(len_scale - len_scale[:, None]).sum(axis=1)\n\n # remove negligible weights\n real_idxs = weights >= 10 * np.finfo(float).eps\n if not np.all(real_idxs):\n weights = weights[real_idxs]\n b_ary = b_ary[real_idxs]\n # normalise weights\n weights /= weights.sum()\n\n # posterior mean for b\n b_post = np.sum(b_ary * weights)\n # estimate for k\n k_post = np.log1p(-b_post * ary).mean() # pylint: disable=invalid-unary-operand-type,no-member\n # add prior for k_post\n k_post = (n * k_post + prior_k * 0.5) / (n + prior_k)\n sigma = -k_post / b_post\n\n return k_post, sigma\n\n\ndef _gpinv(probs, kappa, sigma):\n \"\"\"Inverse Generalized Pareto distribution function.\"\"\"\n # pylint: disable=unsupported-assignment-operation, invalid-unary-operand-type\n x = np.full_like(probs, np.nan)\n if sigma <= 0:\n return x\n ok = (probs > 0) & (probs < 1)\n if np.all(ok):\n if np.abs(kappa) < np.finfo(float).eps:\n x = -np.log1p(-probs)\n else:\n x = np.expm1(-kappa * np.log1p(-probs)) / kappa\n x *= sigma\n else:\n if np.abs(kappa) < np.finfo(float).eps:\n x[ok] = -np.log1p(-probs[ok])\n else:\n x[ok] = np.expm1(-kappa * np.log1p(-probs[ok])) / kappa\n x *= sigma\n x[probs == 0] = 0\n if kappa >= 0:\n x[probs == 1] = np.inf\n else:\n x[probs == 1] = -sigma / kappa\n return x\n\n\ndef r2_score(y_true, y_pred):\n \"\"\"R² for Bayesian regression models. Only valid for linear models.\n\n Parameters\n ----------\n y_true: array-like of shape = (n_samples) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n y_pred: array-like of shape = (n_samples) or (n_samples, n_outputs)\n Estimated target values.\n\n Returns\n -------\n Pandas Series with the following indices:\n r2: Bayesian R²\n r2_std: standard deviation of the Bayesian R².\n\n Examples\n --------\n Calculate R² for Bayesian regression models :\n\n .. 
ipython::\n\n In [1]: import arviz as az\n ...: data = az.load_arviz_data('regression1d')\n ...: y_true = data.observed_data[\"y\"].values\n ...: y_pred = data.posterior_predictive.stack(sample=(\"chain\", \"draw\"))[\"y\"].values.T\n ...: az.r2_score(y_true, y_pred)\n\n \"\"\"\n _numba_flag = Numba.numba_flag\n if y_pred.ndim == 1:\n var_y_est = _numba_var(svar, np.var, y_pred)\n var_e = _numba_var(svar, np.var, (y_true - y_pred))\n else:\n var_y_est = _numba_var(svar, np.var, y_pred.mean(0))\n var_e = _numba_var(svar, np.var, (y_true - y_pred), axis=0)\n r_squared = var_y_est / (var_y_est + var_e)\n\n return pd.Series([np.mean(r_squared), np.std(r_squared)], index=[\"r2\", \"r2_std\"])\n\n\ndef summary(\n data,\n var_names: Optional[List[str]] = None,\n filter_vars=None,\n fmt: str = \"wide\",\n kind: str = \"all\",\n round_to=None,\n include_circ=None,\n circ_var_names=None,\n stat_funcs=None,\n extend=True,\n hdi_prob=None,\n order=\"C\",\n index_origin=None,\n skipna=False,\n coords: Optional[CoordSpec] = None,\n dims: Optional[DimSpec] = None,\n credible_interval=None,\n) -> Union[pd.DataFrame, xr.Dataset]:\n \"\"\"Create a data frame with summary statistics.\n\n Parameters\n ----------\n data: obj\n Any object that can be converted to an az.InferenceData object\n Refer to documentation of az.convert_to_dataset for details\n var_names: list\n Names of variables to include in summary. Prefix the variables by `~` when you\n want to exclude them from the summary: `[\"~beta\"]` instead of `[\"beta\"]` (see\n examples below).\n filter_vars: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret var_names as the real variables names. If \"like\",\n interpret var_names as substrings of the real variables names. If \"regex\",\n interpret var_names as regular expressions on the real variables names. A la\n `pandas.filter`.\n fmt: {'wide', 'long', 'xarray'}\n Return format is either pandas.DataFrame {'wide', 'long'} or xarray.Dataset {'xarray'}.\n kind: {'all', 'stats', 'diagnostics'}\n Whether to include the `stats`: `mean`, `sd`, `hdi_3%`, `hdi_97%`, or the `diagnostics`:\n `mcse_mean`, `mcse_sd`, `ess_bulk`, `ess_tail`, and `r_hat`. Default to include `all` of\n them.\n round_to: int\n Number of decimals used to round results. Defaults to 2. Use \"none\" to return raw numbers.\n include_circ: boolean\n Whether to include circular statistics\n deprecated: Please see circ_var_names\n circ_var_names: list\n A list of circular variables to compute circular stats for\n stat_funcs: dict\n A list of functions or a dict of functions with function names as keys used to calculate\n statistics. By default, the mean, standard deviation, simulation standard error, and\n highest posterior density intervals are included.\n\n The functions will be given one argument, the samples for a variable as an nD array,\n The functions should be in the style of a ufunc and return a single number. For example,\n `np.mean`, or `scipy.stats.var` would both work.\n extend: boolean\n If True, use the statistics returned by ``stat_funcs`` in addition to, rather than in place\n of, the default statistics. This is only meaningful when ``stat_funcs`` is not None.\n hdi_prob: float, optional\n HDI interval to compute. Defaults to 0.94. This is only meaningful when ``stat_funcs`` is\n None.\n order: {\"C\", \"F\"}\n If fmt is \"wide\", use either C or F unpacking order. 
Defaults to C.\n index_origin: int\n If fmt is \"wide, select n-based indexing for multivariate parameters.\n Defaults to rcParam data.index.origin, which is 0.\n skipna: bool\n If true ignores nan values when computing the summary statistics, it does not affect the\n behaviour of the functions passed to ``stat_funcs``. Defaults to false.\n coords: Dict[str, List[Any]], optional\n Coordinates specification to be used if the ``fmt`` is ``'xarray'``.\n dims: Dict[str, List[str]], optional\n Dimensions specification for the variables to be used if the ``fmt`` is ``'xarray'``.\n credible_interval: float, optional\n deprecated: Please see hdi_prob\n\n Returns\n -------\n pandas.DataFrame or xarray.Dataset\n Return type dicated by `fmt` argument.\n Return value will contain summary statistics for each variable. Default statistics are:\n `mean`, `sd`, `hdi_3%`, `hdi_97%`, `mcse_mean`, `mcse_sd`, `ess_bulk`, `ess_tail`, and\n `r_hat`.\n `r_hat` is only computed for traces with 2 or more chains.\n\n Examples\n --------\n .. ipython::\n\n In [1]: import arviz as az\n ...: data = az.load_arviz_data(\"centered_eight\")\n ...: az.summary(data, var_names=[\"mu\", \"tau\"])\n\n You can use `filter_vars` to select variables without having to specify all the exact\n names. Use `filter_vars=\"like\"` to select based on partial naming:\n\n .. ipython::\n\n In [1]: az.summary(data, var_names=[\"the\"], filter_vars=\"like\")\n\n Use `filter_vars=\"regex\"` to select based on regular expressions, and prefix the variables\n you want to exclude by `~`. Here, we exclude from the summary all the variables\n starting with the letter t:\n\n .. ipython::\n\n In [1]: az.summary(data, var_names=[\"~^t\"], filter_vars=\"regex\")\n\n Other statistics can be calculated by passing a list of functions\n or a dictionary with key, function pairs.\n\n .. ipython::\n\n In [1]: import numpy as np\n ...: def median_sd(x):\n ...: median = np.percentile(x, 50)\n ...: sd = np.sqrt(np.mean((x-median)**2))\n ...: return sd\n ...:\n ...: func_dict = {\n ...: \"std\": np.std,\n ...: \"median_std\": median_sd,\n ...: \"5%\": lambda x: np.percentile(x, 5),\n ...: \"median\": lambda x: np.percentile(x, 50),\n ...: \"95%\": lambda x: np.percentile(x, 95),\n ...: }\n ...: az.summary(\n ...: data,\n ...: var_names=[\"mu\", \"tau\"],\n ...: stat_funcs=func_dict,\n ...: extend=False\n ...: )\n\n \"\"\"\n if include_circ:\n warnings.warn(\n \"include_circ is deprecated and will be ignored. Use circ_var_names instead\",\n DeprecationWarning,\n )\n\n if credible_interval:\n hdi_prob = credible_interval_warning(hdi_prob, hdi_prob)\n\n extra_args = {} # type: Dict[str, Any]\n if coords is not None:\n extra_args[\"coords\"] = coords\n if dims is not None:\n extra_args[\"dims\"] = dims\n if index_origin is None:\n index_origin = rcParams[\"data.index_origin\"]\n if hdi_prob is None:\n hdi_prob = rcParams[\"stats.hdi_prob\"]\n else:\n if not 1 >= hdi_prob > 0:\n raise ValueError(\"The value of hdi_prob should be in the interval (0, 1]\")\n posterior = convert_to_dataset(data, group=\"posterior\", **extra_args)\n var_names = _var_names(var_names, posterior, filter_vars)\n posterior = posterior if var_names is None else posterior[var_names]\n\n fmt_group = (\"wide\", \"long\", \"xarray\")\n if not isinstance(fmt, str) or (fmt.lower() not in fmt_group):\n raise TypeError(\"Invalid format: '{}'. 
Formatting options are: {}\".format(fmt, fmt_group))\n\n unpack_order_group = (\"C\", \"F\")\n if not isinstance(order, str) or (order.upper() not in unpack_order_group):\n raise TypeError(\n \"Invalid order: '{}'. Unpacking options are: {}\".format(order, unpack_order_group)\n )\n\n alpha = 1 - hdi_prob\n\n extra_metrics = []\n extra_metric_names = []\n\n if stat_funcs is not None:\n if isinstance(stat_funcs, dict):\n for stat_func_name, stat_func in stat_funcs.items():\n extra_metrics.append(\n xr.apply_ufunc(\n _make_ufunc(stat_func), posterior, input_core_dims=((\"chain\", \"draw\"),)\n )\n )\n extra_metric_names.append(stat_func_name)\n else:\n for stat_func in stat_funcs:\n extra_metrics.append(\n xr.apply_ufunc(\n _make_ufunc(stat_func), posterior, input_core_dims=((\"chain\", \"draw\"),)\n )\n )\n extra_metric_names.append(stat_func.__name__)\n\n if extend and kind in [\"all\", \"stats\"]:\n mean = posterior.mean(dim=(\"chain\", \"draw\"), skipna=skipna)\n\n sd = posterior.std(dim=(\"chain\", \"draw\"), ddof=1, skipna=skipna)\n\n hdi_post = hdi(posterior, hdi_prob=hdi_prob, multimodal=False, skipna=skipna)\n hdi_lower = hdi_post.sel(hdi=\"lower\", drop=True)\n hdi_higher = hdi_post.sel(hdi=\"higher\", drop=True)\n\n if circ_var_names:\n nan_policy = \"omit\" if skipna else \"propagate\"\n circ_mean = xr.apply_ufunc(\n _make_ufunc(st.circmean),\n posterior,\n kwargs=dict(high=np.pi, low=-np.pi, nan_policy=nan_policy),\n input_core_dims=((\"chain\", \"draw\"),),\n )\n _numba_flag = Numba.numba_flag\n func = None\n if _numba_flag:\n func = _circular_standard_deviation\n kwargs_circ_std = dict(high=np.pi, low=-np.pi, skipna=skipna)\n else:\n func = st.circstd\n kwargs_circ_std = dict(high=np.pi, low=-np.pi, nan_policy=nan_policy)\n circ_sd = xr.apply_ufunc(\n _make_ufunc(func),\n posterior,\n kwargs=kwargs_circ_std,\n input_core_dims=((\"chain\", \"draw\"),),\n )\n\n circ_mcse = xr.apply_ufunc(\n _make_ufunc(_mc_error),\n posterior,\n kwargs=dict(circular=True),\n input_core_dims=((\"chain\", \"draw\"),),\n )\n\n circ_hdi = hdi(posterior, hdi_prob=hdi_prob, circular=True, skipna=skipna)\n circ_hdi_lower = circ_hdi.sel(hdi=\"lower\", drop=True)\n circ_hdi_higher = circ_hdi.sel(hdi=\"higher\", drop=True)\n\n if kind in [\"all\", \"diagnostics\"]:\n mcse_mean, mcse_sd, ess_mean, ess_sd, ess_bulk, ess_tail, r_hat = xr.apply_ufunc(\n _make_ufunc(_multichain_statistics, n_output=7, ravel=False),\n posterior,\n input_core_dims=((\"chain\", \"draw\"),),\n output_core_dims=tuple([] for _ in range(7)),\n )\n\n # Combine metrics\n metrics = []\n metric_names = []\n if extend:\n metrics_names_ = (\n \"mean\",\n \"sd\",\n \"hdi_{:g}%\".format(100 * alpha / 2),\n \"hdi_{:g}%\".format(100 * (1 - alpha / 2)),\n \"mcse_mean\",\n \"mcse_sd\",\n \"ess_mean\",\n \"ess_sd\",\n \"ess_bulk\",\n \"ess_tail\",\n \"r_hat\",\n )\n if kind == \"all\":\n metrics_ = (\n mean,\n sd,\n hdi_lower,\n hdi_higher,\n mcse_mean,\n mcse_sd,\n ess_mean,\n ess_sd,\n ess_bulk,\n ess_tail,\n r_hat,\n )\n elif kind == \"stats\":\n metrics_ = (mean, sd, hdi_lower, hdi_higher)\n metrics_names_ = metrics_names_[:4]\n elif kind == \"diagnostics\":\n metrics_ = (mcse_mean, mcse_sd, ess_mean, ess_sd, ess_bulk, ess_tail, r_hat)\n metrics_names_ = metrics_names_[4:]\n metrics.extend(metrics_)\n metric_names.extend(metrics_names_)\n\n if circ_var_names:\n\n if kind != \"diagnostics\":\n for metric, circ_stat in zip(\n # Replace only the first 5 statistics for their circular equivalent\n metrics[:5],\n (circ_mean, circ_sd, circ_hdi_lower, 
circ_hdi_higher, circ_mcse),\n ):\n for circ_var in circ_var_names:\n metric[circ_var] = circ_stat[circ_var]\n\n metrics.extend(extra_metrics)\n metric_names.extend(extra_metric_names)\n joined = (\n xr.concat(metrics, dim=\"metric\").assign_coords(metric=metric_names).reset_coords(drop=True)\n )\n\n if fmt.lower() == \"wide\":\n dfs = []\n for var_name, values in joined.data_vars.items():\n if len(values.shape[1:]):\n metric = list(values.metric.values)\n data_dict = OrderedDict()\n for idx in np.ndindex(values.shape[1:] if order == \"C\" else values.shape[1:][::-1]):\n if order == \"F\":\n idx = tuple(idx[::-1])\n ser = pd.Series(values[(Ellipsis, *idx)].values, index=metric)\n key_index = \",\".join(map(str, (i + index_origin for i in idx)))\n key = \"{}[{}]\".format(var_name, key_index)\n data_dict[key] = ser\n df = pd.DataFrame.from_dict(data_dict, orient=\"index\")\n df = df.loc[list(data_dict.keys())]\n else:\n df = values.to_dataframe()\n df.index = list(df.index)\n df = df.T\n dfs.append(df)\n summary_df = pd.concat(dfs, sort=False)\n elif fmt.lower() == \"long\":\n df = joined.to_dataframe().reset_index().set_index(\"metric\")\n df.index = list(df.index)\n summary_df = df\n else:\n # format is 'xarray'\n summary_df = joined\n if (round_to is not None) and (round_to not in (\"None\", \"none\")):\n summary_df = summary_df.round(round_to)\n elif round_to not in (\"None\", \"none\") and (fmt.lower() in (\"long\", \"wide\")):\n # Don't round xarray object by default (even with \"none\")\n decimals = {\n col: 3\n if col not in {\"ess_mean\", \"ess_sd\", \"ess_bulk\", \"ess_tail\", \"r_hat\"}\n else 2\n if col == \"r_hat\"\n else 0\n for col in summary_df.columns\n }\n summary_df = summary_df.round(decimals)\n\n return summary_df\n\n\ndef waic(data, pointwise=None, var_name=None, scale=None):\n \"\"\"Compute the widely applicable information criterion.\n\n Estimates the expected log pointwise predictive density (elpd) using WAIC. Also calculates the\n WAIC's standard error and the effective number of parameters.\n Read more theory here https://arxiv.org/abs/1507.04544 and here https://arxiv.org/abs/1004.2316\n\n Parameters\n ----------\n data: obj\n Any object that can be converted to an az.InferenceData object. Refer to documentation of\n ``az.convert_to_inference_data`` for details\n pointwise: bool\n If True the pointwise predictive accuracy will be returned. Defaults to\n ``stats.ic_pointwise`` rcParam.\n var_name : str, optional\n The name of the variable in log_likelihood groups storing the pointwise log\n likelihood data to use for waic computation.\n scale: str\n Output scale for WAIC. Available options are:\n\n - `log` : (default) log-score\n - `negative_log` : -1 * log-score\n - `deviance` : -2 * log-score\n\n A higher log-score (or a lower deviance or negative log_score) indicates a model with\n better predictive accuracy.\n\n Returns\n -------\n ELPDData object (inherits from panda.Series) with the following row/attributes:\n waic: approximated expected log pointwise predictive density (elpd)\n waic_se: standard error of waic\n p_waic: effective number parameters\n var_warn: bool\n True if posterior variance of the log predictive densities exceeds 0.4\n waic_i: xarray.DataArray with the pointwise predictive accuracy, only if pointwise=True\n waic_scale: scale of the reported waic results\n\n The returned object has a custom print method that overrides pd.Series method.\n\n Examples\n --------\n Calculate WAIC of a model:\n\n .. 
ipython::\n\n In [1]: import arviz as az\n ...: data = az.load_arviz_data(\"centered_eight\")\n ...: az.waic(data)\n\n Calculate WAIC of a model and return the pointwise values:\n\n .. ipython::\n\n In [2]: data_waic = az.waic(data, pointwise=True)\n ...: data_waic.waic_i\n \"\"\"\n inference_data = convert_to_inference_data(data)\n log_likelihood = _get_log_likelihood(inference_data, var_name=var_name)\n scale = rcParams[\"stats.ic_scale\"] if scale is None else scale.lower()\n pointwise = rcParams[\"stats.ic_pointwise\"] if pointwise is None else pointwise\n\n if scale == \"deviance\":\n scale_value = -2\n elif scale == \"log\":\n scale_value = 1\n elif scale == \"negative_log\":\n scale_value = -1\n else:\n raise TypeError('Valid scale values are \"deviance\", \"log\", \"negative_log\"')\n\n log_likelihood = log_likelihood.stack(sample=(\"chain\", \"draw\"))\n shape = log_likelihood.shape\n n_samples = shape[-1]\n n_data_points = np.product(shape[:-1])\n\n ufunc_kwargs = {\"n_dims\": 1, \"ravel\": False}\n kwargs = {\"input_core_dims\": [[\"sample\"]]}\n lppd_i = _wrap_xarray_ufunc(\n _logsumexp,\n log_likelihood,\n func_kwargs={\"b_inv\": n_samples},\n ufunc_kwargs=ufunc_kwargs,\n **kwargs,\n )\n\n vars_lpd = log_likelihood.var(dim=\"sample\")\n warn_mg = False\n if np.any(vars_lpd > 0.4):\n warnings.warn(\n (\n \"For one or more samples the posterior variance of the log predictive \"\n \"densities exceeds 0.4. This could be indication of WAIC starting to fail. \\n\"\n \"See http://arxiv.org/abs/1507.04544 for details\"\n )\n )\n warn_mg = True\n\n waic_i = scale_value * (lppd_i - vars_lpd)\n waic_se = (n_data_points * np.var(waic_i.values)) ** 0.5\n waic_sum = np.sum(waic_i.values)\n p_waic = np.sum(vars_lpd.values)\n\n if pointwise:\n if np.equal(waic_sum, waic_i).all(): # pylint: disable=no-member\n warnings.warn(\n \"\"\"The point-wise WAIC is the same with the sum WAIC, please double check\n the Observed RV in your model to make sure it returns element-wise logp.\n \"\"\"\n )\n return ELPDData(\n data=[\n waic_sum,\n waic_se,\n p_waic,\n n_samples,\n n_data_points,\n warn_mg,\n waic_i.rename(\"waic_i\"),\n scale,\n ],\n index=[\n \"waic\",\n \"waic_se\",\n \"p_waic\",\n \"n_samples\",\n \"n_data_points\",\n \"warning\",\n \"waic_i\",\n \"waic_scale\",\n ],\n )\n else:\n return ELPDData(\n data=[waic_sum, waic_se, p_waic, n_samples, n_data_points, warn_mg, scale],\n index=[\n \"waic\",\n \"waic_se\",\n \"p_waic\",\n \"n_samples\",\n \"n_data_points\",\n \"warning\",\n \"waic_scale\",\n ],\n )\n\n\ndef loo_pit(idata=None, *, y=None, y_hat=None, log_weights=None):\n \"\"\"Compute leave one out (PSIS-LOO) probability integral transform (PIT) values.\n\n Parameters\n ----------\n idata: InferenceData\n InferenceData object.\n y: array, DataArray or str\n Observed data. If str, idata must be present and contain the observed data group\n y_hat: array, DataArray or str\n Posterior predictive samples for ``y``. It must have the same shape as y plus an\n extra dimension at the end of size n_samples (chains and draws stacked). If str or\n None, idata must contain the posterior predictive group. If None, y_hat is taken\n equal to y, thus, y must be str too.\n log_weights: array or DataArray\n Smoothed log_weights. It must have the same shape as ``y_hat``\n\n Returns\n -------\n loo_pit: array or DataArray\n Value of the LOO-PIT at each observed data point.\n\n Examples\n --------\n Calculate LOO-PIT values using as test quantity the observed values themselves.\n\n .. 
ipython::\n\n In [1]: import arviz as az\n ...: data = az.load_arviz_data(\"centered_eight\")\n ...: az.loo_pit(idata=data, y=\"obs\")\n\n Calculate LOO-PIT values using as test quantity the square of the difference between\n each observation and `mu`. Both ``y`` and ``y_hat`` inputs will be array-like,\n but ``idata`` will still be passed in order to calculate the ``log_weights`` from\n there.\n\n .. ipython::\n\n In [1]: T = data.observed_data.obs - data.posterior.mu.median(dim=(\"chain\", \"draw\"))\n ...: T_hat = data.posterior_predictive.obs - data.posterior.mu\n ...: T_hat = T_hat.stack(sample=(\"chain\", \"draw\"))\n ...: az.loo_pit(idata=data, y=T**2, y_hat=T_hat**2)\n\n \"\"\"\n y_str = \"\"\n if idata is not None and not isinstance(idata, InferenceData):\n raise ValueError(\"idata must be of type InferenceData or None\")\n\n if idata is None:\n if not all(isinstance(arg, (np.ndarray, xr.DataArray)) for arg in (y, y_hat, log_weights)):\n raise ValueError(\n \"all 3 y, y_hat and log_weights must be array or DataArray when idata is None \"\n \"but they are of types {}\".format([type(arg) for arg in (y, y_hat, log_weights)])\n )\n\n else:\n if y_hat is None and isinstance(y, str):\n y_hat = y\n elif y_hat is None:\n raise ValueError(\"y_hat cannot be None if y is not a str\")\n if isinstance(y, str):\n y_str = y\n y = idata.observed_data[y].values\n elif not isinstance(y, (np.ndarray, xr.DataArray)):\n raise ValueError(\"y must be of types array, DataArray or str, not {}\".format(type(y)))\n if isinstance(y_hat, str):\n y_hat = idata.posterior_predictive[y_hat].stack(sample=(\"chain\", \"draw\")).values\n elif not isinstance(y_hat, (np.ndarray, xr.DataArray)):\n raise ValueError(\n \"y_hat must be of types array, DataArray or str, not {}\".format(type(y_hat))\n )\n if log_weights is None:\n if y_str:\n try:\n log_likelihood = _get_log_likelihood(idata, var_name=y)\n except TypeError:\n log_likelihood = _get_log_likelihood(idata)\n else:\n log_likelihood = _get_log_likelihood(idata)\n log_likelihood = log_likelihood.stack(sample=(\"chain\", \"draw\"))\n posterior = convert_to_dataset(idata, group=\"posterior\")\n n_chains = len(posterior.chain)\n n_samples = len(log_likelihood.sample)\n ess_p = ess(posterior, method=\"mean\")\n # this mean is over all data variables\n reff = (\n (np.hstack([ess_p[v].values.flatten() for v in ess_p.data_vars]).mean() / n_samples)\n if n_chains > 1\n else 1\n )\n log_weights = psislw(-log_likelihood, reff=reff)[0].values\n elif not isinstance(log_weights, (np.ndarray, xr.DataArray)):\n raise ValueError(\n \"log_weights must be None or of types array or DataArray, not {}\".format(\n type(log_weights)\n )\n )\n\n if len(y.shape) + 1 != len(y_hat.shape):\n raise ValueError(\n \"y_hat must have 1 more dimension than y, but y_hat has {} dims and y has \"\n \"{} dims\".format(len(y.shape), len(y_hat.shape))\n )\n\n if y.shape != y_hat.shape[:-1]:\n raise ValueError(\n \"y has shape: {} which should be equal to y_hat shape (omitting the last \"\n \"dimension): {}\".format(y.shape, y_hat.shape)\n )\n\n if y_hat.shape != log_weights.shape:\n raise ValueError(\n \"y_hat and log_weights must have the same shape but have shapes {} and {}\".format(\n y_hat.shape, log_weights.shape\n )\n )\n\n kwargs = {\n \"input_core_dims\": [[], [\"sample\"], [\"sample\"]],\n \"output_core_dims\": [[]],\n \"join\": \"left\",\n }\n ufunc_kwargs = {\"n_dims\": 1}\n\n return _wrap_xarray_ufunc(_loo_pit, y, y_hat, log_weights, ufunc_kwargs=ufunc_kwargs, **kwargs)\n\n\ndef _loo_pit(y, 
y_hat, log_weights):\n \"\"\"Compute LOO-PIT values.\"\"\"\n sel = y_hat <= y\n if np.sum(sel) > 0:\n value = np.exp(_logsumexp(log_weights[sel]))\n return min(1, value)\n else:\n return 0\n\n\ndef apply_test_function(\n idata,\n func,\n group=\"both\",\n var_names=None,\n pointwise=False,\n out_data_shape=None,\n out_pp_shape=None,\n out_name_data=\"T\",\n out_name_pp=None,\n func_args=None,\n func_kwargs=None,\n ufunc_kwargs=None,\n wrap_data_kwargs=None,\n wrap_pp_kwargs=None,\n inplace=True,\n overwrite=None,\n):\n \"\"\"Apply a Bayesian test function to an InferenceData object.\n\n Parameters\n ----------\n idata: InferenceData\n InferenceData object on which to apply the test function. This function will add\n new variables to the InferenceData object to store the result without modifying the\n existing ones.\n func: callable\n Callable that calculates the test function. It must have the following call signature\n ``func(y, theta, *args, **kwargs)`` (where ``y`` is the observed data or posterior\n predictive and ``theta`` the model parameters) even if not all the arguments are\n used.\n group: str, optional\n Group on which to apply the test function. Can be observed_data, posterior_predictive\n or both.\n var_names: dict group -> var_names, optional\n Mapping from group name to the variables to be passed to func. It can be a dict of\n strings or lists of strings. There is also the option of using ``both`` as key,\n in which case, the same variables are used in observed data and posterior predictive\n groups\n pointwise: bool, optional\n If True, apply the test function to each observation and sample, otherwise, apply\n test function to each sample.\n out_data_shape, out_pp_shape: tuple, optional\n Output shape of the test function applied to the observed/posterior predictive data.\n If None, the default depends on the value of pointwise.\n out_name_data, out_name_pp: str, optional\n Name of the variables to add to the observed_data and posterior_predictive datasets\n respectively. ``out_name_pp`` can be ``None``, in which case will be taken equal to\n ``out_name_data``.\n func_args: sequence, optional\n Passed as is to ``func``\n func_kwargs: mapping, optional\n Passed as is to ``func``\n wrap_data_kwargs, wrap_pp_kwargs: mapping, optional\n kwargs passed to ``az.stats.wrap_xarray_ufunc``. By default, some suitable input_core_dims\n are used.\n inplace: bool, optional\n If True, add the variables inplace, othewise, return a copy of idata with the variables\n added.\n overwrite: bool, optional\n Overwrite data in case ``out_name_data`` or ``out_name_pp`` are already variables in\n dataset. If ``None`` it will be the opposite of inplace.\n\n Returns\n -------\n idata: InferenceData\n Output InferenceData object. If ``inplace=True``, it is the same input object modified\n inplace.\n\n Notes\n -----\n This function is provided for convenience to wrap scalar or functions working on low\n dims to inference data object. It is not optimized to be faster nor as fast as vectorized\n computations.\n\n Examples\n --------\n Use ``apply_test_function`` to wrap ``np.min`` for illustration purposes. And plot the\n results.\n\n .. 
plot::\n :context: close-figs\n\n >>> import arviz as az\n >>> idata = az.load_arviz_data(\"centered_eight\")\n >>> az.apply_test_function(idata, lambda y, theta: np.min(y))\n >>> T = np.asscalar(idata.observed_data.T)\n >>> az.plot_posterior(idata, var_names=[\"T\"], group=\"posterior_predictive\", ref_val=T)\n\n \"\"\"\n out = idata if inplace else deepcopy(idata)\n\n valid_groups = (\"observed_data\", \"posterior_predictive\", \"both\")\n if group not in valid_groups:\n raise ValueError(\n \"Invalid group argument. Must be one of {} not {}.\".format(valid_groups, group)\n )\n if overwrite is None:\n overwrite = not inplace\n\n if out_name_pp is None:\n out_name_pp = out_name_data\n\n if func_args is None:\n func_args = tuple()\n\n if func_kwargs is None:\n func_kwargs = {}\n\n if ufunc_kwargs is None:\n ufunc_kwargs = {}\n ufunc_kwargs.setdefault(\"check_shape\", False)\n ufunc_kwargs.setdefault(\"ravel\", False)\n\n if wrap_data_kwargs is None:\n wrap_data_kwargs = {}\n if wrap_pp_kwargs is None:\n wrap_pp_kwargs = {}\n if var_names is None:\n var_names = {}\n\n both_var_names = var_names.pop(\"both\", None)\n var_names.setdefault(\"posterior\", list(out.posterior.data_vars))\n\n in_posterior = out.posterior[var_names[\"posterior\"]]\n if isinstance(in_posterior, xr.Dataset):\n in_posterior = in_posterior.to_array().squeeze()\n\n groups = (\"posterior_predictive\", \"observed_data\") if group == \"both\" else [group]\n for grp in groups:\n out_group_shape = out_data_shape if grp == \"observed_data\" else out_pp_shape\n out_name_group = out_name_data if grp == \"observed_data\" else out_name_pp\n wrap_group_kwargs = wrap_data_kwargs if grp == \"observed_data\" else wrap_pp_kwargs\n if not hasattr(out, grp):\n raise ValueError(\"InferenceData object must have {} group\".format(grp))\n if not overwrite and out_name_group in getattr(out, grp).data_vars:\n raise ValueError(\n \"Should overwrite: {} variable present in group {}, but overwrite is False\".format(\n out_name_group, grp\n )\n )\n var_names.setdefault(\n grp, list(getattr(out, grp).data_vars) if both_var_names is None else both_var_names\n )\n in_group = getattr(out, grp)[var_names[grp]]\n if isinstance(in_group, xr.Dataset):\n in_group = in_group.to_array(dim=\"{}_var\".format(grp)).squeeze()\n\n if pointwise:\n out_group_shape = in_group.shape if out_group_shape is None else out_group_shape\n elif grp == \"observed_data\":\n out_group_shape = () if out_group_shape is None else out_group_shape\n elif grp == \"posterior_predictive\":\n out_group_shape = in_group.shape[:2] if out_group_shape is None else out_group_shape\n loop_dims = in_group.dims[: len(out_group_shape)]\n\n wrap_group_kwargs.setdefault(\n \"input_core_dims\",\n [\n [dim for dim in dataset.dims if dim not in loop_dims]\n for dataset in [in_group, in_posterior]\n ],\n )\n func_kwargs[\"out\"] = np.empty(out_group_shape)\n\n out_group = getattr(out, grp)\n try:\n out_group[out_name_group] = _wrap_xarray_ufunc(\n func,\n in_group.values,\n in_posterior.values,\n func_args=func_args,\n func_kwargs=func_kwargs,\n ufunc_kwargs=ufunc_kwargs,\n **wrap_group_kwargs,\n )\n except IndexError:\n excluded_dims = set(\n wrap_group_kwargs[\"input_core_dims\"][0] + wrap_group_kwargs[\"input_core_dims\"][1]\n )\n out_group[out_name_group] = _wrap_xarray_ufunc(\n func,\n *xr.broadcast(in_group, in_posterior, exclude=excluded_dims),\n func_args=func_args,\n func_kwargs=func_kwargs,\n ufunc_kwargs=ufunc_kwargs,\n **wrap_group_kwargs,\n )\n setattr(out, grp, out_group)\n\n return 
out\n", "\"\"\"Matplotlib jointplot.\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom . import backend_kwarg_defaults, backend_show\nfrom ...distplot import plot_dist\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import make_label\n\n\ndef plot_joint(\n ax,\n figsize,\n plotters,\n ax_labelsize,\n xt_labelsize,\n kind,\n contour,\n fill_last,\n joint_kwargs,\n gridsize,\n marginal_kwargs,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib joint plot.\"\"\"\n if backend_kwargs is None:\n backend_kwargs = {}\n\n backend_kwargs = {\n **backend_kwarg_defaults(),\n **backend_kwargs,\n }\n if ax is None:\n # Instantiate figure and grid\n fig = plt.figure(figsize=figsize, **backend_kwargs)\n grid = plt.GridSpec(4, 4, hspace=0.1, wspace=0.1, figure=fig)\n\n # Set up main plot\n axjoin = fig.add_subplot(grid[1:, :-1])\n # Set up top KDE\n ax_hist_x = fig.add_subplot(grid[0, :-1], sharex=axjoin)\n # Set up right KDE\n ax_hist_y = fig.add_subplot(grid[1:, -1], sharey=axjoin)\n elif len(ax) == 3:\n axjoin, ax_hist_x, ax_hist_y = ax\n else:\n raise ValueError(\"ax must be of lenght 3 but found {}\".format(len(ax)))\n\n # Personalize axes\n ax_hist_x.tick_params(labelleft=False, labelbottom=False)\n ax_hist_y.tick_params(labelleft=False, labelbottom=False)\n\n # Set labels for axes\n x_var_name = make_label(plotters[0][0], plotters[0][1])\n y_var_name = make_label(plotters[1][0], plotters[1][1])\n\n axjoin.set_xlabel(x_var_name, fontsize=ax_labelsize)\n axjoin.set_ylabel(y_var_name, fontsize=ax_labelsize)\n axjoin.tick_params(labelsize=xt_labelsize)\n\n # Flatten data\n x = plotters[0][2].flatten()\n y = plotters[1][2].flatten()\n\n if kind == \"scatter\":\n axjoin.scatter(x, y, **joint_kwargs)\n elif kind == \"kde\":\n plot_kde(x, y, contour=contour, fill_last=fill_last, ax=axjoin, **joint_kwargs)\n else:\n if gridsize == \"auto\":\n gridsize = int(len(x) ** 0.35)\n axjoin.hexbin(x, y, mincnt=1, gridsize=gridsize, **joint_kwargs)\n axjoin.grid(False)\n\n for val, ax_, rotate in ((x, ax_hist_x, False), (y, ax_hist_y, True)):\n plot_dist(val, textsize=xt_labelsize, rotated=rotate, ax=ax_, **marginal_kwargs)\n\n ax_hist_x.set_xlim(axjoin.get_xlim())\n ax_hist_y.set_ylim(axjoin.get_ylim())\n\n if backend_show(show):\n plt.show()\n\n return np.array([axjoin, ax_hist_x, ax_hist_y])\n", "\"\"\"Matplotlib Posterior predictive plot.\"\"\"\nimport platform\nimport logging\nfrom matplotlib import animation, get_backend\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom . import backend_show\nfrom ...kdeplot import plot_kde\nfrom ...plot_utils import (\n make_label,\n _create_axes_grid,\n)\nfrom ....numeric_utils import _fast_kde, histogram, get_bins\n\n_log = logging.getLogger(__name__)\n\n\ndef plot_ppc(\n ax,\n length_plotters,\n rows,\n cols,\n figsize,\n animated,\n obs_plotters,\n pp_plotters,\n predictive_dataset,\n pp_sample_ix,\n kind,\n alpha,\n linewidth,\n mean,\n xt_labelsize,\n ax_labelsize,\n jitter,\n total_pp_samples,\n legend,\n group,\n markersize,\n animation_kwargs,\n num_pp_samples,\n backend_kwargs,\n show,\n):\n \"\"\"Matplotlib ppc plot.\"\"\"\n if animated:\n try:\n shell = get_ipython().__class__.__name__\n if shell == \"ZMQInteractiveShell\" and get_backend() != \"nbAgg\":\n raise Warning(\n \"To run animations inside a notebook you have to use the nbAgg backend. \"\n \"Try with `%matplotlib notebook` or `%matplotlib nbAgg`. 
You can switch \"\n \"back to the default backend with `%matplotlib inline` or \"\n \"`%matplotlib auto`.\"\n )\n except NameError:\n pass\n\n if animation_kwargs[\"blit\"] and platform.system() != \"Linux\":\n _log.warning(\n \"If you experience problems rendering the animation try setting \"\n \"`animation_kwargs({'blit':False}) or changing the plotting backend \"\n \"(e.g. to TkAgg)\"\n )\n\n if ax is None:\n fig, axes = _create_axes_grid(\n length_plotters, rows, cols, figsize=figsize, backend_kwargs=backend_kwargs\n )\n else:\n axes = np.ravel(ax)\n if len(axes) != length_plotters:\n raise ValueError(\n \"Found {} variables to plot but {} axes instances. They must be equal.\".format(\n length_plotters, len(axes)\n )\n )\n if animated:\n fig = axes[0].get_figure()\n if not all([ax.get_figure() is fig for ax in axes]):\n raise ValueError(\"All axes must be on the same figure for animation to work\")\n\n for i, ax_i in enumerate(axes):\n var_name, selection, obs_vals = obs_plotters[i]\n pp_var_name, _, pp_vals = pp_plotters[i]\n dtype = predictive_dataset[pp_var_name].dtype.kind\n\n # flatten non-specified dimensions\n obs_vals = obs_vals.flatten()\n pp_vals = pp_vals.reshape(total_pp_samples, -1)\n pp_sampled_vals = pp_vals[pp_sample_ix]\n\n if kind == \"kde\":\n plot_kwargs = {\"color\": \"C5\", \"alpha\": alpha, \"linewidth\": 0.5 * linewidth}\n if dtype == \"i\":\n plot_kwargs[\"drawstyle\"] = \"steps-pre\"\n ax_i.plot(\n [], color=\"C5\", label=\"{} predictive {}\".format(group.capitalize(), pp_var_name)\n )\n\n if dtype == \"f\":\n plot_kde(\n obs_vals,\n label=\"Observed {}\".format(var_name),\n plot_kwargs={\"color\": \"k\", \"linewidth\": linewidth, \"zorder\": 3},\n fill_kwargs={\"alpha\": 0},\n ax=ax_i,\n legend=legend,\n )\n else:\n bins = get_bins(obs_vals)\n _, hist, bin_edges = histogram(obs_vals, bins=bins)\n hist = np.concatenate((hist[:1], hist))\n ax_i.plot(\n bin_edges,\n hist,\n label=\"Observed {}\".format(var_name),\n color=\"k\",\n linewidth=linewidth,\n zorder=3,\n drawstyle=plot_kwargs[\"drawstyle\"],\n )\n\n pp_densities = []\n pp_xs = []\n for vals in pp_sampled_vals:\n vals = np.array([vals]).flatten()\n if dtype == \"f\":\n pp_density, lower, upper = _fast_kde(vals)\n pp_x = np.linspace(lower, upper, len(pp_density))\n pp_densities.append(pp_density)\n pp_xs.append(pp_x)\n else:\n bins = get_bins(vals)\n _, hist, bin_edges = histogram(vals, bins=bins)\n hist = np.concatenate((hist[:1], hist))\n pp_densities.append(hist)\n pp_xs.append(bin_edges)\n\n if animated:\n animate, init = _set_animation(\n pp_sampled_vals, ax_i, dtype=dtype, kind=kind, plot_kwargs=plot_kwargs\n )\n\n else:\n if dtype == \"f\":\n ax_i.plot(np.transpose(pp_xs), np.transpose(pp_densities), **plot_kwargs)\n else:\n for x_s, y_s in zip(pp_xs, pp_densities):\n ax_i.plot(x_s, y_s, **plot_kwargs)\n\n if mean:\n label = \"{} predictive mean {}\".format(group.capitalize(), pp_var_name)\n if dtype == \"f\":\n rep = len(pp_densities)\n len_density = len(pp_densities[0])\n\n new_x = np.linspace(np.min(pp_xs), np.max(pp_xs), len_density)\n new_d = np.zeros((rep, len_density))\n bins = np.digitize(pp_xs, new_x, right=True)\n new_x -= (new_x[1] - new_x[0]) / 2\n for irep in range(rep):\n new_d[irep][bins[irep]] = pp_densities[irep]\n ax_i.plot(\n new_x,\n new_d.mean(0),\n color=\"C0\",\n linestyle=\"--\",\n linewidth=linewidth,\n zorder=2,\n label=label,\n )\n else:\n vals = pp_vals.flatten()\n bins = get_bins(vals)\n _, hist, bin_edges = histogram(vals, bins=bins)\n hist = np.concatenate((hist[:1], 
hist))\n ax_i.plot(\n bin_edges,\n hist,\n color=\"C0\",\n linewidth=linewidth,\n label=label,\n zorder=2,\n linestyle=\"--\",\n drawstyle=plot_kwargs[\"drawstyle\"],\n )\n ax_i.tick_params(labelsize=xt_labelsize)\n ax_i.set_yticks([])\n\n elif kind == \"cumulative\":\n drawstyle = \"default\" if dtype == \"f\" else \"steps-pre\"\n ax_i.plot(\n *_empirical_cdf(obs_vals),\n color=\"k\",\n linewidth=linewidth,\n label=\"Observed {}\".format(var_name),\n drawstyle=drawstyle,\n zorder=3\n )\n if animated:\n animate, init = _set_animation(\n pp_sampled_vals,\n ax_i,\n kind=kind,\n alpha=alpha,\n drawstyle=drawstyle,\n linewidth=linewidth,\n )\n\n else:\n pp_densities = np.empty((2 * len(pp_sampled_vals), pp_sampled_vals[0].size))\n for idx, vals in enumerate(pp_sampled_vals):\n vals = np.array([vals]).flatten()\n pp_x, pp_density = _empirical_cdf(vals)\n pp_densities[2 * idx] = pp_x\n pp_densities[2 * idx + 1] = pp_density\n\n ax_i.plot(\n *pp_densities, alpha=alpha, color=\"C5\", drawstyle=drawstyle, linewidth=linewidth\n )\n ax_i.plot([], color=\"C5\", label=\"Posterior predictive {}\".format(pp_var_name))\n if mean:\n ax_i.plot(\n *_empirical_cdf(pp_vals.flatten()),\n color=\"C0\",\n linestyle=\"--\",\n linewidth=linewidth,\n drawstyle=drawstyle,\n label=\"Posterior predictive mean {}\".format(pp_var_name)\n )\n ax_i.set_yticks([0, 0.5, 1])\n\n elif kind == \"scatter\":\n if mean:\n if dtype == \"f\":\n plot_kde(\n pp_vals.flatten(),\n plot_kwargs={\n \"color\": \"C0\",\n \"linestyle\": \"--\",\n \"linewidth\": linewidth,\n \"zorder\": 3,\n },\n label=\"Posterior predictive mean {}\".format(pp_var_name),\n ax=ax_i,\n legend=legend,\n )\n else:\n vals = pp_vals.flatten()\n bins = get_bins(vals)\n _, hist, bin_edges = histogram(vals, bins=bins)\n hist = np.concatenate((hist[:1], hist))\n ax_i.plot(\n bin_edges,\n hist,\n color=\"C0\",\n linewidth=linewidth,\n label=\"Posterior predictive mean {}\".format(pp_var_name),\n zorder=3,\n linestyle=\"--\",\n drawstyle=\"steps-pre\",\n )\n\n _, limit = ax_i.get_ylim()\n limit *= 1.05\n y_rows = np.linspace(0, limit, num_pp_samples + 1)\n jitter_scale = y_rows[1] - y_rows[0]\n scale_low = 0\n scale_high = jitter_scale * jitter\n\n obs_yvals = np.zeros_like(obs_vals, dtype=np.float64)\n if jitter:\n obs_yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(obs_vals))\n ax_i.plot(\n obs_vals,\n obs_yvals,\n \"o\",\n color=\"C0\",\n markersize=markersize,\n alpha=alpha,\n label=\"Observed {}\".format(var_name),\n zorder=4,\n )\n\n if animated:\n animate, init = _set_animation(\n pp_sampled_vals,\n ax_i,\n kind=kind,\n height=y_rows.mean() * 0.5,\n markersize=markersize,\n )\n\n else:\n for vals, y in zip(pp_sampled_vals, y_rows[1:]):\n vals = np.ravel(vals)\n yvals = np.full_like(vals, y, dtype=np.float64)\n if jitter:\n yvals += np.random.uniform(low=scale_low, high=scale_high, size=len(vals))\n ax_i.plot(\n vals, yvals, \"o\", zorder=2, color=\"C5\", markersize=markersize, alpha=alpha\n )\n\n ax_i.plot([], \"C5o\", label=\"Posterior predictive {}\".format(pp_var_name))\n\n ax_i.set_yticks([])\n\n if var_name != pp_var_name:\n xlabel = \"{} / {}\".format(var_name, pp_var_name)\n else:\n xlabel = var_name\n ax_i.set_xlabel(make_label(xlabel, selection), fontsize=ax_labelsize)\n\n if legend:\n if i == 0:\n ax_i.legend(fontsize=xt_labelsize * 0.75)\n else:\n ax_i.legend([])\n\n if backend_show(show):\n plt.show()\n\n if animated:\n ani = animation.FuncAnimation(\n fig, animate, np.arange(0, num_pp_samples), init_func=init, **animation_kwargs\n 
)\n return axes, ani\n else:\n return axes\n\n\ndef _set_animation(\n pp_sampled_vals,\n ax,\n dtype=None,\n kind=\"density\",\n alpha=None,\n drawstyle=None,\n linewidth=None,\n height=None,\n markersize=None,\n plot_kwargs=None,\n):\n if kind == \"kde\":\n length = len(pp_sampled_vals)\n if dtype == \"f\":\n y_vals, lower, upper = _fast_kde(pp_sampled_vals[0])\n x_vals = np.linspace(lower, upper, len(y_vals))\n\n max_max = max([max(_fast_kde(pp_sampled_vals[i])[0]) for i in range(length)])\n\n ax.set_ylim(0, max_max)\n\n (line,) = ax.plot(x_vals, y_vals, **plot_kwargs)\n\n def animate(i):\n y_vals, lower, upper = _fast_kde(pp_sampled_vals[i])\n x_vals = np.linspace(lower, upper, len(y_vals))\n line.set_data(x_vals, y_vals)\n return line\n\n else:\n vals = pp_sampled_vals[0]\n _, y_vals, x_vals = histogram(vals, bins=\"auto\")\n (line,) = ax.plot(x_vals[:-1], y_vals, **plot_kwargs)\n\n max_max = max(\n [max(histogram(pp_sampled_vals[i], bins=\"auto\")[1]) for i in range(length)]\n )\n\n ax.set_ylim(0, max_max)\n\n def animate(i):\n _, y_vals, x_vals = histogram(pp_sampled_vals[i], bins=\"auto\")\n line.set_data(x_vals[:-1], y_vals)\n return (line,)\n\n elif kind == \"cumulative\":\n x_vals, y_vals = _empirical_cdf(pp_sampled_vals[0])\n (line,) = ax.plot(\n x_vals, y_vals, alpha=alpha, color=\"C5\", drawstyle=drawstyle, linewidth=linewidth\n )\n\n def animate(i):\n x_vals, y_vals = _empirical_cdf(pp_sampled_vals[i])\n line.set_data(x_vals, y_vals)\n return line\n\n elif kind == \"scatter\":\n x_vals = pp_sampled_vals[0]\n y_vals = np.full_like(x_vals, height, dtype=np.float64)\n (line,) = ax.plot(\n x_vals, y_vals, \"o\", zorder=2, color=\"C5\", markersize=markersize, alpha=alpha\n )\n\n def animate(i):\n line.set_xdata(np.ravel(pp_sampled_vals[i]))\n return line\n\n def init():\n if kind != \"scatter\":\n line.set_data([], [])\n else:\n line.set_xdata([])\n return line\n\n return animate, init\n\n\ndef _empirical_cdf(data):\n \"\"\"Compute empirical cdf of a numpy array.\n\n Parameters\n ----------\n data : np.array\n 1d array\n\n Returns\n -------\n np.array, np.array\n x and y coordinates for the empirical cdf of the data\n \"\"\"\n return np.sort(data), np.linspace(0, 1, len(data))\n", "\"\"\"Data structure for using netcdf groups with xarray.\"\"\"\n# pylint: disable=too-many-lines\nfrom collections import OrderedDict\nfrom collections.abc import Sequence\nfrom copy import copy as ccopy, deepcopy\nfrom datetime import datetime\nfrom html import escape\nimport warnings\nimport uuid\n\nimport netCDF4 as nc\nimport numpy as np\nimport xarray as xr\nfrom xarray.core.options import OPTIONS\n\nfrom .base import _extend_xr_method\nfrom ..utils import _subset_list, HtmlTemplate\nfrom ..rcparams import rcParams\n\nSUPPORTED_GROUPS = [\n \"posterior\",\n \"posterior_predictive\",\n \"predictions\",\n \"log_likelihood\",\n \"sample_stats\",\n \"prior\",\n \"prior_predictive\",\n \"sample_stats_prior\",\n \"observed_data\",\n \"constant_data\",\n \"predictions_constant_data\",\n]\n\nWARMUP_TAG = \"warmup_\"\n\nSUPPORTED_GROUPS_WARMUP = [\n \"{}posterior\".format(WARMUP_TAG),\n \"{}posterior_predictive\".format(WARMUP_TAG),\n \"{}predictions\".format(WARMUP_TAG),\n \"{}sample_stats\".format(WARMUP_TAG),\n \"{}log_likelihood\".format(WARMUP_TAG),\n]\n\nSUPPORTED_GROUPS_ALL = SUPPORTED_GROUPS + SUPPORTED_GROUPS_WARMUP\n\n\nclass InferenceData:\n \"\"\"Container for inference data storage using xarray.\n\n For a detailed introduction to ``InferenceData`` objects and their usage, see\n 
:doc:`/notebooks/XarrayforArviZ`. This page provides help and documentation\n on ``InferenceData`` methods and their low level implementation.\n \"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize InferenceData object from keyword xarray datasets.\n\n Parameters\n ----------\n kwargs :\n Keyword arguments of xarray datasets\n\n Examples\n --------\n Initiate an InferenceData object from scratch, not recommended. InferenceData\n objects should be initialized using ``from_xyz`` methods, see :ref:`data_api` for more\n details.\n\n .. ipython::\n\n In [1]: import arviz as az\n ...: import numpy as np\n ...: import xarray as xr\n ...: dataset = xr.Dataset(\n ...: {\n ...: \"a\": ([\"chain\", \"draw\", \"a_dim\"], np.random.normal(size=(4, 100, 3))),\n ...: \"b\": ([\"chain\", \"draw\"], np.random.normal(size=(4, 100))),\n ...: },\n ...: coords={\n ...: \"chain\": ([\"chain\"], np.arange(4)),\n ...: \"draw\": ([\"draw\"], np.arange(100)),\n ...: \"a_dim\": ([\"a_dim\"], [\"x\", \"y\", \"z\"]),\n ...: }\n ...: )\n ...: idata = az.InferenceData(posterior=dataset, prior=dataset)\n ...: idata\n\n We have created an ``InferenceData`` object with two groups. Now we can check its\n contents:\n\n .. ipython::\n\n In [1]: idata.posterior\n\n \"\"\"\n self._groups = []\n self._groups_warmup = []\n save_warmup = kwargs.pop(\"save_warmup\", False)\n key_list = [key for key in SUPPORTED_GROUPS_ALL if key in kwargs]\n for key in kwargs:\n if key not in SUPPORTED_GROUPS_ALL:\n key_list.append(key)\n warnings.warn(\n \"{} group is not defined in the InferenceData scheme\".format(key), UserWarning\n )\n for key in key_list:\n dataset = kwargs[key]\n dataset_warmup = None\n if dataset is None:\n continue\n elif isinstance(dataset, (list, tuple)):\n dataset, dataset_warmup = kwargs[key]\n elif not isinstance(dataset, xr.Dataset):\n raise ValueError(\n \"Arguments to InferenceData must be xarray Datasets \"\n \"(argument '{}' was type '{}')\".format(key, type(dataset))\n )\n if not key.startswith(WARMUP_TAG):\n if dataset:\n setattr(self, key, dataset)\n self._groups.append(key)\n elif key.startswith(WARMUP_TAG):\n if dataset:\n setattr(self, key, dataset)\n self._groups_warmup.append(key)\n if save_warmup and dataset_warmup is not None:\n if dataset_warmup:\n key = \"{}{}\".format(WARMUP_TAG, key)\n setattr(self, key, dataset_warmup)\n self._groups_warmup.append(key)\n\n def __repr__(self):\n \"\"\"Make string representation of InferenceData object.\"\"\"\n msg = \"Inference data with groups:\\n\\t> {options}\".format(\n options=\"\\n\\t> \".join(self._groups)\n )\n if self._groups_warmup:\n msg += \"\\n\\nWarmup iterations saved ({}*).\".format(WARMUP_TAG)\n return msg\n\n def _repr_html_(self):\n \"\"\"Make html representation of InferenceData object.\"\"\"\n display_style = OPTIONS[\"display_style\"]\n if display_style == \"text\":\n html_repr = f\"<pre>{escape(repr(self))}</pre>\"\n else:\n elements = \"\".join(\n [\n HtmlTemplate.element_template.format(\n group_id=group + str(uuid.uuid4()),\n group=group,\n xr_data=getattr( # pylint: disable=protected-access\n self, group\n )._repr_html_(),\n )\n for group in self._groups_all\n ]\n )\n formatted_html_template = HtmlTemplate.html_template.format( # pylint: disable=possibly-unused-variable\n elements\n )\n css_template = HtmlTemplate.css_template # pylint: disable=possibly-unused-variable\n html_repr = \"%(formatted_html_template)s%(css_template)s\" % locals()\n return html_repr\n\n def __delattr__(self, group):\n \"\"\"Delete a group from the 
InferenceData object.\"\"\"\n if group in self._groups:\n self._groups.remove(group)\n elif group in self._groups_warmup:\n self._groups_warmup.remove(group)\n object.__delattr__(self, group)\n\n @property\n def _groups_all(self):\n return self._groups + self._groups_warmup\n\n @staticmethod\n def from_netcdf(filename):\n \"\"\"Initialize object from a netcdf file.\n\n Expects that the file will have groups, each of which can be loaded by xarray.\n By default, the datasets of the InferenceData object will be lazily loaded instead\n of being loaded into memory. This\n behaviour is regulated by the value of ``az.rcParams[\"data.load\"]``.\n\n Parameters\n ----------\n filename : str\n location of netcdf file\n\n Returns\n -------\n InferenceData object\n \"\"\"\n groups = {}\n with nc.Dataset(filename, mode=\"r\") as data:\n data_groups = list(data.groups)\n\n for group in data_groups:\n with xr.open_dataset(filename, group=group) as data:\n if rcParams[\"data.load\"] == \"eager\":\n groups[group] = data.load()\n else:\n groups[group] = data\n return InferenceData(**groups)\n\n def to_netcdf(self, filename, compress=True, groups=None):\n \"\"\"Write InferenceData to file using netcdf4.\n\n Parameters\n ----------\n filename : str\n Location to write to\n compress : bool, optional\n Whether to compress result. Note this saves disk space, but may make\n saving and loading somewhat slower (default: True).\n groups : list, optional\n Write only these groups to netcdf file.\n\n Returns\n -------\n str\n Location of netcdf file\n \"\"\"\n mode = \"w\" # overwrite first, then append\n if self._groups_all: # check's whether a group is present or not.\n if groups is None:\n groups = self._groups_all\n else:\n groups = [group for group in self._groups_all if group in groups]\n\n for group in groups:\n data = getattr(self, group)\n kwargs = {}\n if compress:\n kwargs[\"encoding\"] = {var_name: {\"zlib\": True} for var_name in data.variables}\n data.to_netcdf(filename, mode=mode, group=group, **kwargs)\n data.close()\n mode = \"a\"\n else: # creates a netcdf file for an empty InferenceData object.\n empty_netcdf_file = nc.Dataset(filename, mode=\"w\", format=\"NETCDF4\")\n empty_netcdf_file.close()\n return filename\n\n def __add__(self, other):\n \"\"\"Concatenate two InferenceData objects.\"\"\"\n return concat(self, other, copy=True, inplace=False)\n\n def sel(\n self, groups=None, filter_groups=None, inplace=False, chain_prior=None, **kwargs,\n ):\n \"\"\"Perform an xarray selection on all groups.\n\n Loops groups to perform Dataset.sel(key=item)\n for every kwarg if key is a dimension of the dataset.\n One example could be performing a burn in cut on the InferenceData object\n or discarding a chain. The selection is performed on all relevant groups (like\n posterior, prior, sample stats) while non relevant groups like observed data are\n omitted. See :meth:`xarray.Dataset.sel <xarray:xarray.Dataset.sel>`\n\n Parameters\n ----------\n groups: str or list of str, optional\n Groups where the selection is to be applied. Can either be group names\n or metagroup names.\n filter_groups: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret groups as the real group or metagroup names.\n If \"like\", interpret groups as substrings of the real group or metagroup names.\n If \"regex\", interpret groups as regular expressions on the real group or\n metagroup names. 
A la `pandas.filter`.\n inplace: bool, optional\n If ``True``, modify the InferenceData object inplace,\n otherwise, return the modified copy.\n chain_prior: bool, optional, deprecated\n If ``False``, do not select prior related groups using ``chain`` dim.\n Otherwise, use selection on ``chain`` if present. Default=False\n **kwargs: mapping\n It must be accepted by Dataset.sel().\n\n Returns\n -------\n InferenceData\n A new InferenceData object by default.\n When `inplace==True` perform selection in-place and return `None`\n\n Examples\n --------\n Use ``sel`` to discard one chain of the InferenceData object. We first check the\n dimensions of the original object:\n\n .. ipython::\n\n In [1]: import arviz as az\n ...: idata = az.load_arviz_data(\"centered_eight\")\n ...: del idata.prior # prior group only has 1 chain currently\n ...: print(idata.posterior.coords)\n ...: print(idata.posterior_predictive.coords)\n ...: print(idata.observed_data.coords)\n\n In order to remove the third chain:\n\n .. ipython::\n\n In [1]: idata_subset = idata.sel(chain=[0, 1, 3])\n ...: print(idata_subset.posterior.coords)\n ...: print(idata_subset.posterior_predictive.coords)\n ...: print(idata_subset.observed_data.coords)\n\n \"\"\"\n if chain_prior is not None:\n warnings.warn(\n \"chain_prior has been deprecated. Use groups argument and \"\n \"rcParams['data.metagroups'] instead.\",\n DeprecationWarning,\n )\n else:\n chain_prior = False\n groups = self._group_names(groups, filter_groups)\n\n out = self if inplace else deepcopy(self)\n for group in groups:\n dataset = getattr(self, group)\n valid_keys = set(kwargs.keys()).intersection(dataset.dims)\n if not chain_prior and \"prior\" in group:\n valid_keys -= {\"chain\"}\n dataset = dataset.sel(**{key: kwargs[key] for key in valid_keys})\n setattr(out, group, dataset)\n if inplace:\n return None\n else:\n return out\n\n def isel(\n self, groups=None, filter_groups=None, inplace=False, **kwargs,\n ):\n \"\"\"Perform an xarray selection on all groups.\n\n Loops groups to perform Dataset.isel(key=item)\n for every kwarg if key is a dimension of the dataset.\n One example could be performing a burn in cut on the InferenceData object\n or discarding a chain. The selection is performed on all relevant groups (like\n posterior, prior, sample stats) while non relevant groups like observed data are\n omitted. See :meth:`xarray:xarray.Dataset.isel`\n\n Parameters\n ----------\n groups: str or list of str, optional\n Groups where the selection is to be applied. Can either be group names\n or metagroup names.\n filter_groups: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret groups as the real group or metagroup names.\n If \"like\", interpret groups as substrings of the real group or metagroup names.\n If \"regex\", interpret groups as regular expressions on the real group or\n metagroup names. 
A la `pandas.filter`.\n inplace: bool, optional\n If ``True``, modify the InferenceData object inplace,\n otherwise, return the modified copy.\n **kwargs: mapping\n It must be accepted by :meth:`xarray:xarray.Dataset.isel`.\n\n Returns\n -------\n InferenceData\n A new InferenceData object by default.\n When `inplace==True` perform selection in-place and return `None`\n\n \"\"\"\n groups = self._group_names(groups, filter_groups)\n\n out = self if inplace else deepcopy(self)\n for group in groups:\n dataset = getattr(self, group)\n valid_keys = set(kwargs.keys()).intersection(dataset.dims)\n dataset = dataset.isel(**{key: kwargs[key] for key in valid_keys})\n setattr(out, group, dataset)\n if inplace:\n return None\n else:\n return out\n\n def stack(\n self, dimensions=None, groups=None, filter_groups=None, inplace=False, **kwargs,\n ):\n \"\"\"Perform an xarray stacking on all groups.\n\n Stack any number of existing dimensions into a single new dimension.\n Loops groups to perform Dataset.stack(key=value)\n for every kwarg if value is a dimension of the dataset.\n The selection is performed on all relevant groups (like\n posterior, prior, sample stats) while non relevant groups like observed data are\n omitted. See :meth:`xarray:xarray.Dataset.stack`\n\n Parameters\n ----------\n dimensions: dict\n Names of new dimensions, and the existing dimensions that they replace.\n groups: str or list of str, optional\n Groups where the selection is to be applied. Can either be group names\n or metagroup names.\n filter_groups: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret groups as the real group or metagroup names.\n If \"like\", interpret groups as substrings of the real group or metagroup names.\n If \"regex\", interpret groups as regular expressions on the real group or\n metagroup names. A la `pandas.filter`.\n inplace: bool, optional\n If ``True``, modify the InferenceData object inplace,\n otherwise, return the modified copy.\n **kwargs: mapping\n It must be accepted by :meth:`xarray:xarray.Dataset.stack`.\n\n Returns\n -------\n InferenceData\n A new InferenceData object by default.\n When `inplace==True` perform selection in-place and return `None`\n\n \"\"\"\n groups = self._group_names(groups, filter_groups)\n\n dimensions = {} if dimensions is None else dimensions\n dimensions.update(kwargs)\n out = self if inplace else deepcopy(self)\n for group in groups:\n dataset = getattr(self, group)\n kwarg_dict = {}\n for key, value in dimensions.items():\n if not set(value).difference(dataset.dims):\n kwarg_dict[key] = value\n dataset = dataset.stack(**kwarg_dict)\n setattr(out, group, dataset)\n if inplace:\n return None\n else:\n return out\n\n def unstack(self, dim=None, groups=None, filter_groups=None, inplace=False):\n \"\"\"Perform an xarray unstacking on all groups.\n\n Unstack existing dimensions corresponding to MultiIndexes into multiple new dimensions.\n Loops groups to perform Dataset.unstack(key=value).\n The selection is performed on all relevant groups (like posterior, prior,\n sample stats) while non relevant groups like observed data are omitted.\n See :meth:`xarray:xarray.Dataset.unstack`\n\n Parameters\n ----------\n dim: Hashable or iterable of Hashable, optional\n Dimension(s) over which to unstack. By default unstacks all MultiIndexes.\n groups: str or list of str, optional\n Groups where the selection is to be applied. 
Can either be group names\n or metagroup names.\n filter_groups: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret groups as the real group or metagroup names.\n If \"like\", interpret groups as substrings of the real group or metagroup names.\n If \"regex\", interpret groups as regular expressions on the real group or\n metagroup names. A la `pandas.filter`.\n inplace: bool, optional\n If ``True``, modify the InferenceData object inplace,\n otherwise, return the modified copy.\n\n Returns\n -------\n InferenceData\n A new InferenceData object by default.\n When `inplace==True` perform selection in place and return `None`\n\n \"\"\"\n groups = self._group_names(groups, filter_groups)\n if isinstance(dim, str):\n dim = [dim]\n\n out = self if inplace else deepcopy(self)\n for group in groups:\n dataset = getattr(self, group)\n valid_dims = set(dim).intersection(dataset.dims) if dim is not None else dim\n dataset = dataset.unstack(dim=valid_dims)\n setattr(out, group, dataset)\n if inplace:\n return None\n else:\n return out\n\n def rename(self, name_dict=None, groups=None, filter_groups=None, inplace=False):\n \"\"\"Perform xarray renaming of variable and dimensions on all groups.\n\n Loops groups to perform Dataset.rename(name_dict)\n for every key in name_dict if key is a dimension/data_vars of the dataset.\n The renaming is performed on all relevant groups (like\n posterior, prior, sample stats) while non relevant groups like observed data are\n omitted. See :meth:`xarray:xarray.Dataset.rename`\n\n Parameters\n ----------\n name_dict: dict\n Dictionary whose keys are current variable or dimension names\n and whose values are the desired names.\n groups: str or list of str, optional\n Groups where the selection is to be applied. Can either be group names\n or metagroup names.\n filter_groups: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret groups as the real group or metagroup names.\n If \"like\", interpret groups as substrings of the real group or metagroup names.\n If \"regex\", interpret groups as regular expressions on the real group or\n metagroup names. A la `pandas.filter`.\n inplace: bool, optional\n If ``True``, modify the InferenceData object inplace,\n otherwise, return the modified copy.\n\n\n Returns\n -------\n InferenceData\n A new InferenceData object by default.\n When `inplace==True` perform renaming in-place and return `None`\n\n \"\"\"\n groups = self._group_names(groups, filter_groups)\n if \"chain\" in name_dict.keys() or \"draw\" in name_dict.keys():\n raise KeyError(\"'chain' or 'draw' dimensions can't be renamed\")\n out = self if inplace else deepcopy(self)\n\n for group in groups:\n dataset = getattr(self, group)\n expected_keys = list(dataset.data_vars) + list(dataset.dims)\n valid_keys = set(name_dict.keys()).intersection(expected_keys)\n dataset = dataset.rename({key: name_dict[key] for key in valid_keys})\n setattr(out, group, dataset)\n if inplace:\n return None\n else:\n return out\n\n def rename_vars(self, name_dict=None, groups=None, filter_groups=None, inplace=False):\n \"\"\"Perform xarray renaming of variable or coordinate names on all groups.\n\n Loops groups to perform Dataset.rename_vars(name_dict)\n for every key in name_dict if key is a variable or coordinate names of the dataset.\n The renaming is performed on all relevant groups (like\n posterior, prior, sample stats) while non relevant groups like observed data are\n omitted. 
See :meth:`xarray:xarray.Dataset.rename_vars`\n\n Parameters\n ----------\n name_dict: dict\n Dictionary whose keys are current variable or coordinate names\n and whose values are the desired names.\n groups: str or list of str, optional\n Groups where the selection is to be applied. Can either be group names\n or metagroup names.\n filter_groups: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret groups as the real group or metagroup names.\n If \"like\", interpret groups as substrings of the real group or metagroup names.\n If \"regex\", interpret groups as regular expressions on the real group or\n metagroup names. A la `pandas.filter`.\n inplace: bool, optional\n If ``True``, modify the InferenceData object inplace,\n otherwise, return the modified copy.\n\n\n Returns\n -------\n InferenceData\n A new InferenceData object with renamed variables including coordinates by default.\n When `inplace==True` perform renaming in-place and return `None`\n\n \"\"\"\n groups = self._group_names(groups, filter_groups)\n\n out = self if inplace else deepcopy(self)\n for group in groups:\n dataset = getattr(self, group)\n valid_keys = set(name_dict.keys()).intersection(dataset.data_vars)\n dataset = dataset.rename_vars({key: name_dict[key] for key in valid_keys})\n setattr(out, group, dataset)\n if inplace:\n return None\n else:\n return out\n\n def rename_dims(self, name_dict=None, groups=None, filter_groups=None, inplace=False):\n \"\"\"Perform xarray renaming of dimensions on all groups.\n\n Loops groups to perform Dataset.rename_dims(name_dict)\n for every key in name_dict if key is a dimension of the dataset.\n The renaming is performed on all relevant groups (like\n posterior, prior, sample stats) while non relevant groups like observed data are\n omitted. See :meth:`xarray:xarray.Dataset.rename_dims`\n\n Parameters\n ----------\n name_dict: dict\n Dictionary whose keys are current dimension names and whose values are the desired\n names.\n groups: str or list of str, optional\n Groups where the selection is to be applied. Can either be group names\n or metagroup names.\n filter_groups: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret groups as the real group or metagroup names.\n If \"like\", interpret groups as substrings of the real group or metagroup names.\n If \"regex\", interpret groups as regular expressions on the real group or\n metagroup names. 
A la `pandas.filter`.\n inplace: bool, optional\n If ``True``, modify the InferenceData object inplace,\n otherwise, return the modified copy.\n\n\n Returns\n -------\n InferenceData\n A new InferenceData object with renamed dimension by default.\n When `inplace==True` perform renaming in-place and return `None`\n\n \"\"\"\n groups = self._group_names(groups, filter_groups)\n if \"chain\" in name_dict.keys() or \"draw\" in name_dict.keys():\n raise KeyError(\"'chain' or 'draw' dimensions can't be renamed\")\n\n out = self if inplace else deepcopy(self)\n for group in groups:\n dataset = getattr(self, group)\n valid_keys = set(name_dict.keys()).intersection(dataset.dims)\n dataset = dataset.rename_dims({key: name_dict[key] for key in valid_keys})\n setattr(out, group, dataset)\n if inplace:\n return None\n else:\n return out\n\n set_index = _extend_xr_method(xr.Dataset.set_index)\n get_index = _extend_xr_method(xr.Dataset.get_index)\n reset_index = _extend_xr_method(xr.Dataset.reset_index)\n set_coords = _extend_xr_method(xr.Dataset.set_coords)\n reset_coords = _extend_xr_method(xr.Dataset.reset_coords)\n assign = _extend_xr_method(xr.Dataset.assign)\n assign_coords = _extend_xr_method(xr.Dataset.assign_coords)\n sortby = _extend_xr_method(xr.Dataset.sortby)\n chunk = _extend_xr_method(xr.Dataset.chunk)\n unify_chunks = _extend_xr_method(xr.Dataset.unify_chunks)\n load = _extend_xr_method(xr.Dataset.load)\n compute = _extend_xr_method(xr.Dataset.compute)\n persist = _extend_xr_method(xr.Dataset.persist)\n\n mean = _extend_xr_method(xr.Dataset.mean)\n median = _extend_xr_method(xr.Dataset.median)\n min = _extend_xr_method(xr.Dataset.min)\n max = _extend_xr_method(xr.Dataset.max)\n cumsum = _extend_xr_method(xr.Dataset.cumsum)\n sum = _extend_xr_method(xr.Dataset.sum)\n quantile = _extend_xr_method(xr.Dataset.quantile)\n\n def _group_names(self, groups, filter_groups=None):\n \"\"\"Handle expansion of group names input across arviz.\n\n Parameters\n ----------\n groups: str, list of str or None\n group or metagroup names.\n idata: xarray.Dataset\n Posterior data in an xarray\n filter_groups: {None, \"like\", \"regex\"}, optional, default=None\n If `None` (default), interpret groups as the real group or metagroup names.\n If \"like\", interpret groups as substrings of the real group or metagroup names.\n If \"regex\", interpret groups as regular expressions on the real group or\n metagroup names. 
A la `pandas.filter`.\n\n Returns\n -------\n groups: list\n \"\"\"\n all_groups = self._groups_all\n if groups is None:\n return all_groups\n if isinstance(groups, str):\n groups = [groups]\n sel_groups = []\n metagroups = rcParams[\"data.metagroups\"]\n for group in groups:\n if group[0] == \"~\":\n sel_groups.extend(\n [f\"~{item}\" for item in metagroups[group[1:]] if item in all_groups]\n if group[1:] in metagroups\n else [group]\n )\n else:\n sel_groups.extend(\n [item for item in metagroups[group] if item in all_groups]\n if group in metagroups\n else [group]\n )\n\n try:\n group_names = _subset_list(sel_groups, all_groups, filter_items=filter_groups)\n except KeyError as err:\n msg = \" \".join((\"groups:\", f\"{err}\", \"in InferenceData\"))\n raise KeyError(msg)\n return group_names\n\n def map(self, fun, groups=None, filter_groups=None, inplace=False, args=None, **kwargs):\n \"\"\"Apply a function to multiple groups.\n\n Applies ``fun`` groupwise to the selected ``InferenceData`` groups and overwrites the\n group with the result of the function.\n\n Parameters\n ----------\n fun : callable\n Function to be applied to each group. Assumes the function is called as\n ``fun(dataset, *args, **kwargs)``.\n groups : str or list of str, optional\n Groups where the selection is to be applied. Can either be group names\n or metagroup names.\n filter_groups : {None, \"like\", \"regex\"}, optional\n If `None` (default), interpret var_names as the real variables names. If \"like\",\n interpret var_names as substrings of the real variables names. If \"regex\",\n interpret var_names as regular expressions on the real variables names. A la\n `pandas.filter`.\n inplace : bool, optional\n If ``True``, modify the InferenceData object inplace,\n otherwise, return the modified copy.\n args : array_like, optional\n Positional arguments passed to ``fun``.\n **kwargs : mapping, optional\n Keyword arguments passed to ``fun``.\n\n Returns\n -------\n InferenceData\n A new InferenceData object by default.\n When `inplace==True` perform selection in place and return `None`\n\n Examples\n --------\n Shift observed_data, prior_predictive and posterior_predictive.\n\n .. ipython::\n\n In [1]: import arviz as az\n ...: idata = az.load_arviz_data(\"non_centered_eight\")\n ...: idata_shifted_obs = idata.map(lambda x: x + 3, groups=\"observed_vars\")\n ...: print(idata_shifted_obs.observed_data)\n ...: print(idata_shifted_obs.posterior_predictive)\n\n Rename and update the coordinate values in both posterior and prior groups.\n\n .. ipython::\n\n In [1]: idata = az.load_arviz_data(\"radon\")\n ...: idata = idata.map(\n ...: lambda ds: ds.rename({\"g_coef\": \"uranium_coefs\"}).assign(\n ...: uranium_coefs=[\"intercept\", \"u_slope\"]\n ...: ),\n ...: groups=[\"posterior\", \"prior\"]\n ...: )\n ...: idata.posterior\n\n Add extra coordinates to all groups containing observed variables\n\n .. 
ipython::\n\n In [1]: idata = az.load_arviz_data(\"rugby\")\n ...: home_team, away_team = np.array([\n ...: m.split() for m in idata.observed_data.match.values\n ...: ]).T\n ...: idata = idata.map(\n ...: lambda ds, **kwargs: ds.assign_coords(**kwargs),\n ...: groups=\"observed_vars\",\n ...: home_team=(\"match\", home_team),\n ...: away_team=(\"match\", away_team),\n ...: )\n ...: print(idata.posterior_predictive)\n ...: print(idata.observed_data)\n\n \"\"\"\n if args is None:\n args = []\n groups = self._group_names(groups, filter_groups)\n\n out = self if inplace else deepcopy(self)\n for group in groups:\n dataset = getattr(self, group)\n dataset = fun(dataset, *args, **kwargs)\n setattr(out, group, dataset)\n if inplace:\n return None\n else:\n return out\n\n def _wrap_xarray_method(\n self, method, groups=None, filter_groups=None, inplace=False, args=None, **kwargs\n ):\n \"\"\"Extend and xarray.Dataset method to InferenceData object.\n\n Parameters\n ----------\n method: str\n Method to be extended. Must be a ``xarray.Dataset`` method.\n groups: str or list of str, optional\n Groups where the selection is to be applied. Can either be group names\n or metagroup names.\n inplace: bool, optional\n If ``True``, modify the InferenceData object inplace,\n otherwise, return the modified copy.\n **kwargs: mapping, optional\n Keyword arguments passed to the xarray Dataset method.\n\n Returns\n -------\n InferenceData\n A new InferenceData object by default.\n When `inplace==True` perform selection in place and return `None`\n\n Examples\n --------\n Compute the mean of `posterior_groups`:\n\n .. ipython::\n\n In [1]: import arviz as az\n ...: idata = az.load_arviz_data(\"non_centered_eight\")\n ...: idata_means = idata._wrap_xarray_method(\"mean\", groups=\"latent_vars\")\n ...: print(idata_means.posterior)\n ...: print(idata_means.observed_data)\n\n .. ipython::\n\n In [1]: idata_stack = idata._wrap_xarray_method(\n ...: \"stack\",\n ...: groups=[\"posterior_groups\", \"prior_groups\"],\n ...: sample=[\"chain\", \"draw\"]\n ...: )\n ...: print(idata_stack.posterior)\n ...: print(idata_stack.prior)\n ...: print(idata_stack.observed_data)\n\n \"\"\"\n if args is None:\n args = []\n groups = self._group_names(groups, filter_groups)\n\n method = getattr(xr.Dataset, method)\n\n out = self if inplace else deepcopy(self)\n for group in groups:\n dataset = getattr(self, group)\n dataset = method(dataset, *args, **kwargs)\n setattr(out, group, dataset)\n if inplace:\n return None\n else:\n return out\n\n\n# pylint: disable=protected-access, inconsistent-return-statements\ndef concat(*args, dim=None, copy=True, inplace=False, reset_dim=True):\n \"\"\"Concatenate InferenceData objects.\n\n Concatenates over `group`, `chain` or `draw`.\n By default concatenates over unique groups.\n To concatenate over `chain` or `draw` function\n needs identical groups and variables.\n\n The `variables` in the `data` -group are merged if `dim` are not found.\n\n\n Parameters\n ----------\n *args : InferenceData\n Variable length InferenceData list or\n Sequence of InferenceData.\n dim : str, optional\n If defined, concatenated over the defined dimension.\n Dimension which is concatenated. 
If None, concatenates over\n unique groups.\n copy : bool\n If True, groups are copied to the new InferenceData object.\n Used only if `dim` is None.\n inplace : bool\n If True, merge args to first object.\n reset_dim : bool\n Valid only if dim is not None.\n\n Returns\n -------\n InferenceData\n A new InferenceData object by default.\n When `inplace==True` merge args to first arg and return `None`\n\n Examples\n --------\n Use ``concat`` method to concatenate InferenceData objects. This will concatenates over\n unique groups by default. We first create an ``InferenceData`` object:\n\n .. ipython::\n\n In [1]: import arviz as az\n ...: import numpy as np\n ...: data = {\n ...: \"a\": np.random.normal(size=(4, 100, 3)),\n ...: \"b\": np.random.normal(size=(4, 100)),\n ...: }\n ...: coords = {\"a_dim\": [\"x\", \"y\", \"z\"]}\n ...: dataA = az.from_dict(data, coords=coords, dims={\"a\": [\"a_dim\"]})\n ...: dataA\n\n We have created an ``InferenceData`` object with default group 'posterior'. Now, we will\n create another ``InferenceData`` object:\n\n .. ipython::\n\n In [1]: dataB = az.from_dict(prior=data, coords=coords, dims={\"a\": [\"a_dim\"]})\n ...: dataB\n\n We have created another ``InferenceData`` object with group 'prior'. Now, we will concatenate\n these two ``InferenceData`` objects:\n\n .. ipython::\n\n In [1]: az.concat(dataA, dataB)\n\n Now, we will concatenate over chain (or draw). It requires identical groups and variables.\n Here we are concatenating two identical ``InferenceData`` objects over dimension chain:\n\n .. ipython::\n\n In [1]: az.concat(dataA, dataA, dim=\"chain\")\n\n It will create an ``InferenceData`` with the original group 'posterior'. In similar way,\n we can also concatenate over draws.\n\n \"\"\"\n # pylint: disable=undefined-loop-variable, too-many-nested-blocks\n if len(args) == 0:\n if inplace:\n return\n return InferenceData()\n\n if len(args) == 1 and isinstance(args[0], Sequence):\n args = args[0]\n\n # assert that all args are InferenceData\n for i, arg in enumerate(args):\n if not isinstance(arg, InferenceData):\n raise TypeError(\n \"Concatenating is supported only\"\n \"between InferenceData objects. Input arg {} is {}\".format(i, type(arg))\n )\n\n if dim is not None and dim.lower() not in {\"group\", \"chain\", \"draw\"}:\n msg = \"Invalid `dim`: {}. 
Valid `dim` are {}\".format(dim, '{\"group\", \"chain\", \"draw\"}')\n raise TypeError(msg)\n dim = dim.lower() if dim is not None else dim\n\n if len(args) == 1 and isinstance(args[0], InferenceData):\n if inplace:\n return None\n else:\n if copy:\n return deepcopy(args[0])\n else:\n return args[0]\n\n current_time = str(datetime.now())\n\n if not inplace:\n # Keep order for python 3.5\n inference_data_dict = OrderedDict()\n\n if dim is None:\n arg0 = args[0]\n arg0_groups = ccopy(arg0._groups_all)\n args_groups = dict()\n # check if groups are independent\n # Concat over unique groups\n for arg in args[1:]:\n for group in arg._groups_all:\n if group in args_groups or group in arg0_groups:\n msg = (\n \"Concatenating overlapping groups is not supported unless `dim` is defined.\"\n )\n msg += \" Valid dimensions are `chain` and `draw`.\"\n raise TypeError(msg)\n group_data = getattr(arg, group)\n args_groups[group] = deepcopy(group_data) if copy else group_data\n # add arg0 to args_groups if inplace is False\n # otherwise it will merge args_groups to arg0\n # inference data object\n if not inplace:\n for group in arg0_groups:\n group_data = getattr(arg0, group)\n args_groups[group] = deepcopy(group_data) if copy else group_data\n\n other_groups = [group for group in args_groups if group not in SUPPORTED_GROUPS_ALL]\n\n for group in SUPPORTED_GROUPS_ALL + other_groups:\n if group not in args_groups:\n continue\n if inplace:\n if group.startswith(WARMUP_TAG):\n arg0._groups_warmup.append(group)\n else:\n arg0._groups.append(group)\n setattr(arg0, group, args_groups[group])\n else:\n inference_data_dict[group] = args_groups[group]\n if inplace:\n other_groups = [\n group for group in arg0_groups if group not in SUPPORTED_GROUPS_ALL\n ] + other_groups\n sorted_groups = [\n group for group in SUPPORTED_GROUPS + other_groups if group in arg0._groups\n ]\n setattr(arg0, \"_groups\", sorted_groups)\n sorted_groups_warmup = [\n group\n for group in SUPPORTED_GROUPS_WARMUP + other_groups\n if group in arg0._groups_warmup\n ]\n setattr(arg0, \"_groups_warmup\", sorted_groups_warmup)\n else:\n arg0 = args[0]\n arg0_groups = arg0._groups_all\n for arg in args[1:]:\n for group0 in arg0_groups:\n if group0 not in arg._groups_all:\n if group0 == \"observed_data\":\n continue\n msg = \"Mismatch between the groups.\"\n raise TypeError(msg)\n for group in arg._groups_all:\n # handle data groups seperately\n if group not in [\"observed_data\", \"constant_data\", \"predictions_constant_data\"]:\n # assert that groups are equal\n if group not in arg0_groups:\n msg = \"Mismatch between the groups.\"\n raise TypeError(msg)\n\n # assert that variables are equal\n group_data = getattr(arg, group)\n group_vars = group_data.data_vars\n\n if not inplace and group in inference_data_dict:\n group0_data = inference_data_dict[group]\n else:\n group0_data = getattr(arg0, group)\n group0_vars = group0_data.data_vars\n\n for var in group0_vars:\n if var not in group_vars:\n msg = \"Mismatch between the variables.\"\n raise TypeError(msg)\n\n for var in group_vars:\n if var not in group0_vars:\n msg = \"Mismatch between the variables.\"\n raise TypeError(msg)\n var_dims = getattr(group_data, var).dims\n var0_dims = getattr(group0_data, var).dims\n if var_dims != var0_dims:\n msg = \"Mismatch between the dimensions.\"\n raise TypeError(msg)\n\n if dim not in var_dims or dim not in var0_dims:\n msg = \"Dimension {} missing.\".format(dim)\n raise TypeError(msg)\n\n # xr.concat\n concatenated_group = xr.concat((group_data, 
group0_data), dim=dim)\n if reset_dim:\n concatenated_group[dim] = range(concatenated_group[dim].size)\n\n # handle attrs\n if hasattr(group0_data, \"attrs\"):\n group0_attrs = deepcopy(getattr(group0_data, \"attrs\"))\n else:\n group0_attrs = OrderedDict()\n\n if hasattr(group_data, \"attrs\"):\n group_attrs = getattr(group_data, \"attrs\")\n else:\n group_attrs = dict()\n\n # gather attrs results to group0_attrs\n for attr_key, attr_values in group_attrs.items():\n group0_attr_values = group0_attrs.get(attr_key, None)\n equality = attr_values == group0_attr_values\n if hasattr(equality, \"__iter__\"):\n equality = np.all(equality)\n if equality:\n continue\n # handle special cases:\n if attr_key in (\"created_at\", \"previous_created_at\"):\n # check the defaults\n if not hasattr(group0_attrs, \"previous_created_at\"):\n group0_attrs[\"previous_created_at\"] = []\n if group0_attr_values is not None:\n group0_attrs[\"previous_created_at\"].append(group0_attr_values)\n # check previous values\n if attr_key == \"previous_created_at\":\n if not isinstance(attr_values, list):\n attr_values = [attr_values]\n group0_attrs[\"previous_created_at\"].extend(attr_values)\n continue\n # update \"created_at\"\n if group0_attr_values != current_time:\n group0_attrs[attr_key] = current_time\n group0_attrs[\"previous_created_at\"].append(attr_values)\n\n elif attr_key in group0_attrs:\n combined_key = \"combined_{}\".format(attr_key)\n if combined_key not in group0_attrs:\n group0_attrs[combined_key] = [group0_attr_values]\n group0_attrs[combined_key].append(attr_values)\n else:\n group0_attrs[attr_key] = attr_values\n # update attrs\n setattr(concatenated_group, \"attrs\", group0_attrs)\n\n if inplace:\n setattr(arg0, group, concatenated_group)\n else:\n inference_data_dict[group] = concatenated_group\n else:\n # observed_data, \"constant_data\", \"predictions_constant_data\",\n if group not in arg0_groups:\n setattr(arg0, group, deepcopy(group_data) if copy else group_data)\n arg0._groups.append(group)\n continue\n\n # assert that variables are equal\n group_data = getattr(arg, group)\n group_vars = group_data.data_vars\n\n group0_data = getattr(arg0, group)\n if not inplace:\n group0_data = deepcopy(group0_data)\n group0_vars = group0_data.data_vars\n\n for var in group_vars:\n if var not in group0_vars:\n var_data = getattr(group_data, var)\n getattr(arg0, group)[var] = var_data\n else:\n var_data = getattr(group_data, var)\n var0_data = getattr(group0_data, var)\n if dim in var_data.dims and dim in var0_data.dims:\n concatenated_var = xr.concat((group_data, group0_data), dim=dim)\n group0_data[var] = concatenated_var\n\n # handle attrs\n if hasattr(group0_data, \"attrs\"):\n group0_attrs = getattr(group0_data, \"attrs\")\n else:\n group0_attrs = OrderedDict()\n\n if hasattr(group_data, \"attrs\"):\n group_attrs = getattr(group_data, \"attrs\")\n else:\n group_attrs = dict()\n\n # gather attrs results to group0_attrs\n for attr_key, attr_values in group_attrs.items():\n group0_attr_values = group0_attrs.get(attr_key, None)\n equality = attr_values == group0_attr_values\n if hasattr(equality, \"__iter__\"):\n equality = np.all(equality)\n if equality:\n continue\n # handle special cases:\n if attr_key in (\"created_at\", \"previous_created_at\"):\n # check the defaults\n if not hasattr(group0_attrs, \"previous_created_at\"):\n group0_attrs[\"previous_created_at\"] = []\n if group0_attr_values is not None:\n group0_attrs[\"previous_created_at\"].append(group0_attr_values)\n # check previous values\n if 
attr_key == \"previous_created_at\":\n if not isinstance(attr_values, list):\n attr_values = [attr_values]\n group0_attrs[\"previous_created_at\"].extend(attr_values)\n continue\n # update \"created_at\"\n if group0_attr_values != current_time:\n group0_attrs[attr_key] = current_time\n group0_attrs[\"previous_created_at\"].append(attr_values)\n\n elif attr_key in group0_attrs:\n combined_key = \"combined_{}\".format(attr_key)\n if combined_key not in group0_attrs:\n group0_attrs[combined_key] = [group0_attr_values]\n group0_attrs[combined_key].append(attr_values)\n\n else:\n group0_attrs[attr_key] = attr_values\n # update attrs\n setattr(group0_data, \"attrs\", group0_attrs)\n\n if inplace:\n setattr(arg0, group, group0_data)\n else:\n inference_data_dict[group] = group0_data\n\n return None if inplace else InferenceData(**inference_data_dict)\n" ]
[ [ "numpy.dot", "numpy.product", "numpy.expand_dims", "pandas.Series", "numpy.asarray", "pandas.DataFrame", "numpy.all", "numpy.max", "numpy.argmin", "numpy.any", "numpy.zeros_like", "numpy.mean", "numpy.var", "numpy.exp", "numpy.where", "numpy.empty_like", "numpy.arange", "numpy.full", "numpy.sin", "numpy.finfo", "numpy.std", "numpy.diff", "scipy.stats.circmean", "numpy.log1p", "scipy.stats.dirichlet.rvs", "numpy.zeros", "pandas.concat", "numpy.log", "numpy.min", "numpy.isnan", "numpy.full_like", "scipy.optimize.minimize", "numpy.equal", "numpy.floor", "pandas.DataFrame.from_dict", "numpy.argsort", "numpy.array", "numpy.sum", "numpy.abs", "numpy.cos", "numpy.sort", "numpy.ndindex", "numpy.empty" ], [ "matplotlib.pyplot.GridSpec", "numpy.array", "matplotlib.pyplot.show", "matplotlib.pyplot.figure" ], [ "numpy.array", "numpy.linspace", "numpy.min", "numpy.arange", "numpy.sort", "numpy.concatenate", "numpy.full_like", "matplotlib.get_backend", "numpy.max", "numpy.zeros_like", "numpy.transpose", "numpy.ravel", "numpy.digitize", "matplotlib.pyplot.show", "numpy.zeros" ], [ "numpy.all" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
NewKnowledge/gecko
[ "0d48807608a680a8debe92a7c784444a8d350494" ]
[ "Gecko/scripts/seed/GraphReconstructionKarateClub.py" ]
[ "import matplotlib\n#matplotlib.use('GTKAgg')\n\nimport os\nos.environ[\"THEANO_FLAGS\"]=\"mode=FAST_RUN,device=gpu,floatX=float32\"\n\nimport matplotlib.pyplot as plt\n\nfrom gem.utils import graph_util, plot_util\nfrom gem.evaluation import visualize_embedding as viz\nfrom gem.evaluation import evaluate_graph_reconstruction as gr\nfrom time import time\n\nfrom gem.embedding.gf import GraphFactorization\nfrom gem.embedding.hope import HOPE\nfrom gem.embedding.lap import LaplacianEigenmaps\nfrom gem.embedding.lle import LocallyLinearEmbedding\n#from gem.embedding.node2vec import node2vec\n#from gem.embedding.sdne import SDNE\n\n# File that contains the edges. Format: source target\n# Optionally, you can add weights as third column: source target weight\nedge_f = 'data/karate.edgelist'\n# Specify whether the edges are directed\nisDirected = True\n\n# Load graph\nG = graph_util.loadGraphFromEdgeListTxt(edge_f, directed=isDirected)\nG = G.to_directed()\n\nmodels = []\n# You can comment out the methods you don't want to run\nmodels.append(GraphFactorization(d=2, max_iter=100000, eta=1*10**-4, regu=1.0))\nmodels.append(HOPE(d=4, beta=0.01))\nmodels.append(LaplacianEigenmaps(d=2))\nmodels.append(LocallyLinearEmbedding(d=2))\n#models.append(node2vec(d=2, max_iter=1, walk_len=80, num_walks=10, con_size=10, ret_p=1, inout_p=1))\n#models.append(SDNE(d=2, beta=5, alpha=1e-5, nu1=1e-6, nu2=1e-6, K=3,n_units=[50, 15,], rho=0.3, n_iter=50, xeta=0.01,n_batch=500,\n# modelfile=['./intermediate/enc_model.json', './intermediate/dec_model.json'],\n# weightfile=['./intermediate/enc_weights.hdf5', './intermediate/dec_weights.hdf5']))\n\nfor embedding in models:\n print ('Num nodes: %d, num edges: %d' % (G.number_of_nodes(), G.number_of_edges()))\n t1 = time()\n # Learn embedding - accepts a networkx graph or file with edge list\n Y, t = embedding.learn_embedding(graph=G, edge_f=None, is_weighted=True, no_python=True)\n print (embedding._method_name+':\\n\\tTraining time: %f' % (time() - t1))\n # Evaluate on graph reconstruction\n MAP, prec_curv, err, err_baseline = gr.evaluateStaticGraphReconstruction(G, embedding, Y, None)\n #---------------------------------------------------------------------------------\n print((\"\\tMAP: {} \\t precision curve: {}\\n\\n\\n\\n\"+'-'*100).format(MAP,prec_curv[:5]))\n #---------------------------------------------------------------------------------\n # Visualize\n viz.plot_embedding2D(embedding.get_embedding(), di_graph=G, node_colors=None)\n plt.show()\n" ]
[ [ "matplotlib.pyplot.show" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kilsenp/jina
[ "ba476532f41c6d0b3ec2ff2e8636811169e99f16" ]
[ "jina/drivers/multimodal.py" ]
[ "__copyright__ = \"Copyright (c) 2020 Jina AI Limited. All rights reserved.\"\n__license__ = \"Apache-2.0\"\n\nfrom collections import defaultdict\nfrom typing import Iterable, Tuple, Dict, List\n\nimport numpy as np\n\nfrom .encode import BaseEncodeDriver\nfrom ..proto import jina_pb2\nfrom ..proto.ndarray.generic import GenericNdArray\n\n\ndef _extract_doc_content(doc: 'jina_pb2.Document'):\n \"\"\"Returns the content of the document with the following priority:\n If the document has an embedding, return it, otherwise return its content.\n \"\"\"\n r = GenericNdArray(doc.embedding).value\n if r is not None:\n return r\n elif doc.text or doc.buffer:\n return doc.text or doc.buffer\n else:\n return GenericNdArray(doc.blob).value\n\n\ndef _extract_modalities_from_document(doc: 'jina_pb2.Document'):\n \"\"\"Returns a dictionary of document content (embedding, text, blob or buffer) with `modality` as its key\n \"\"\"\n doc_content = {}\n for chunk in doc.chunks:\n modality = chunk.modality\n if modality in doc_content:\n return None\n else:\n doc_content[modality] = _extract_doc_content(chunk)\n return doc_content\n\n\nclass MultiModalDriver(BaseEncodeDriver):\n \"\"\"Extract multimodal embeddings from different modalities.\n Input-Output ::\n Input:\n document:\n |- chunk: {modality: mode1}\n |\n |- chunk: {modality: mode2}\n Output:\n document: (embedding: multimodal encoding)\n |- chunk: {modality: mode1}\n |\n |- chunk: {modality: mode2}\n\n .. note::\n - It traverses on the ``documents`` for which we want to apply the ``multimodal`` embedding. This way\n we can use the `batching` capabilities for the `executor`.\n .. warning::\n - It assumes that every ``chunk`` of a ``document`` belongs to a different modality.\n \"\"\"\n\n def __init__(self,\n traversal_paths: Tuple[str] = ('r',), *args, **kwargs):\n super().__init__(traversal_paths=traversal_paths, *args, **kwargs)\n\n @property\n def positional_modality(self) -> List[str]:\n \"\"\"Get position per modality.\n :return: the list of strings representing the name and order of the modality.\n \"\"\"\n if not self._exec.positional_modality:\n raise RuntimeError('Could not know which position of the ndarray to load to each modality')\n return self._exec.positional_modality\n\n def _get_executor_input_arguments(self, content_by_modality: Dict[str, 'np.ndarray']):\n \"\"\"\n From a dictionary ``content_by_modality`` it returns the arguments in the proper order so that they can be\n passed to the executor.\n \"\"\"\n return [content_by_modality[modality] for modality in self.positional_modality]\n\n def _apply_all(\n self,\n docs: Iterable['jina_pb2.Document'],\n *args, **kwargs\n ) -> None:\n \"\"\"\n :param docs: the docs for which a ``multimodal embedding`` will be computed, whose chunks are of different\n modalities\n :return:\n \"\"\"\n content_by_modality = defaultdict(list) # array of num_rows equal to num_docs and num_columns equal to\n\n valid_docs = []\n for doc in docs:\n doc_content = _extract_modalities_from_document(doc)\n if doc_content:\n valid_docs.append(doc)\n for modality in self.positional_modality:\n content_by_modality[modality].append(doc_content[modality])\n else:\n self.logger.warning(f'Invalid doc {doc.id}. 
Only one chunk per modality is accepted')\n\n if len(valid_docs) > 0:\n # Pass a variable length argument (one argument per array)\n for modality in self.positional_modality:\n content_by_modality[modality] = np.stack(content_by_modality[modality])\n\n # Guarantee that the arguments are provided to the executor in its desired order\n input_args = self._get_executor_input_arguments(content_by_modality)\n embeds = self.exec_fn(*input_args)\n if len(valid_docs) != embeds.shape[0]:\n self.logger.error(\n f'mismatched {len(valid_docs)} docs from level {docs[0].granularity} '\n f'and a {embeds.shape} shape embedding, the first dimension must be the same')\n for doc, embedding in zip(valid_docs, embeds):\n GenericNdArray(doc.embedding).value = embedding\n" ]
[ [ "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fiath/test
[ "b50898dafa90e93da48f573e0b3feb1bb6acd8de", "b50898dafa90e93da48f573e0b3feb1bb6acd8de", "b50898dafa90e93da48f573e0b3feb1bb6acd8de" ]
[ "klustaviewa/views/similaritymatrixview.py", "kwiklib/dataio/spikecache.py", "galry/managers/mesh_manager.py" ]
[ "\"\"\"Correlation matrix View: show correlation matrix.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport numpy as np\nimport numpy.random as rdn\nfrom galry import (Manager, DefaultPaintManager, PlotInteractionManager,\n Visual, PlotPaintManager,\n GalryWidget, QtGui, QtCore, QtOpenGL, enforce_dtype, RectanglesVisual,\n TextVisual, TextureVisual)\nfrom galry.tools import hsv_to_rgb\n \nfrom kwiklib.dataio.selection import get_indices\nfrom kwiklib.dataio.tools import get_array\nfrom kwiklib.utils.colors import COLORMAP\nfrom kwiklib.utils import logger as log\nfrom klustaviewa.views.common import HighlightManager, KlustaViewaBindings, KlustaView\n\n\n# -----------------------------------------------------------------------------\n# Utility functions\n# -----------------------------------------------------------------------------\ndef colormap(x, col0=None, col1=None):\n \"\"\"Colorize a 2D grayscale array.\n \n Arguments: \n * x:an NxM array with values in [0,1].\n * col0=None: a tuple (H, S, V) corresponding to color 0. By default, a\n rainbow color gradient is used.\n * col1=None: a tuple (H, S, V) corresponding to color 1.\n \n Returns:\n * y: an NxMx3 array with a rainbow color palette.\n \n \"\"\"\n # record values to be removed\n removed = x == -1\n \n x[np.isnan(x)] = 0.\n # x -= x.min()\n # x *= (1. / x.max())\n # Set the maximum values above which the max color should be used.\n max = .1\n \n x = np.clip(x / max, 0., 1.)\n \n # Gamma correction. Doesn't look very good.\n # x = x ** .2\n\n shape = x.shape\n \n if col0 is None:\n col0 = (.67, .91, .65)\n if col1 is None:\n col1 = (0., 1., 1.)\n \n col0 = np.array(col0).reshape((1, 1, -1))\n col1 = np.array(col1).reshape((1, 1, -1))\n \n col0 = np.tile(col0, x.shape + (1,))\n col1 = np.tile(col1, x.shape + (1,))\n \n x = np.tile(x.reshape(shape + (1,)), (1, 1, 3))\n \n y = hsv_to_rgb(col0 + (col1 - col0) * x)\n \n # value of -1 = black\n y[removed,:] = 0\n # Remove diagonal.\n n = y.shape[0]\n y[xrange(n), xrange(n), :] = 0\n \n return y\n \n\n# -----------------------------------------------------------------------------\n# Data manager\n# -----------------------------------------------------------------------------\nclass SimilarityMatrixDataManager(Manager):\n def set_data(self, similarity_matrix=None,\n cluster_colors_full=None,\n clusters_hidden=[], # WARNING: relative indexing\n ):\n if similarity_matrix is None:\n similarity_matrix = np.zeros(0)\n cluster_colors_full = np.zeros(0)\n \n if similarity_matrix.size == 0:\n similarity_matrix = -np.ones((2, 2))\n elif similarity_matrix.shape[0] == 1:\n similarity_matrix = -np.ones((2, 2))\n \n self.similarity_matrix = similarity_matrix\n n = similarity_matrix.shape[0]\n \n self.texture = colormap(self.similarity_matrix.copy())\n # similarity_matrix axes are originally (x, y) from the lower left corner\n # but when displayed, they are (i, j) from the upper left corner\n # so we need to transpose\n self.texture = np.swapaxes(self.texture, 0, 1)\n \n self.clusters_unique = get_indices(cluster_colors_full)\n \n self.cluster_colors = cluster_colors_full\n self.nclusters = len(self.clusters_unique)\n \n # Remove hidden clusters.\n indices = np.array(sorted(set(range(self.nclusters)) - set(clusters_hidden)),\n dtype=np.int32)\n self.indices = indices\n \n if len(indices) >= 2:\n tex0 = self.texture.copy()\n self.texture = tex0[[[_] for _ in 
(indices)],indices,:]\n else:\n indices = range(self.nclusters)\n \n self.clusters_displayed = self.clusters_unique[indices]\n self.nclusters_displayed = len(indices)\n \n self.texture = self.texture[::-1,:,:]\n \n \n# -----------------------------------------------------------------------------\n# Visuals\n# -----------------------------------------------------------------------------\nclass SimilarityMatrixPaintManager(PlotPaintManager):\n def initialize(self):\n self.add_visual(TextureVisual,\n texture=self.data_manager.texture, \n name='similarity_matrix')\n\n self.add_visual(RectanglesVisual, coordinates=np.zeros((2, 4)),\n color=(1., 1., 1., .5), autonormalizable=False, name='square')\n \n self.add_visual(TextVisual, text='0', name='clusterinfo', fontsize=16,\n background_transparent=False,\n posoffset=(150., -60.),\n color=(1., 1., 1., 1.),\n letter_spacing=350.,\n depth=-1,\n visible=False)\n \n def update(self):\n self.set_data(\n texture=self.data_manager.texture, visual='similarity_matrix')\n \n\n# -----------------------------------------------------------------------------\n# Interaction\n# -----------------------------------------------------------------------------\nclass SimilarityMatrixInfoManager(Manager):\n def initialize(self):\n pass\n \n def get_closest_cluster(self, xd, yd):\n nclu = self.data_manager.nclusters_displayed\n \n cx = int((xd + 1) / 2. * nclu)\n cy = int((yd + 1) / 2. * nclu)\n \n cx_rel = np.clip(cx, 0, nclu - 1)\n cy_rel = np.clip(cy, 0, nclu - 1)\n \n return cx_rel, cy_rel\n \n def show_closest_cluster(self, xd, yd):\n \n if len(self.data_manager.clusters_displayed) == 0:\n return\n \n cx_rel, cy_rel = self.get_closest_cluster(xd, yd)\n \n cx = self.data_manager.clusters_displayed[cx_rel]\n cy = self.data_manager.clusters_displayed[cy_rel]\n \n ind = self.data_manager.indices\n matrix = self.data_manager.similarity_matrix\n matrix = matrix[ind,:][:,ind]\n \n if ((cx_rel >= matrix.shape[0]) or\n (cy_rel >= matrix.shape[1])):\n return\n \n val = matrix[cx_rel, cy_rel]\n \n text = \"%d/%d:%.3f\" % (cx, cy, val)\n \n x, y = self.interaction_manager.get_processor('navigation').get_window_coordinates(xd, yd)\n \n posoffset=(-np.sign(x) * 140., -np.sign(y) * 40.)\n \n self.paint_manager.set_data(coordinates=(xd, yd), \n text=text,\n visible=True,\n posoffset=posoffset,\n visual='clusterinfo')\n \n \nclass SimilarityMatrixInteractionManager(PlotInteractionManager):\n def initialize(self):\n # self.register('ShowClosestCluster', self.show_closest_cluster)\n self.register('SelectPair', self.select_pair)\n self.register('AddPair', self.add_pair)\n self.register('MoveSquare', self.move_square)\n self.register(None, self.hide_closest_cluster)\n \n self.square_coordinates = np.zeros((2, 4))\n self.clusters_selected = np.array([])\n \n def hide_closest_cluster(self, parameter):\n self.paint_manager.set_data(visible=False, visual='clusterinfo')\n self.paint_manager.set_data(visible=False, visual='square')\n \n def select_pair(self, parameter, add=False):\n if self.data_manager.nclusters_displayed == 0:\n return\n \n nav = self.get_processor('navigation')\n \n # window coordinates\n x, y = parameter\n # data coordinates\n xd, yd = nav.get_data_coordinates(x, y)\n \n cx_rel, cy_rel = self.info_manager.get_closest_cluster(xd, yd)\n cx = self.data_manager.clusters_displayed[cx_rel]\n cy = self.data_manager.clusters_displayed[cy_rel]\n if cx != cy:\n clusters = np.array([cx, cy])\n else:\n clusters = np.array([cx])\n \n if add:\n clusters = 
np.array(sorted(set(self.clusters_selected).union(\n clusters)))\n # clusters_new = clusters\n # clusters = self.clusters_selected.extend([cluster \n # for cluster in clusters_new \n # if cluster not in self.clusters_selected])\n \n self.clusters_selected = clusters\n \n # Emit signal.\n log.debug(\"Selected clusters {0:s}.\".format(str(clusters)))\n self.parent.clustersSelected.emit(clusters)\n \n def add_pair(self, parameter):\n self.select_pair(parameter, True)\n \n def show_cluster(self, clu0, clu1):\n if self.data_manager.nclusters_displayed <= 1:\n return\n \n n = self.data_manager.texture.shape[0]\n dx = 1 / float(n)\n i, j = np.digitize([clu0, clu1], self.data_manager.clusters_displayed) - 1\n x0, y0 = i * dx * 2 - 1, j * dx * 2 - 1,\n x1, y1 = (i + 1) * dx * 2 - 1, (j + 1) * dx * 2 - 1\n self.info_manager.show_closest_cluster((x0 + x1) / 2, (y0 + y1) / 2)\n \n def show_closest_cluster(self, parameter):\n if self.data_manager.nclusters_displayed <= 1:\n return\n \n nclu = self.data_manager.nclusters_displayed\n \n if nclu == 0:\n return\n \n nav = self.get_processor('navigation')\n \n # window coordinates\n x, y = parameter\n # data coordinates\n xd, yd = nav.get_data_coordinates(x, y)\n \n self.info_manager.show_closest_cluster(xd, yd)\n \n def show_square(self, clu0, clu1):\n if self.data_manager.nclusters_displayed <= 1:\n return\n \n n = self.data_manager.texture.shape[0]\n dx = 1 / float(n)\n i, j = np.digitize([clu0, clu1], self.data_manager.clusters_displayed) - 1\n x0, y0 = i * dx * 2 - 1, j * dx * 2 - 1,\n x1, y1 = (i + 1) * dx * 2 - 1, (j + 1) * dx * 2 - 1\n self.square_coordinates[0,:] = (x0, -1, x1, 1)\n self.square_coordinates[1,:] = (-1, y0, 1, y1)\n self.paint_manager.set_data(coordinates=self.square_coordinates, \n visible=True,\n visual='square')\n \n def move_square(self, parameter):\n if self.data_manager.nclusters_displayed <= 1:\n return\n \n self.show_closest_cluster(parameter)\n \n # data coordinates\n x, y = parameter\n nav = self.get_processor('navigation')\n x, y = nav.get_data_coordinates(x, y)\n \n n = self.data_manager.texture.shape[0]\n dx = 1 / float(n)\n i = np.clip(int((x + 1) / 2. * n), 0, n - 1)\n j = np.clip(int((y + 1) / 2. 
* n), 0, n - 1)\n\n clu0 = self.data_manager.clusters_displayed[i]\n clu1 = self.data_manager.clusters_displayed[j]\n self.show_square(clu0, clu1)\n \n \nclass SimilarityMatrixBindings(KlustaViewaBindings):\n def get_base_cursor(self):\n return 'ArrowCursor'\n \n def set_selectcluster(self):\n self.set('RightClick', 'SelectPair', #key_modifier='Shift',\n param_getter=lambda p:\n (p['mouse_position'][0], p['mouse_position'][1]))\n self.set('RightClick', 'AddPair', key_modifier='Control',\n param_getter=lambda p:\n (p['mouse_position'][0], p['mouse_position'][1]))\n \n def set_move(self):\n self.set('Move', 'MoveSquare',\n param_getter=lambda p: p['mouse_position'])\n self.set('Move', 'MoveSquare', key_modifier='Control',\n param_getter=lambda p: p['mouse_position'])\n \n def initialize(self):\n # super(SimilarityMatrixBindings, self).initialize()\n # self.set_clusterinfo()\n self.set_selectcluster()\n self.set_move()\n \n\n# -----------------------------------------------------------------------------\n# Top-level module\n# -----------------------------------------------------------------------------\nclass SimilarityMatrixView(KlustaView):\n \n # Raise the list of highlighted spike absolute indices.\n clustersSelected = QtCore.pyqtSignal(np.ndarray)\n \n def initialize(self):\n self.set_bindings(SimilarityMatrixBindings)\n self.set_companion_classes(\n paint_manager=SimilarityMatrixPaintManager,\n info_manager=SimilarityMatrixInfoManager,\n interaction_manager=SimilarityMatrixInteractionManager,\n data_manager=SimilarityMatrixDataManager,)\n \n def set_data(self, *args, **kwargs):\n # if kwargs.get('similarity_matrix', None) is None:\n # return\n self.data_manager.set_data(*args, **kwargs)\n \n # update?\n if self.initialized:\n self.paint_manager.update()\n self.updateGL()\n\n def clear(self):\n self.set_data()\n self.paint_manager.set_data(visible=False, visual='clusterinfo')\n self.paint_manager.set_data(visible=False, visual='square')\n \n def show_selection(self, clu0, clu1):\n self.interaction_manager.show_square(clu0, clu1)\n self.interaction_manager.show_cluster(clu0, clu1)\n self.updateGL()\n \n \n # def sizeHint(self):\n # return QtCore.QSize(300, 400)\n ", "\"\"\"Object-oriented interface to an experiment's data.\"\"\"\n\n# -----------------------------------------------------------------------------\n# Imports\n# -----------------------------------------------------------------------------\nimport os\nimport re\nfrom itertools import chain\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\nimport tables as tb\n\nfrom selection import select, slice_to_indices\nfrom kwiklib.dataio.kwik import (get_filenames, open_files, close_files,\n add_spikes)\nfrom kwiklib.dataio.utils import convert_dtype\nfrom kwiklib.utils.six import (iteritems, string_types, iterkeys, \n itervalues, next)\nfrom kwiklib.utils.wrap import wrap\n\ndef _select(arr, indices):\n fm = np.empty((len(indices),) + arr.shape[1:], \n dtype=arr.dtype)\n for j, i in enumerate(indices):\n # fm[j:j+1,...] = arr[i:i+1,...]\n fm[j,...] 
= arr[i,...]\n return indices, fm\n\nclass SpikeCache(object):\n def __init__(self, spike_clusters=None, cache_fraction=1.,\n # nspikes=None,\n features_masks=None,\n waveforms_raw=None,\n waveforms_filtered=None):\n \n self.spike_clusters_dataset = spike_clusters\n self._update_clusters()\n \n self.nspikes = len(self.spike_clusters)\n # self.cluster_sizes = np.bincount(spike_clusters)\n self.cache_fraction = cache_fraction\n self.features_masks = features_masks\n self.waveforms_raw = waveforms_raw\n self.waveforms_filtered = waveforms_filtered\n \n self.features_masks_cached = None\n self.cache_indices = None\n \n assert self.nspikes == len(self.spike_clusters)\n if self.waveforms_raw is not None:\n assert self.waveforms_raw.shape[0] in (0, self.nspikes)\n if self.waveforms_filtered is not None:\n assert self.waveforms_filtered.shape[0] in (0, self.nspikes)\n \n assert cache_fraction > 0\n \n def _update_clusters(self):\n \"\"\"Re-load the clustering.\"\"\"\n self.spike_clusters = self.spike_clusters_dataset[:]\n \n def cache_features_masks(self, offset=0):\n if self.features_masks is None:\n return\n k = np.clip(int(1. / self.cache_fraction), 1, self.nspikes)\n # Load and save subset in feature_masks.\n self.features_masks_cached = self.features_masks[offset::k,...]\n self.cache_indices = np.arange(self.nspikes)[offset::k,...]\n self.cache_size = len(self.cache_indices)\n \n def load_features_masks_bg(self):\n if not hasattr(self, 'spikes_bg'):\n self.spikes_bg, self.features_bg = self.load_features_masks(fraction=.05)\n return self.spikes_bg, self.features_bg\n \n def load_features_masks(self, fraction=None, clusters=None):\n \"\"\"Load a subset of features & masks. \n \n Arguments:\n * fraction: fraction of spikes to load from the cache.\n * clusters: if not None, load all features & masks of all spikes in \n the selected clusters.\n \n \"\"\"\n assert fraction is not None or clusters is not None\n \n if self.features_masks is None:\n return [], []\n \n # Cache susbet of features masks and save them in an array.\n if self.features_masks_cached is None:\n self.cache_features_masks()\n \n if clusters is None:\n offset = 0\n k = np.clip(int(1. / fraction), 1, self.cache_size)\n \n # Load and save subset from cache_feature_masks.\n loaded_features_masks = self.features_masks_cached[offset::k,...]\n loaded_indices = self.cache_indices[offset::k]\n return loaded_indices, loaded_features_masks\n else:\n self._update_clusters()\n # Find the indices of all spikes in the requested clusters\n indices = np.nonzero(np.in1d(self.spike_clusters, clusters))[0]\n arr = (self.features_masks_cached \n if self.cache_fraction == 1. 
else self.features_masks)\n return _select(arr, indices)\n \n def load_waveforms(self, clusters=None, count=50, filtered=True):\n \"\"\"Load some waveforms from the requested clusters.\n \n Arguments:\n * clusters: list of clusters\n * count: max number of waveforms per cluster\n * filtered=True: whether to load filtered or raw waveforms\n \n \"\"\"\n assert count > 0\n \n if self.waveforms_raw is None and self.waveforms_filtered is None:\n return [], []\n \n w = self.waveforms_filtered if filtered else self.waveforms_raw\n if w is None or len(w) == 0:\n return np.array([[[]]])\n nclusters = len(clusters)\n indices = []\n self._update_clusters()\n for cluster in clusters:\n # Number of spikes to load for this cluster: count\n # but we want this number to be < cluster size, and > 10 if possible\n ind = np.nonzero(self.spike_clusters == cluster)[0]\n cluster_size = len(ind)\n if cluster_size == 0:\n continue\n nspikes = np.clip(count, min(cluster_size, 10),\n max(cluster_size, count))\n indices.append(ind[::max(1, len(ind)//nspikes)])\n # indices now contains some spike indices from the requested clusters\n if len(indices) > 0:\n indices = np.hstack(indices)\n indices = np.unique(indices)\n return _select(w, indices)\n \n \n \n ", "from galry import NavigationEventProcessor, InteractionManager, \\\n PaintManager, \\\n GridEventProcessor, scale_matrix, rotation_matrix, translation_matrix, \\\n MeshNavigationEventProcessor\nfrom default_manager import DefaultPaintManager, DefaultInteractionManager, \\\n DefaultBindings\nfrom plot_manager import PlotBindings\nimport numpy as np\n\n\ndef load_mesh(filename):\n \"\"\"Load vertices and faces from a wavefront .obj file and generate\n normals.\n \n \"\"\"\n data = np.genfromtxt(filename, dtype=[('type', np.character, 1),\n ('points', np.float32, 3)])\n\n # Get vertices and faces\n vertices = data['points'][data['type'] == 'v']\n faces = (data['points'][data['type'] == 'f']-1).astype(np.uint32)\n\n # Build normals\n T = vertices[faces]\n N = np.cross(T[::,1 ]-T[::,0], T[::,2]-T[::,0])\n L = np.sqrt(N[:,0]**2+N[:,1]**2+N[:,2]**2)\n N /= L[:, np.newaxis]\n normals = np.zeros(vertices.shape)\n normals[faces[:,0]] += N\n normals[faces[:,1]] += N\n normals[faces[:,2]] += N\n L = np.sqrt(normals[:,0]**2+normals[:,1]**2+normals[:,2]**2)\n normals /= L[:, np.newaxis]\n\n # Scale vertices such that object is contained in [-1:+1,-1:+1,-1:+1]\n vmin, vmax = vertices.min(), vertices.max()\n vertices = 2*(vertices-vmin)/(vmax-vmin) - 1\n\n return vertices, normals, faces\n\n \nclass MeshInteractionManager(DefaultInteractionManager):\n def initialize_default(self, constrain_navigation=None, momentum=None):\n super(MeshInteractionManager, self).initialize_default()\n self.add_processor(MeshNavigationEventProcessor, name='navigation')\n self.add_processor(GridEventProcessor, name='grid')\n \n \nclass MeshPaintManager(DefaultPaintManager):\n def initialize_default(self, *args, **kwargs):\n super(MeshPaintManager, self).initialize_default(*args, **kwargs)\n self.set_rendering_options(activate3D=True)\n \n \nclass MeshBindings(PlotBindings):\n def initialize(self):\n super(MeshBindings, self).initialize()\n self.set_rotation_mouse()\n self.set_rotation_keyboard()\n \n def set_panning_mouse(self):\n # Panning: CTRL + left button mouse\n self.set('LeftClickMove', 'Pan',\n # key_modifier='Control',\n param_getter=lambda p: (-4*p[\"mouse_position_diff\"][0],\n -4*p[\"mouse_position_diff\"][1]))\n \n def set_rotation_mouse(self):\n # Rotation: left button mouse\n 
self.set('MiddleClickMove', 'Rotation',\n param_getter=lambda p: (3*p[\"mouse_position_diff\"][0],\n 3*p[\"mouse_position_diff\"][1])) \n\n self.set('LeftClickMove', 'Rotation',\n key_modifier='Control',\n param_getter=lambda p: (3*p[\"mouse_position_diff\"][0],\n 3*p[\"mouse_position_diff\"][1])) \n \n def set_rotation_keyboard(self):\n \"\"\"Set zooming bindings with the keyboard.\"\"\"\n # Rotation: ALT + key arrows\n self.set('KeyPress', 'Rotation',\n key='Left', key_modifier='Shift', \n param_getter=lambda p: (-.25, 0))\n self.set('KeyPress', 'Rotation',\n key='Right', key_modifier='Shift', \n param_getter=lambda p: (.25, 0))\n self.set('KeyPress', 'Rotation',\n key='Up', key_modifier='Shift', \n param_getter=lambda p: (0, .25))\n self.set('KeyPress', 'Rotation',\n key='Down', key_modifier='Shift', \n param_getter=lambda p: (0, -.25))\n \n def set_zoombox_mouse(self):\n \"\"\"Deactivate zoombox.\"\"\"\n pass\n\n def set_zoombox_keyboard(self):\n \"\"\"Deactivate zoombox.\"\"\"\n pass\n \n def extend(self):\n \"\"\"Set rotation interactions with mouse and keyboard.\"\"\"\n self.set_rotation_mouse()\n self.set_rotation_keyboard()\n \n " ]
[ [ "numpy.swapaxes", "numpy.clip", "numpy.isnan", "numpy.tile", "numpy.ones", "numpy.sign", "numpy.digitize", "numpy.array", "numpy.zeros" ], [ "numpy.hstack", "numpy.nonzero", "numpy.unique", "numpy.arange", "numpy.in1d", "numpy.array" ], [ "numpy.cross", "numpy.zeros", "numpy.sqrt", "numpy.genfromtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Niki92More/ga-learner-dsmp-repo
[ "5fb9c6a68e4ef8e04eb42249602cb6b2b92740d6" ]
[ "Carter/code.py" ]
[ "# --------------\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nimport pandas as pd\n\n# Code starts here\ndf = pd.read_csv(path)\nprint(df.head(5))\nX=df.loc[:,df.columns!='attr1089']\ny=df['attr1089']\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size = 0.3,random_state = 4)\nscaler=MinMaxScaler()\nscaler.fit(X_train)\nX_train=scaler.transform(X_train)\nX_test=scaler.transform(X_test)\n# Code ends here\n\n\n# --------------\nfrom sklearn.metrics import classification_report\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import roc_auc_score\nlr= LogisticRegression()\nlr.fit(X_train,y_train)\ny_pred = lr.predict(X_test)\nroc_score =roc_auc_score(y_test, y_pred)\nprint(roc_score)\n\n\n\n# --------------\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn import tree\ndt=DecisionTreeClassifier(random_state=4)\ndt.fit(X_train,y_train)\ny_pred=dt.predict(X_test)\nroc_score =roc_auc_score(y_test, y_pred)\nprint(roc_score)\n\n\n\n# --------------\nfrom sklearn.ensemble import RandomForestClassifier\n\n\n# Code strats here\nrfc=RandomForestClassifier(random_state=4)\nrfc.fit(X_train,y_train)\ny_pred=rfc.predict(X_test)\nroc_score =roc_auc_score(y_test, y_pred)\nprint(roc_score)\n\n\n# Code ends here\n\n\n# --------------\n# Import Bagging Classifier\nfrom sklearn.ensemble import BaggingClassifier\n\n\n# Code starts here\nbagging_clf=BaggingClassifier(base_estimator= DecisionTreeClassifier(), n_estimators=100 , max_samples=100,random_state=0)\nbagging_clf.fit(X_train,y_train)\nscore_bagging=bagging_clf.score(X_test,y_test)\nprint(score_bagging)\n# Code ends here\n\n\n# --------------\n# Import libraries\nfrom sklearn.ensemble import VotingClassifier\n\n# Various models\nclf_1 = LogisticRegression()\nclf_2 = DecisionTreeClassifier(random_state=4)\nclf_3 = RandomForestClassifier(random_state=4)\n\nmodel_list = [('lr',clf_1),('DT',clf_2),('RF',clf_3)]\n\n\n# Code starts here\n\nvoting_clf_hard=VotingClassifier(estimators=model_list,voting='hard')\nvoting_clf_hard.fit(X_train,y_train)\nhard_voting_score=voting_clf_hard.score(X_test,y_test)\nprint(hard_voting_score)\n# Code ends here\n\n\n" ]
[ [ "sklearn.metrics.roc_auc_score", "pandas.read_csv", "sklearn.linear_model.LogisticRegression", "sklearn.ensemble.RandomForestClassifier", "sklearn.model_selection.train_test_split", "sklearn.ensemble.VotingClassifier", "sklearn.tree.DecisionTreeClassifier", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
motapaolla/AIF360
[ "fc018d007a507ee5ff10f7e2787e9d831d84e3ae" ]
[ "tests/sklearn/test_adversarial_debiasing.py" ]
[ "import numpy as np\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score\nimport tensorflow as tf\n\nfrom aif360.datasets import AdultDataset\nfrom aif360.sklearn.datasets import fetch_adult\nfrom aif360.algorithms.inprocessing import AdversarialDebiasing as OldAdversarialDebiasing\nfrom aif360.sklearn.inprocessing import AdversarialDebiasing\n\n\nX, y, sample_weight = fetch_adult(numeric_only=True)\nadult = AdultDataset(instance_weights_name='fnlwgt', categorical_features=[],\n features_to_keep=['age', 'education-num', 'capital-gain', 'capital-loss',\n 'hours-per-week'], features_to_drop=[])\n\ndef test_adv_debias_old_reproduce():\n \"\"\"Test that the old AdversarialDebiasing is reproducible.\"\"\"\n sess = tf.Session()\n old_adv_deb = OldAdversarialDebiasing(unprivileged_groups=[{'sex': 0}],\n privileged_groups=[{'sex': 1}],\n scope_name='old_classifier',\n sess=sess, num_epochs=5, seed=123)\n old_preds = old_adv_deb.fit_predict(adult)\n sess.close()\n tf.reset_default_graph()\n sess = tf.Session()\n old_adv_deb2 = OldAdversarialDebiasing(unprivileged_groups=[{'sex': 0}],\n privileged_groups=[{'sex': 1}],\n scope_name='old_classifier',\n sess=sess, num_epochs=5, seed=123)\n old_preds2 = old_adv_deb2.fit_predict(adult)\n sess.close()\n\n assert np.allclose(old_preds.labels, old_preds2.labels)\n\ndef test_adv_debias_old():\n \"\"\"Test that the predictions of the old and new AdversarialDebiasing match.\n \"\"\"\n tf.reset_default_graph()\n sess = tf.Session()\n old_adv_deb = OldAdversarialDebiasing(unprivileged_groups=[{'sex': 0}],\n privileged_groups=[{'sex': 1}],\n scope_name='old_classifier',\n sess=sess, num_epochs=5, seed=123)\n old_preds = old_adv_deb.fit_predict(adult)\n sess.close()\n adv_deb = AdversarialDebiasing('sex', num_epochs=5, random_state=123)\n new_preds = adv_deb.fit(X, y).predict(X)\n adv_deb.sess_.close()\n assert np.allclose(old_preds.labels.flatten(), new_preds)\n\ndef test_adv_debias_reproduce():\n \"\"\"Test that the new AdversarialDebiasing is reproducible.\"\"\"\n adv_deb = AdversarialDebiasing('sex', num_epochs=5, random_state=123)\n new_preds = adv_deb.fit(X, y).predict(X)\n adv_deb.sess_.close()\n new_acc = accuracy_score(y, new_preds)\n\n adv_deb2 = AdversarialDebiasing('sex', num_epochs=5, random_state=123)\n new_preds = adv_deb2.fit(X, y).predict(X)\n adv_deb.sess_.close()\n\n assert new_acc == accuracy_score(y, new_preds)\n\ndef test_adv_debias_intersection():\n \"\"\"Test that the new AdversarialDebiasing runs with >2 protected groups.\"\"\"\n adv_deb = AdversarialDebiasing(scope_name='intersect', num_epochs=5)\n adv_deb.fit(X, y)\n adv_deb.sess_.close()\n assert adv_deb.adversary_logits_.shape[1] == 4\n\ndef test_adv_debias_grid():\n \"\"\"Test that the new AdversarialDebiasing works in a grid search (and that\n debiasing results in reduced accuracy).\n \"\"\"\n adv_deb = AdversarialDebiasing('sex', num_epochs=10, random_state=123)\n\n params = {'debias': [True, False]}\n\n clf = GridSearchCV(adv_deb, params, cv=3)\n clf.fit(X, y)\n\n clf.best_estimator_.sess_.close()\n assert clf.best_params_ == {'debias': False}\n" ]
[ [ "sklearn.model_selection.GridSearchCV", "numpy.allclose", "tensorflow.reset_default_graph", "tensorflow.Session", "sklearn.metrics.accuracy_score" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
nail1021734/sequence-to-sequence
[ "e591d225a108556de3eb2e4021ea024352b6ec17" ]
[ "s2s/util/_model.py" ]
[ "import os\nimport re\n\nimport torch\n\nfrom s2s.model import Model\nfrom s2s.path import EXP_PATH\n\n\ndef load_model_from_ckpt(ckpt: int, exp_name: str, model: Model) -> Model:\n exp_path = os.path.join(EXP_PATH, exp_name)\n prog = re.compile(r'model-(\\d+).pt')\n all_ckpt = list(filter(\n lambda file_name: prog.match(file_name),\n os.listdir(exp_path),\n ))\n\n # Load last checkpoint.\n if ckpt == -1:\n ckpt = max(map(\n lambda file_name: int(prog.match(file_name)[1]),\n all_ckpt,\n ))\n\n file_path = os.path.join(exp_path, f'model-{ckpt}.pt')\n\n if not os.path.exists(file_path):\n raise FileNotFoundError(f'{file_path} does not exist.')\n\n model.load_state_dict(torch.load(file_path))\n return model\n" ]
[ [ "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lsscecilia/GraphVisualisation
[ "7490062ac0596835620e2026a756b6be8f0667e7" ]
[ "misc/PlotInteractiveGraph.py" ]
[ "# Copyright (c) 2021 Cecilia Lee\n\nfrom bokeh.io import output_notebook, show, save\nfrom bokeh.models import Range1d, Circle, ColumnDataSource, MultiLine\nfrom bokeh.plotting import figure\nfrom bokeh.plotting import from_networkx\nimport networkx\nfrom bokeh.models import (BoxSelectTool, Circle, EdgesAndLinkedNodes, HoverTool,\n MultiLine, NodesAndLinkedEdges, Plot, Range1d, TapTool,)\nfrom bokeh.palettes import Spectral4\nimport itertools\nimport copy\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport sys, getopt\nimport re\nimport random\nimport numpy as np\n\ndef random_color():\n color = list(np.random.uniform(range(1), size=3))\n return tuple(color)\n\ndef convert_colors(values):\n convert = []\n for value in values:\n indi = []\n #print(value)\n for a in value:\n indi.append((int)(a*255))\n convert.append(tuple(indi))\n return convert\n\ndef convert_color(values):\n convert = []\n for value in values:\n convert.append((int)(value*255))\n return tuple(convert)\n\ndef parseTxtFile(name, with_coloring):\n withWeight = False\n sum=0\n g = nx.Graph()\n with open(name) as f:\n lines = f.readlines()\n color_brk = 0\n chr_dict = {}\n chr_color = {}\n node_color = {}\n if (with_coloring):\n for i in range(len(lines)):\n if (lines[i][0]=='%'):\n color_brk = int(i) + 1\n break\n v = re.split(\" |\\n\",lines[i])\n chr_dict[v[0]] = v[1]\n \n # assign random colour to dictionary\n for key, value in chr_dict.items():\n if value in chr_color:\n node_color[key] = chr_color[value]\n continue \n \n chr_color[value] = random_color()\n node_color[key] = chr_color[value]\n \n # edges\n for i in range(color_brk,len(lines)):\n if (lines[i][0]=='-'):\n brk = int(i)\n break\n wl = -1\n prev = False \n v = re.split(\"--|\\n|,\",lines[i])\n if (len(v)==4):\n #if (v[2]>=50)\n g.add_edge(v[0],v[1], Color='red', weight=v[2])\n else:\n g.add_edge(v[0],v[1], Color='red')\n \n # nodes & position\n for i in range(brk+1,len(lines)):\n if (lines[i][0]=='^'):\n brk = i + 1\n break\n \n v = re.split(\"\\n|[|]|[)]|[(]|,\",lines[i])\n if (with_coloring):\n g.add_node(v[0],X=float(v[2]), Y=float(v[3]),Color = node_color[v[0]])\n else: \n g.add_node(v[0],X=float(v[2]), Y=float(v[3]))\n \n #size\n for r in range(len(lines[brk])):\n if (lines[brk][r]==','):\n comma = r\n if (lines[brk][r]=='\\n'):\n end = r\n lines[brk]\n return [g,[int(lines[brk][:comma]), int(lines[brk][comma+1:end])], withWeight]\n\n\ndef plot(g, coor, outFile, withWeight, with_coloring):\n #Establish which categories will appear when hovering over each node\n HOVER_TOOLTIPS = [(\"Contig\", \"@index\")]\n\n #Create a plot — set dimensions, toolbar, and title\n plot = figure(tooltips = HOVER_TOOLTIPS,\n tools=\"pan,wheel_zoom,save,reset\", active_scroll='wheel_zoom',\n x_range=Range1d(-10.1, 10.1), y_range=Range1d(-10.1, 10.1), title='Graph')\n\n # Define node positions data structure (dict) for plotting\n node_positions = {node[0]: (node[1]['X'], node[1]['Y']) for node in g.nodes(data=True)}\n # Define data structure (list) of edge colors for plotting\n edge_colors = [e[2]['Color'] for e in g.edges(data=True)]\n #node colors\n\n cm = 1/2.54\n plt.figure(figsize=(coor[0]*cm, coor[1]*cm))\n if (with_coloring):\n node_colors = [node[1]['Color'] for node in g.nodes(data=True)]\n nx.draw(g, pos=node_positions,node_color=node_colors, edge_color=edge_colors, node_size=50, with_labels = True, font_size=5)\n else:\n nx.draw(g, pos=node_positions,edge_color=edge_colors, node_size=50, with_labels = True, font_size=5)\n if (withWeight):\n labels = {e: 
g.edges[e]['weight'] for e in g.edges}\n nx.draw_networkx_edge_labels(g,pos=node_positions, edge_labels=labels, font_size=5)\n plt.savefig(outFile, dpi=1000)\n \n # node_colors_rgb = convert_color(node_colors)\n if (with_coloring):\n for node in g.nodes(data=True):\n node[1]['node_color'] = convert_color(node[1]['Color'])\n network_graph = from_networkx(g, node_positions, scale=10, center=(0, 0))\n \n \n node_hover_tool = HoverTool(renderers=[network_graph.node_renderer], \n tooltips=[(\"Contig\", \"@index\")])\n edge_hover_tool = HoverTool(renderers=[network_graph.edge_renderer], \n tooltips=[(\"weight\", \"@weight\")])\n plot.add_tools(edge_hover_tool, node_hover_tool)\n \n network_graph.node_renderer.glyph = Circle(size=15, fill_color=\"node_color\")\n network_graph.node_renderer.selection_glyph = Circle(size=15, fill_color=Spectral4[2])\n network_graph.node_renderer.hover_glyph = Circle(size=15, fill_color=Spectral4[1])\n\n network_graph.edge_renderer.glyph = MultiLine(line_color=\"#CCCCCC\", line_alpha=0.8, line_width=5)\n network_graph.edge_renderer.selection_glyph = MultiLine(line_color=Spectral4[2], line_width=5)\n network_graph.edge_renderer.hover_glyph = MultiLine(line_color=Spectral4[1], line_width=5)\n \n network_graph.selection_policy = NodesAndLinkedEdges()\n network_graph.inspection_policy = EdgesAndLinkedNodes()\n # Add network graph to the plot\n plot.renderers.append(network_graph)\n\n\n show(plot)\n print(outFile)\n save(plot, filename=outFile+\".html\")\n\nif __name__ == \"__main__\":\n arguments = len(sys.argv)-1\n longOptions =['version', 'help', 'colour']\n options = \"vhi:n:c\"\n colour = False\n try: \n opts, args = getopt.getopt(sys.argv[1:], options, longOptions)\n except getopt.GetoptError:\n print('wrong params')\n sys.exit(1)\n for opt,arg in opts:\n if opt in (\"-c\", \"--colour\"):\n colour = True\n\n else:\n inFile = sys.argv[arguments-1]\n outFile = sys.argv[arguments]\n results = parseTxtFile(inFile, colour)\n plot(results[0], results[1], outFile, results[2], colour)\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ljchang/neurosynth
[ "470bae38911d8d31015016f098530a4a2e59f217" ]
[ "neurosynth/base/lexparser.py" ]
[ "\"\"\" Parsing expression grammar for feature-based study selection. \"\"\"\n\nfrom ply import lex\nfrom ply import yacc\nimport pandas as pd\nimport logging\n\nlogger = logging.getLogger('neurosynth.lexparser')\n\n\nclass Lexer(object):\n\n tokens = (\n 'WORD', 'FLOAT', 'ANDNOT', 'OR', 'AND', 'LPAR', 'RPAR', 'LT', 'RT'\n )\n\n t_ANDNOT = r'\\&\\~'\n t_AND = r'\\&'\n t_OR = r'\\|'\n t_LPAR = r'\\('\n t_RPAR = r'\\)'\n t_LT = r'\\<'\n t_RT = r'\\>'\n\n t_ignore = ' \\t'\n\n def __init__(self, dataset=None):\n self.dataset = dataset\n\n def t_WORD(self, t):\n r'[a-zA-Z\\_\\-\\*]+'\n return t\n\n def t_FLOAT(self, t):\n r'[0-9\\.]+'\n t.value = float(t.value)\n return t\n\n def build(self, **kwargs):\n self.lexer = lex.lex(module=self, optimize=1, **kwargs)\n\n def t_error(self, t):\n logger.error(\"Illegal character %s!\" % t.value[0])\n t.lexer.skip(1)\n\n def test(self, data):\n\n self.lexer.input(data)\n while True:\n tok = self.lexer.token()\n if not tok:\n break # No more input\n print(tok)\n\n\nclass Parser(object):\n\n def __init__(self, lexer, dataset, threshold=0.001, func=sum):\n\n self.lexer = lexer\n self.dataset = dataset\n self.threshold = threshold\n self.func = func\n self.tokens = lexer.tokens\n\n def p_list_andnot(self, p):\n 'list : list ANDNOT list'\n p[0] = p[1][set(p[1].index)-set(p[3].index)]\n\n def p_list_and(self, p):\n 'list : list AND list'\n p[0] = pd.concat(\n [p[1], p[3]], axis=1).dropna().apply(self.func, axis=1)\n\n def p_list_or(self, p):\n 'list : list OR list'\n p[0] = pd.concat(\n [p[1], p[3]], axis=1).fillna(0.0).apply(self.func, axis=1)\n\n def p_list_lt(self, p):\n 'list : list LT freq'\n p[0] = p[1][p[1] < p[3]]\n\n def p_list_rt(self, p):\n 'list : list RT freq'\n p[0] = p[1][p[1] >= p[3]]\n\n def p_feature_words(self, p):\n '''feature : WORD WORD\n | feature WORD'''\n p[0] = ' '.join([p[1], p[2]])\n\n def p_list_feature(self, p):\n '''list : feature\n | WORD '''\n p[0] = self.dataset.get_studies(\n features=p[1], frequency_threshold=self.threshold, func=self.func,\n return_type='weights')\n\n def p_list_expr(self, p):\n 'list : LPAR list RPAR'\n p[0] = p[2]\n\n def p_freq_float(self, p):\n 'freq : FLOAT'\n p[0] = p[1]\n\n def build(self, **kwargs):\n self.parser = yacc.yacc(module=self, **kwargs)\n\n def p_error(self, p):\n print(p)\n\n def parse(self, input):\n return self.parser.parse(input)\n" ]
[ [ "pandas.concat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
jturne19/legus-alma
[ "e6d8c512c57cf2447dd9bc2977d27c8d31b9abed" ]
[ "science/science.py" ]
[ "#!/d/users/turner/tools/anaconda3/bin/python\n\"\"\"\nbig 'ole python script that (hopefully) goes through the entire science procedure for give band 4 and band 7 fits files\n\nnotes are given in science.md\n\n**python3**\nto run:\nipython\nexec(open('science.py').read())\n\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport astropy\nfrom astropy.io import fits\nimport os, sys, subprocess\nfrom astropy import constants as const\n\n# add scripts directory to the path\nsys.path.insert(0, '/uwpa2/turner/legus-alma/scripts/')\nfrom ds9_regions_from_sextractor import get_regions\nfrom get_sextractor_in_footprint import in_footprint\nfrom overlapping_regions import overlap\nfrom mcmc_error_bars import mcmc_error\nfrom nearest_cluster import nearest_cluster\n\n\"\"\"\nif starting with new band 4 and band 7 fits files, you need to:\n\t1. make sure to have pbcoverage files\n\t2. take global.deg.reg and make new pixel files for band 4 and band 7 \n\n\ndata1 --> results for sextractor with 5 pixels > 2 sigma\ndata2 --> results for sextractor with 5 pixels > 2.5 sigma\ndata3 --> results for sextractor with 5 pixels > 3.0 sigma\n\ndata4 --> retry sextractor 5pix > 2 sigma \ndata5 --> retry sextractor 5pix > 2.5 sigma \ndata6 --> retry sextractor 5pix > 3.0 sigma\ndata7 --> sextractor 2pix > 2 sigma\ndata8 --> sextractor 2pix > 3 sigma\n\ndata_oct23 --> using band4.ilsang.pbcor.fits & band7.ilsang.pbcor.fits | sextractor with 5 pixels > 2 sigma\ndata_oct23_2 --> using band4.ilsang.pbcor.fits & band7.ilsang.feather.fits | sextractor with 5 pixels > 2 sigma\ndata_oct23_3 --> using band4.ilsang.pbcor.fits & band7.ilsang.feather.fits | sextractor with 5 pixels > 3 sigma\n\ndata_nov13 --> redo data_oct23 but with much closer overlapping criteria\n\ndata_jan31 --> double check if fluxes are being correctly calculated from sextractor\n\t\t\t 50 contigous pixels > 2 sigma (50 pixels ~ 1/5 beam size)\n\ndata_feb5 --> same as before just now outputs the source fluxes [W/m2] in a seperate file\n\ndata_feb7 --> last one \n\ndata_feb8 --> nevermind this is the last one since overlapping_regions was set to 2.2 arcsec for the separation\n\t\t\t but want to have just 1.1 arcsec (beam size)\n\ndata_feb12 --> just kidding, this is the last one. fixed beam sizes in fluxes section\n\n\"\"\"\n# decide what you want:\n\nglobal_phot = True\t\t# perform global photometry?\nregions = True\t\t# use sextractor to get dust regions and whatnot?\nfluxes = True\t\t# calculate flux in dust regions and get slopes?\ncreate_legus_region_files = False\t\t# create ds9 region files from legus cluster catalog? 
(probably not necessary since those files are already on the github repo)\nclosest_clusters = True\t\t# find closest stellar clusters to dust regions?\nplot = True\t\t# do some plotting?\nbackup\t\t\t\t\t = True \t\t# backup files\nbackup_dir = 'data_feb12'\n\n\nmain_dir = '/uwpa2/turner/legus-alma/'\n\nos.chdir(main_dir + 'science')\n\n# define the band 4 and band 7 fits files to use\nb4_fits = 'band4.ilsang.pbcor.fits'\nb7_fits = 'band7.ilsang.pbcor.fits'\n# define the other fits files needed\nb4_pbcoverage = 'band4.ilsang.pb.fits'\nb7_pbcoverage = 'band7.ilsang.pb.fits'\n\n\n# defining some functions\ndef slope(x,y):\n\t\"\"\"simple function to return the slope of a log line connecting two points\n\t\"\"\"\n\treturn (np.log10(y[1]) - np.log10(y[0])) / (np.log10(x[1]) - np.log10(x[0]))\n\ndef mbb_func_freq(freq, beta, T, sr_in_images):\n\n\tC3 = 2 * const.h.value / const.c.value**2\n\tC4 = const.h.value / const.k_B.value\n\n\tbb = C3 * (freq**(3 + beta)) / (np.exp(C4*freq/T) - 1)\n\treturn bb * freq * sr_in_images\n\t\n\n# global photometry modified blackbody fit\nif global_phot:\n\tfrom modBBfit import fit\n\n\tprint('\\ndoing global photometry bidness \\n')\n\n\tos.chdir(main_dir + 'science/herschel/')\n\t# first need pixel scale of the PACS and SPIRE images. units are MJy*pix/sr. we want Jy\n\thead = fits.open('NGC0628_S500_110_SSS_111_PACSS_70.fits')[0].header\n\tcd11 = head['CD1_1']\t# deg/pix\n\tcd22 = head['CD2_2']\t# deg/pix\n\tpix_scale_sr = (-cd11 * 3600)*(cd22*3600) / 4.255e10\t# sr/pix\n\n\t# read in phot data from tables.asc\n\tnet, num_of_pix = np.loadtxt('tables.asc', usecols=[1,2], unpack=True)\n\t# total number of sr in the images\n\tsr_in_images = pix_scale_sr * num_of_pix[0]\n\t# wavelengths of the hershcel images\n\twavel = np.array([71.8, 103.0, 157.0, 252.0, 353.0, 511.0])*1.0e-6 \t# meters\n\t# convert to frequencies\n\tfreq = const.c.value/wavel \t# Hz\n\t# now convert flux from MJy*pix/sr to Jy\n\tnet = net * pix_scale_sr * 1e6\n\tflux = net * freq \t# nu*F_nu\n\n\t# calculate error in fluxes\n\tsky_sigma, n_sky = np.loadtxt('sky.sigma.temp.dat', usecols=[2,3], unpack=True)\n\tflux_err = sky_sigma * np.sqrt(num_of_pix + num_of_pix**2/n_sky)\n\tcal_err = np.array([.05, .05, .05, .07, .07, .07])*flux\n\tflux_err = flux_err * pix_scale_sr * freq * 1e6\n\tflux_err = np.sqrt(flux_err**2 + cal_err**2)*1e-26\n\n\tbeta, T = fit(freq, flux, flux_err, sr_in_images)\n\n\t# array of wavelengths for fitting\n\twfit = np.arange(50, 1e4, 10)\n\t# free-free and synchrotron\n\tff_flux0 = 12.319e-19\n\tsync_flux0 = 4.567e-19\n\t# alma freq\n\talma_freq = np.array([1.45E+11, 3.43E+11])\n\talma_wavel = const.c.value/alma_freq[0] * 1e6\n\n\tbb = mbb_func_freq(const.c.value/(wfit*1e-6), beta[0], T[0], sr_in_images)*1e-26\n\tff = ff_flux0 * (alma_wavel/wfit)**.9\n\tsync = sync_flux0 * (alma_wavel/wfit)**.2\n\n\tflux_sum = bb + ff + sync\n\n\t# alma global phot\n\tanet, anum_of_pix = np.loadtxt(main_dir+'/science/global/tables.4.asc', usecols=[1,2], unpack=True)\n\t# net is in units Jy*pix/beam\n\tb4_hdulist = fits.open(main_dir+ 'science/'+b4_fits)\n\tb4_hdu = b4_hdulist[0]\n\tb4_header = b4_hdu.header\n\tb4_bmaj = b4_header['bmaj'] * 3600.0\n\tb4_bmin = b4_header['bmin'] * 3600.0\n\n\tb7_hdulist = fits.open(main_dir+'science/'+b7_fits)\n\tb7_hdu = b7_hdulist[0]\n\tb7_header = b7_hdu.header\n\tb7_bmaj = b7_header['bmaj'] * 3600.0 \n\tb7_bmin = b7_header['bmin'] * 3600.0 \n\n\tbeams = np.array([ np.pi/4.0 *b4_bmaj * b4_bmin, np.pi/4.0 * b7_bmaj*b7_bmin])\n\tpixel_size = 
0.06**2\n\tpix_per_beam = beams/pixel_size\n\n\talma_flux = anet / pix_per_beam\t# Jy \n\n\tprint('\\nband7 flux = %1.5f Jy\\n'%alma_flux[1])\n\tprint('\\nband4 flux = %1.5f Jy\\n'%alma_flux[0])\n\n\t# save alma data\n\tnp.savetxt(main_dir+'science/global/alma_global_flux.dat',np.transpose([const.c.value/alma_freq * 1e6, alma_flux*alma_freq*1e-26]), header='wavelength (micron) \\t flux (W/m2)' )\n\t# save herschel data\n\tnp.savetxt('herschel_flux.dat', np.transpose([const.c.value/freq * 1e6, flux*1e-26, flux_err]), header='wavelength (micron) \\t flux (W/m2) \\t 1 sigma error')\n\t# save bb, ff, and sync data\n\tnp.savetxt('radiation.dat', np.transpose([wfit, bb, ff, sync, flux_sum]), header='wavelength (micron) \\t BB (W/m2) \\t F-F flux (W/m2) \\t Synchrotron (W/m2) \\t BB+FF+Sync (W/m2)')\n\tnp.savetxt('bb_params.dat', [beta, T], header='best fit parameters for the modified blackbody fit \\nbeta, +error, -error \\ntemperature, +error, -error')\n\n\t# for testing how things look:\n\t# read in data from the things above\n\twavel, bb, ff, sync, total = np.loadtxt(main_dir+'science/herschel/radiation.dat', unpack=True)\n\thwavel, hflux, herr = np.loadtxt(main_dir+'science/herschel/herschel_flux.dat', unpack=True)\n\tawavel, aflux = np.loadtxt(main_dir + 'science/global/alma_global_flux.dat', unpack=True)\n\n\tplt.figure()\n\tplt.loglog(hwavel, hflux, 'ko')\n\tplt.loglog(awavel, aflux, 'ro')\n\tplt.loglog(wavel, total, 'k-')\n\tplt.xlabel('Wavelength (micron)')\n\tplt.ylabel(r'Flux (W/m$^2$)')\n\tplt.show()\n\n\nif regions:\n\n\tprint('\\ndoing dust region bidness \\n')\n\n\tos.chdir(main_dir + 'science/')\n\t# read in band 4 header and data \n\tb4_hdulist = fits.open(b4_fits)\n\tb4_hdu = b4_hdulist[0]\n\tb4_header = b4_hdu.header\n\tb4_data = b4_hdu.data\n\t\n\tb4_bmaj = b4_header['bmaj'] * 3600.0 \t# restoring beam major axis in arcsec\n\tb4_bmin = b4_header['bmin'] * 3600.0 \t# restoring beam minor axis in arcsec\n\t\n\t\n\t# read in band 7 header and data\n\tb7_hdulist = fits.open(b7_fits)\n\tb7_hdu = b7_hdulist[0]\n\tb7_header = b7_hdu.header\n\tb7_data = b7_hdu.data\n\t\n\tb7_bmaj = b7_header['bmaj'] * 3600.0 \t# restoring beam major axis in arcsec\n\tb7_bmin = b7_header['bmin'] * 3600.0 \t# restoring beam minor axis in arcsec\n\t\n\t# use sextractor to extract the dust regions\n\t# need to run sextractor from physics network computer like uwpa\n\tb4_sexcmd = 'sex ../%s -c config.sex -catalog_name band4.cat -detect_minarea 50 -detect_thresh 1.0 -analysis_thresh 1.0 -seeing_FWHM %1.2f -pixel_scale 0.06 -back_type manual -back_value 0.0'%(b4_fits, b4_bmaj)\n\tb7_sexcmd = 'sex ../%s -c config.sex -catalog_name band7.cat -detect_minarea 50 -detect_thresh 1.0 -analysis_thresh 1.0 -seeing_FWHM %1.2f -pixel_scale 0.06 -back_type manual -back_value 0.0'%(b7_fits, b7_bmaj)\n\t\n\t# need to run sextractor from extract directory with the config files and default params things\n\tos.chdir(main_dir+'science/extract')\n\t\n\t# run sextractor commands\n\ttry:\n\t\tsubprocess.call(b4_sexcmd.split())\n\t\tsubprocess.call(b7_sexcmd.split())\n\texcept OSError as e:\n\t\tprint(e)\n\t\n\t# make new .cats that only have regions that lie within the 80% pbcoverage\n\tin_footprint('band4.cat', '../'+b4_pbcoverage)\n\tin_footprint('band7.cat', '../'+b7_pbcoverage)\n\t\n\t# next step is to make ds9 regions out of the sextractor .cat\n\tget_regions('band4.in_footprint.cat')\n\tget_regions('band7.in_footprint.cat')\n\t# outputs band4.in_footprint.reg and band7.in_footprint.reg\n\tdone = False\n\twhile not 
done:\n\t\tcheck = input('Did you open the in_footprint.reg files and then save them as degree region files? [y/n] ')\n\t\tif check == 'y'or check == 'yes' or check == 'Y' or check == 'Yes' or check == 'YES' or check == 'yeet' or check == 'YEET':\n\t\t\t# need to open band4.in_footprint.reg and band7.in_footprint.reg in ds9 and save as degree region files\n\t\t\toverlap('band4.in_footprint.deg.reg', 'band7.in_footprint.deg.reg', sep=1.1)\n\t\t\t# outputs band4.overlap.deg.reg and band7.overlap.deg.reg\n\t\t\tdone = True\n\t\telse:\n\t\t\tprint('\\nwell do it\\n')\n\nif fluxes:\n\n\tprint('\\ndoing flux continuum slope bidness \\n')\n\n\tos.chdir(main_dir + 'science/extract')\n\n\t# grab fluxes in regions\n\t# need the band frequencies\n\t#\t\t\t\t band 4, band 7\n\tfreq = np.array([1.45E+11, 3.43E+11])\n\twavel = const.c.value/freq\n\t\n\t\n\tb4_flux, b4_fluxerr, b4_bg = np.loadtxt('band4.overlap.cat', usecols=[0,1,3], unpack=True)\n\tb7_flux, b7_fluxerr, b7_bg = np.loadtxt('band7.overlap.cat', usecols=[0,1,3], unpack=True)\n\t\n\tos.chdir(main_dir + 'science/')\n\t\n\t# these are the wrong beam sizes (< 0.2% difference)\n\t# b4_bmaj = 1.12562286853788\n\t# b4_bmin = 1.07750606536872\n\t# b7_bmaj = 1.11270332336436\n\t# b7_bmin = 1.04236483573908\n\n\t# flux from sextractor is in Jy pix/beam so need to get rid of beam business\n\tbeams = np.array([ np.pi/4.0 * b4_bmaj * b4_bmin, np.pi/4.0 * b7_bmaj*b7_bmin])\n\tpixel_size = 0.06**2\n\tpix_per_beam = beams/pixel_size\n\t\n\t# Jy\n\tflux = np.array([ b4_flux/pix_per_beam[0], b7_flux/pix_per_beam[1] ])\n\tflux_err = np.array([ b4_fluxerr/pix_per_beam[0], b7_fluxerr/pix_per_beam[1] ])\n\n\t# Jy to W/m2\n\tfWm2 = np.array([ flux[0] * 1e-26 * freq[0], flux[1] * 1e-26 * freq[1] ])\n\tefWm2 = np.array([ flux_err[0] * 1e-26 * freq[0], flux_err[1] * 1e-26 * freq[1]])\n\n\t# output\n\tf = open('source_fluxes.dat', 'w')\n\tf.write(f'{\"# F(0.87mm) W/m2\":>16}')\n\tf.write(' ')\n\tf.write(f'{\"Error\":>11}')\n\tf.write(' ')\n\tf.write(f'{\"F(2.1mm) W/m2\":>13}')\n\tf.write(' ')\n\tf.write(f'{\"Error\":>11}')\n\tf.write('\\n')\n\n\tfor i in range(len(fWm2[0])):\n\t\tf.write(f'{\"%1.5e\"%fWm2[1][i]:>16}')\n\t\tf.write(' ')\n\t\tf.write(f'{\"%1.5e\"%efWm2[1][i]:>11}')\n\t\tf.write(' ')\n\t\tf.write(f'{\"%1.5e\"%fWm2[0][i]:>13}')\n\t\tf.write(' ')\n\t\tf.write(f'{\"%1.5e\"%efWm2[0][i]:>11}')\n\t\tf.write('\\n')\n\n\tf.close()\n\n\t# simple calculation of slopes\n\tslopes = slope(freq, flux)\n\t\n\t# mcmc calculation of slopes and the standard deviations on those slopes\n\terr_params = np.zeros([len(flux[0]), 3])\n\t\n\tfor i in range(len(flux[0])):\n\t\ty = np.array([flux[0][i], flux[1][i]])*1e-26\n\t\terr = np.array([flux_err[0][i], flux_err[1][i]])*1e-26\n\t\n\t\terr_params[i] = mcmc_error(slope, wavel, y, err)\n\t\n\tnp.savetxt('slopes+errs.dat', err_params, header='mean slope \\t std dev \\t median slope')\n\nif create_legus_region_files:\n\n\tprint('\\nturning legus clusters into ds9 region files \\n')\n\n\tos.chdir(main_dir + 'science/legus')\n\n\tra, dec, cl = np.loadtxt('hlsp_legus_hst_acs-wfc3_ngc628-c_multiband_v1_padagb-mwext-avgapcor.tab', usecols=[3,4,33], unpack=True)\n\tall_clusters = np.loadtxt('hlsp_legus_hst_acs-wfc3_ngc628-c_multiband_v1_padagb-mwext-avgapcor.tab')\n\n\t# first get rid of classes 0 and 4\n\tw = np.where((cl != 0) & (cl != 4))[0]\n\tra = ra[w]\n\tdec = dec[w]\n\n\tall_clusters = all_clusters[w]\n\n\t# output these star clusters as a ds9 degree region file\n\tf = open('ngc628-c_clusters_class123.deg.reg 
w')\n\tf.write('fk5\\n')\n\n\tfor i in range(len(ra)):\n\t\tf.write('point(%1.6f,%1.6f) # point=X\\n'%(ra[i], dec[i]))\n\n\tf.close()\n\n\t# write out new cluster catalog file with just classes 1,2,3\n\tnp.savetxt('ngc628-c_clusters_class123.cat', all_clusters, delimiter='\\t')\n\n\nif closest_clusters:\n\n\tprint('\\nfinding the closest stellar clusters to the dust blobs \\n')\n\n\tos.chdir(main_dir + 'science')\n\tdustcoords, starcoords, age, mass, excess = nearest_cluster('extract/band4.overlap.deg.reg')\n\n\t# calculate angular separations\n\tang_sep = np.array([ dustcoords[i].separation(starcoords[i]).arcsec for i in range(len(dustcoords))])\n\t# calculate physical separations in pc\n\tphys_sep = np.array([ ang*10e6 / 206265 for ang in ang_sep ])\n\n\tage_avg = np.array([ np.mean(a) for a in age ])\n\tmass_avg = np.array([ np.mean(m) for m in mass ])\n\texcess_avg = np.array([ np.mean(e) for e in excess])\n\tphys_sep_avg = np.array([ np.mean(p) for p in phys_sep ])\n\tang_sep_avg = np.array([ np.mean(a) for a in ang_sep])\n\n\tage_min = np.array([ np.min(a) for a in age ])\n\tmass_min = np.array([ np.min(m) for m in mass ])\n\texcess_min = np.array([ np.min(e) for e in excess ])\n\tphys_sep_min = np.array([ np.min(p) for p in phys_sep])\n\tang_sep_min = np.array([ np.min(a) for a in ang_sep])\n\n\tnp.savetxt('closest_clusters_props.average.dat', np.transpose([ang_sep_avg, phys_sep_avg, age_avg, mass_avg, excess_avg]), header='ang sep (arsec) \\t physical sep (pc) \\t age (yr) \\t mass (solar mass) \\t E(B-V)')\n\tnp.savetxt('closest_clusters_props.minimum.dat', np.transpose([ang_sep_min, phys_sep_min, age_min, mass_min, excess_min]), header='ang sep (arsec) \\t physical sep (pc) \\t age (yr) \\t mass (solar mass) \\t E(B-V)')\n\nif plot:\n\n\tprint('\\ndoing some plotting bidness \\n')\n\n\tang_sep_avg, phys_sep_avg, age_avg, mass_avg, excess_avg = np.loadtxt('closest_clusters_props.average.dat', unpack=True)\n\tang_sep_min, phys_sep_min, age_min, mass_min, excess_min = np.loadtxt('closest_clusters_props.minimum.dat', unpack=True)\n\tslopes = np.loadtxt('slopes+errs.dat', usecols=[0])\n\n\tplt.figure()\n\tplt.semilogx(age_avg, slopes, 'ro')\n\t# plt.loglog(age_avg, np.abs(slopes), 'ro')\n\tplt.xlabel('Age (years)')\n\tplt.ylabel('Dust Continuum Slope')\n\tplt.title('3 closest clusters averaged')\n\tplt.savefig('figs/age_avg.png')\n\t# plt.show()\n\n\tplt.figure()\n\tplt.semilogx(age_min, slopes, 'ro')\n\tplt.xlabel('Age (years)')\n\tplt.ylabel('Dust Continuum Slope')\n\tplt.title('closest cluster')\n\tplt.savefig('figs/age_min.png')\n\t# plt.show()\n\n\tplt.figure()\n\tplt.semilogx(mass_avg, slopes, 'ro')\n\t# plt.loglog(mass_avg, -slopes, 'ro')\t\n\tplt.xlabel('Mass (solar masses)')\n\tplt.ylabel('Dust Continuum Slope')\n\tplt.title('3 closest clusters averaged')\n\tplt.savefig('figs/mass_avg.png')\n\t# plt.show()\n\n\tplt.figure()\n\tplt.semilogx(mass_min, slopes, 'ro')\n\tplt.xlabel('Mass (solar masses)')\n\tplt.ylabel('Dust Continuum Slope')\n\tplt.title('closest cluster')\n\tplt.savefig('figs/mass_min.png')\n\t# plt.show()\n\n\tplt.figure()\n\tplt.semilogx(excess_avg, slopes, 'ro')\n\t# plt.loglog(excess_avg, -slopes, 'ro')\n\tplt.xlabel('E(B-V)')\n\tplt.ylabel('Dust Continuum Slope')\n\tplt.title('3 closest clusters averaged')\n\tplt.savefig('figs/excess_avg.png')\n\t# plt.show()\n\n\tplt.figure()\n\tplt.semilogx(excess_min, slopes, 'ro')\n\tplt.xlabel('E(B-V)')\n\tplt.ylabel('Dust Continuum Slope')\n\tplt.title('closest cluster')\n\tplt.savefig('figs/excess_min.png')\n\t# 
plt.show()\n\n\tplt.figure()\n\tplt.semilogy(phys_sep_avg, age_avg, 'bo')\n\tplt.xlabel('Physical Separation (pc)')\n\tplt.ylabel('Age (yr)')\n\tplt.title('3 closest clusters averaged')\n\tplt.savefig('figs/age_sep_avg.png')\n\n\tplt.figure()\n\tplt.semilogy(phys_sep_min, age_min, 'bo')\n\tplt.xlabel('Physical Separation (pc)')\n\tplt.ylabel('Age (yr)')\n\tplt.title('closest cluster')\n\tplt.savefig('figs/age_sep_min.png')\n\n\tplt.figure()\n\tplt.semilogx(phys_sep_min, slopes, 'ro')\n\tplt.xlabel('Physical Separation (pc)')\n\tplt.ylabel('Dust Continuum Slope')\n\tplt.title('closest cluster')\n\tplt.savefig('figs/slope_vs_sep.png')\n\nif backup:\n\n\tos.chdir(main_dir + 'science')\n\t# save relevant files in new directory\n\n\textract_files = 'cp extract/band4.cat extract/band4.in_footprint.cat extract/band4.in_footprint.deg.reg extract/band4.overlap.cat extract/band4.overlap.deg.reg extract/band7.cat extract/band7.in_footprint.cat extract/band7.in_footprint.deg.reg extract/band7.overlap.cat extract/band7.overlap.deg.reg '+backup_dir\n\therschel_files = 'cp herschel/bb_params.dat herschel/herschel_flux.dat herschel/radiation.dat herschel/sky.sigma.temp.dat '+backup_dir # also tables.asc renamed to tables.herschel.asc\n\tglobal_files = 'cp global/alma_global_flux.dat '+backup_dir # also tables.asc renamed to tables.alma.asc\n\tfiles = 'cp slopes+errs.dat all_clusters.dat closest_clusters_props.average.dat closest_clusters_props.minimum.dat source_fluxes.dat '+backup_dir #also figs directory copied\n\n\ttry:\n\t\tsubprocess.call(['mkdir', '-p', backup_dir])\n\texcept:\n\t\tprint('failed to make backup directory')\n\ttry:\n\t\tsubprocess.call(extract_files.split())\n\texcept:\n\t\tprint('failed to copy extract directory files')\n\ttry:\n\t\tsubprocess.call(herschel_files.split())\n\texcept:\n\t\tprint('failed to copy herschel directory files')\n\ttry:\n\t\tsubprocess.call(['cp', 'herschel/tables.asc', backup_dir+'/tables.herschel.asc'])\n\texcept:\n\t\tprint('failed to copy herschel tables')\n\ttry:\n\t\tsubprocess.call(global_files.split())\n\texcept:\n\t\tprint('failed to copy global flux files')\n\ttry:\n\t\tsubprocess.call(['cp', 'global/tables.4.asc', backup_dir+'/tables.alma.asc'])\n\texcept:\n\t\tprint('failed to global tables')\n\ttry:\n\t\tsubprocess.call(files.split())\n\texcept:\n\t\tprint('failed to copy main directory files')\n\ttry:\n\t\tsubprocess.call(['cp', '-r', 'figs', backup_dir+'/'])\n\texcept:\n\t\tprint('failed to copy the figs directory')\n" ]
[ [ "numpy.sqrt", "matplotlib.pyplot.loglog", "numpy.mean", "numpy.exp", "numpy.where", "numpy.arange", "matplotlib.pyplot.figure", "matplotlib.pyplot.semilogx", "matplotlib.pyplot.title", "numpy.min", "matplotlib.pyplot.savefig", "numpy.log10", "numpy.transpose", "numpy.savetxt", "matplotlib.pyplot.show", "numpy.array", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.semilogy", "matplotlib.pyplot.xlabel", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
1ucky40nc3/models
[ "1933222e454f0d2ab8582e48fcc46f26c36ace87", "192ae544169c1230c21141c033800aa1bd94e9b6", "192ae544169c1230c21141c033800aa1bd94e9b6" ]
[ "official/nlp/keras_nlp/layers/fast_attention_util.py", "official/vision/beta/projects/simclr/heads/simclr_head.py", "research/object_detection/meta_architectures/deepmac_meta_arch_test.py" ]
[ "# coding=utf-8\n# Copyright 2021 The Google Research Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Keras-based einsum layer.\n\nCopied from\nhttps://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/dense_einsum.py.\n\"\"\"\n\n# Copied from:\n# https://github.com/google-research/google-research/blob/master/performer/fast_attention/tensorflow/util.py\n\nimport tensorflow as tf\n\n_CHR_IDX = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\"]\n\n\[email protected]_keras_serializable(package=\"Text\")\nclass DenseEinsum(tf.keras.layers.Layer):\n \"\"\"A densely connected layer that uses tf.einsum as the backing computation.\n\n This layer can perform einsum calculations of arbitrary dimensionality.\n\n Arguments:\n output_shape: Positive integer or tuple, dimensionality of the output space.\n num_summed_dimensions: The number of dimensions to sum over. Standard 2D\n matmul should use 1, 3D matmul should use 2, and so forth.\n activation: Activation function to use. If you don't specify anything, no\n activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix.\n bias_initializer: Initializer for the bias vector.\n kernel_regularizer: Regularizer function applied to the `kernel` weights\n matrix.\n bias_regularizer: Regularizer function applied to the bias vector.\n activity_regularizer: Regularizer function applied to the output of the\n layer (its \"activation\")..\n kernel_constraint: Constraint function applied to the `kernel` weights\n matrix.\n bias_constraint: Constraint function applied to the bias vector.\n Input shape:\n N-D tensor with shape: `(batch_size, ..., input_dim)`. The most common\n situation would be a 2D input with shape `(batch_size, input_dim)`.\n Output shape:\n N-D tensor with shape: `(batch_size, ..., units)`. 
For instance, for a 2D\n input with shape `(batch_size, input_dim)`, the output would have shape\n `(batch_size, units)`.\n \"\"\"\n\n def __init__(self,\n output_shape,\n num_summed_dimensions=1,\n activation=None,\n use_bias=True,\n kernel_initializer=\"glorot_uniform\",\n bias_initializer=\"zeros\",\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n super(DenseEinsum, self).__init__(**kwargs)\n self._output_shape = output_shape if isinstance(\n output_shape, (list, tuple)) else (output_shape,)\n self._activation = tf.keras.activations.get(activation)\n self._use_bias = use_bias\n self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)\n self._bias_initializer = tf.keras.initializers.get(bias_initializer)\n self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)\n self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)\n self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)\n self._bias_constraint = tf.keras.constraints.get(bias_constraint)\n self._num_summed_dimensions = num_summed_dimensions\n self._einsum_string = None\n\n def _build_einsum_string(self, free_input_dims, bound_dims, output_dims):\n input_str = \"\"\n kernel_str = \"\"\n output_str = \"\"\n letter_offset = 0\n for i in range(free_input_dims):\n char = _CHR_IDX[i + letter_offset]\n input_str += char\n output_str += char\n\n letter_offset += free_input_dims\n for i in range(bound_dims):\n char = _CHR_IDX[i + letter_offset]\n input_str += char\n kernel_str += char\n\n letter_offset += bound_dims\n for i in range(output_dims):\n char = _CHR_IDX[i + letter_offset]\n kernel_str += char\n output_str += char\n\n return input_str + \",\" + kernel_str + \"->\" + output_str\n\n def build(self, input_shape):\n input_shape = tf.TensorShape(input_shape)\n input_rank = input_shape.rank\n free_input_dims = input_rank - self._num_summed_dimensions\n output_dims = len(self._output_shape)\n\n self._einsum_string = self._build_einsum_string(free_input_dims,\n self._num_summed_dimensions,\n output_dims)\n\n # This is only saved for testing purposes.\n self._kernel_shape = (\n input_shape[free_input_dims:].concatenate(self._output_shape))\n\n self._kernel = self.add_weight(\n \"kernel\",\n shape=self._kernel_shape,\n initializer=self._kernel_initializer,\n regularizer=self._kernel_regularizer,\n constraint=self._kernel_constraint,\n dtype=self.dtype,\n trainable=True)\n if self._use_bias:\n self._bias = self.add_weight(\n \"bias\",\n shape=self._output_shape,\n initializer=self._bias_initializer,\n regularizer=self._bias_regularizer,\n constraint=self._bias_constraint,\n dtype=self.dtype,\n trainable=True)\n else:\n self._bias = None\n super(DenseEinsum, self).build(input_shape)\n\n def get_config(self):\n config = {\n \"output_shape\":\n self._output_shape,\n \"num_summed_dimensions\":\n self._num_summed_dimensions,\n \"activation\":\n tf.keras.activations.serialize(self._activation),\n \"use_bias\":\n self._use_bias,\n \"kernel_initializer\":\n tf.keras.initializers.serialize(self._kernel_initializer),\n \"bias_initializer\":\n tf.keras.initializers.serialize(self._bias_initializer),\n \"kernel_regularizer\":\n tf.keras.regularizers.serialize(self._kernel_regularizer),\n \"bias_regularizer\":\n tf.keras.regularizers.serialize(self._bias_regularizer),\n \"activity_regularizer\":\n tf.keras.regularizers.serialize(self._activity_regularizer),\n \"kernel_constraint\":\n 
tf.keras.constraints.serialize(self._kernel_constraint),\n \"bias_constraint\":\n tf.keras.constraints.serialize(self._bias_constraint)\n }\n base_config = super(DenseEinsum, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n def call(self, inputs):\n ret = tf.einsum(self._einsum_string, inputs, self._kernel)\n if self._use_bias:\n ret += self._bias\n if self._activation is not None:\n ret = self._activation(ret)\n return ret", "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\r\n# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\"\"\"Dense prediction heads.\"\"\"\r\n\r\nfrom typing import Text, Optional\r\n\r\nimport tensorflow as tf\r\n\r\nfrom official.vision.beta.projects.simclr.modeling.layers import nn_blocks\r\n\r\nregularizers = tf.keras.regularizers\r\nlayers = tf.keras.layers\r\n\r\n\r\[email protected]_keras_serializable(package='simclr')\r\nclass ProjectionHead(tf.keras.layers.Layer):\r\n \"\"\"Projection head.\"\"\"\r\n\r\n def __init__(\r\n self,\r\n num_proj_layers: int = 3,\r\n proj_output_dim: Optional[int] = None,\r\n ft_proj_idx: int = 0,\r\n kernel_initializer: Text = 'VarianceScaling',\r\n kernel_regularizer: Optional[regularizers.Regularizer] = None,\r\n bias_regularizer: Optional[regularizers.Regularizer] = None,\r\n use_sync_bn: bool = False,\r\n norm_momentum: float = 0.99,\r\n norm_epsilon: float = 0.001,\r\n **kwargs):\r\n \"\"\"The projection head used during pretraining of SimCLR.\r\n\r\n Args:\r\n num_proj_layers: `int` number of Dense layers used.\r\n proj_output_dim: `int` output dimension of projection head, i.e., output\r\n dimension of the final layer.\r\n ft_proj_idx: `int` index of layer to use during fine-tuning. 
0 means no\r\n projection head during fine tuning, -1 means the final layer.\r\n kernel_initializer: kernel_initializer for convolutional layers.\r\n kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.\r\n Default to None.\r\n bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.\r\n Default to None.\r\n use_sync_bn: if True, use synchronized batch normalization.\r\n norm_momentum: `float` normalization omentum for the moving average.\r\n norm_epsilon: `float` small float added to variance to avoid dividing by\r\n zero.\r\n **kwargs: keyword arguments to be passed.\r\n \"\"\"\r\n super(ProjectionHead, self).__init__(**kwargs)\r\n\r\n assert proj_output_dim is not None or num_proj_layers == 0\r\n assert ft_proj_idx <= num_proj_layers, (num_proj_layers, ft_proj_idx)\r\n\r\n self._proj_output_dim = proj_output_dim\r\n self._num_proj_layers = num_proj_layers\r\n self._ft_proj_idx = ft_proj_idx\r\n self._kernel_initializer = kernel_initializer\r\n self._kernel_regularizer = kernel_regularizer\r\n self._bias_regularizer = bias_regularizer\r\n self._use_sync_bn = use_sync_bn\r\n self._norm_momentum = norm_momentum\r\n self._norm_epsilon = norm_epsilon\r\n self._layers = []\r\n\r\n def get_config(self):\r\n config = {\r\n 'proj_output_dim': self._proj_output_dim,\r\n 'num_proj_layers': self._num_proj_layers,\r\n 'ft_proj_idx': self._ft_proj_idx,\r\n 'kernel_initializer': self._kernel_initializer,\r\n 'kernel_regularizer': self._kernel_regularizer,\r\n 'bias_regularizer': self._bias_regularizer,\r\n 'use_normalization': self._use_normalization,\r\n 'norm_momentum': self._norm_momentum,\r\n 'norm_epsilon': self._norm_epsilon\r\n }\r\n base_config = super(ProjectionHead, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n def build(self, input_shape):\r\n self._layers = []\r\n if self._num_proj_layers > 0:\r\n intermediate_dim = int(input_shape[-1])\r\n for j in range(self._num_proj_layers):\r\n if j != self._num_proj_layers - 1:\r\n # for the middle layers, use bias and relu for the output.\r\n layer = nn_blocks.DenseBN(\r\n output_dim=intermediate_dim,\r\n use_bias=True,\r\n use_normalization=True,\r\n activation='relu',\r\n kernel_initializer=self._kernel_initializer,\r\n kernel_regularizer=self._kernel_regularizer,\r\n bias_regularizer=self._bias_regularizer,\r\n use_sync_bn=self._use_sync_bn,\r\n norm_momentum=self._norm_momentum,\r\n norm_epsilon=self._norm_epsilon,\r\n name='nl_%d' % j)\r\n else:\r\n # for the final layer, neither bias nor relu is used.\r\n layer = nn_blocks.DenseBN(\r\n output_dim=self._proj_output_dim,\r\n use_bias=False,\r\n use_normalization=True,\r\n activation=None,\r\n kernel_regularizer=self._kernel_regularizer,\r\n kernel_initializer=self._kernel_initializer,\r\n use_sync_bn=self._use_sync_bn,\r\n norm_momentum=self._norm_momentum,\r\n norm_epsilon=self._norm_epsilon,\r\n name='nl_%d' % j)\r\n self._layers.append(layer)\r\n super(ProjectionHead, self).build(input_shape)\r\n\r\n def call(self, inputs, training=None):\r\n hiddens_list = [tf.identity(inputs, 'proj_head_input')]\r\n\r\n if self._num_proj_layers == 0:\r\n proj_head_output = inputs\r\n proj_finetune_output = inputs\r\n else:\r\n for j in range(self._num_proj_layers):\r\n hiddens = self._layers[j](hiddens_list[-1], training)\r\n hiddens_list.append(hiddens)\r\n proj_head_output = tf.identity(\r\n hiddens_list[-1], 'proj_head_output')\r\n proj_finetune_output = tf.identity(\r\n hiddens_list[self._ft_proj_idx], 
'proj_finetune_output')\r\n\r\n # The first element is the output of the projection head.\r\n # The second element is the input of the finetune head.\r\n return proj_head_output, proj_finetune_output\r\n\r\n\r\[email protected]_keras_serializable(package='simclr')\r\nclass ClassificationHead(tf.keras.layers.Layer):\r\n \"\"\"Classification Head.\"\"\"\r\n\r\n def __init__(\r\n self,\r\n num_classes: int,\r\n kernel_initializer: Text = 'random_uniform',\r\n kernel_regularizer: Optional[regularizers.Regularizer] = None,\r\n bias_regularizer: Optional[regularizers.Regularizer] = None,\r\n name: Text = 'head_supervised',\r\n **kwargs):\r\n \"\"\"The classification head used during pretraining or fine tuning.\r\n\r\n Args:\r\n num_classes: `int` size of the output dimension or number of classes\r\n for classification task.\r\n kernel_initializer: kernel_initializer for convolutional layers.\r\n kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.\r\n Default to None.\r\n bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.\r\n Default to None.\r\n name: `str`, name of the layer.\r\n **kwargs: keyword arguments to be passed.\r\n \"\"\"\r\n super(ClassificationHead, self).__init__(name=name, **kwargs)\r\n self._num_classes = num_classes\r\n self._kernel_initializer = kernel_initializer\r\n self._kernel_regularizer = kernel_regularizer\r\n self._bias_regularizer = bias_regularizer\r\n self._name = name\r\n\r\n def get_config(self):\r\n config = {\r\n 'num_classes': self._num_classes,\r\n 'kernel_initializer': self._kernel_initializer,\r\n 'kernel_regularizer': self._kernel_regularizer,\r\n 'bias_regularizer': self._bias_regularizer,\r\n }\r\n base_config = super(ClassificationHead, self).get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n\r\n def build(self, input_shape):\r\n self._dense0 = layers.Dense(\r\n units=self._num_classes,\r\n kernel_initializer=self._kernel_initializer,\r\n kernel_regularizer=self._kernel_regularizer,\r\n bias_regularizer=self._bias_regularizer,\r\n activation=None)\r\n super(ClassificationHead, self).build(input_shape)\r\n\r\n def call(self, inputs, training=None):\r\n inputs = self._dense0(inputs)\r\n return inputs\r\n", "\"\"\"Tests for google3.third_party.tensorflow_models.object_detection.meta_architectures.deepmac_meta_arch.\"\"\"\r\n\r\nimport functools\r\nimport unittest\r\n\r\nfrom absl.testing import parameterized\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nfrom object_detection.core import losses\r\nfrom object_detection.core import preprocessor\r\nfrom object_detection.meta_architectures import center_net_meta_arch\r\nfrom object_detection.meta_architectures import deepmac_meta_arch\r\nfrom object_detection.utils import tf_version\r\n\r\n\r\nclass DummyFeatureExtractor(center_net_meta_arch.CenterNetFeatureExtractor):\r\n\r\n def __init__(self,\r\n channel_means,\r\n channel_stds,\r\n bgr_ordering,\r\n num_feature_outputs,\r\n stride):\r\n self._num_feature_outputs = num_feature_outputs\r\n self._stride = stride\r\n super(DummyFeatureExtractor, self).__init__(\r\n channel_means=channel_means, channel_stds=channel_stds,\r\n bgr_ordering=bgr_ordering)\r\n\r\n def predict(self):\r\n pass\r\n\r\n def loss(self):\r\n pass\r\n\r\n def postprocess(self):\r\n pass\r\n\r\n def call(self, inputs):\r\n batch_size, input_height, input_width, _ = inputs.shape\r\n fake_output = tf.ones([\r\n batch_size, input_height // self._stride, input_width // self._stride,\r\n 64\r\n ], 
dtype=tf.float32)\r\n return [fake_output] * self._num_feature_outputs\r\n\r\n @property\r\n def out_stride(self):\r\n return self._stride\r\n\r\n @property\r\n def num_feature_outputs(self):\r\n return self._num_feature_outputs\r\n\r\n\r\nclass MockMaskNet(tf.keras.layers.Layer):\r\n\r\n def __call__(self, instance_embedding, pixel_embedding, training):\r\n return tf.zeros_like(pixel_embedding[:, :, :, 0]) + 0.9\r\n\r\n\r\ndef build_meta_arch(predict_full_resolution_masks=False, use_dice_loss=False):\r\n \"\"\"Builds the DeepMAC meta architecture.\"\"\"\r\n\r\n feature_extractor = DummyFeatureExtractor(\r\n channel_means=(1.0, 2.0, 3.0),\r\n channel_stds=(10., 20., 30.),\r\n bgr_ordering=False,\r\n num_feature_outputs=2,\r\n stride=4)\r\n image_resizer_fn = functools.partial(\r\n preprocessor.resize_to_range,\r\n min_dimension=128,\r\n max_dimension=128,\r\n pad_to_max_dimesnion=True)\r\n\r\n object_center_params = center_net_meta_arch.ObjectCenterParams(\r\n classification_loss=losses.WeightedSigmoidClassificationLoss(),\r\n object_center_loss_weight=1.0,\r\n min_box_overlap_iou=1.0,\r\n max_box_predictions=5,\r\n use_labeled_classes=False)\r\n\r\n if use_dice_loss:\r\n classification_loss = losses.WeightedDiceClassificationLoss(False)\r\n else:\r\n classification_loss = losses.WeightedSigmoidClassificationLoss()\r\n\r\n deepmac_params = deepmac_meta_arch.DeepMACParams(\r\n classification_loss=classification_loss,\r\n dim=8,\r\n task_loss_weight=1.0,\r\n pixel_embedding_dim=2,\r\n allowed_masked_classes_ids=[],\r\n mask_size=16,\r\n mask_num_subsamples=-1,\r\n use_xy=True,\r\n network_type='hourglass10',\r\n use_instance_embedding=True,\r\n num_init_channels=8,\r\n predict_full_resolution_masks=predict_full_resolution_masks,\r\n postprocess_crop_size=128,\r\n max_roi_jitter_ratio=0.0,\r\n roi_jitter_mode='random'\r\n )\r\n\r\n object_detection_params = center_net_meta_arch.ObjectDetectionParams(\r\n localization_loss=losses.L1LocalizationLoss(),\r\n offset_loss_weight=1.0,\r\n scale_loss_weight=0.1\r\n )\r\n\r\n return deepmac_meta_arch.DeepMACMetaArch(\r\n is_training=True,\r\n add_summaries=False,\r\n num_classes=6,\r\n feature_extractor=feature_extractor,\r\n object_center_params=object_center_params,\r\n deepmac_params=deepmac_params,\r\n object_detection_params=object_detection_params,\r\n image_resizer_fn=image_resizer_fn)\r\n\r\n\r\[email protected](tf_version.is_tf1(), 'Skipping TF2.X only test.')\r\nclass DeepMACUtilsTest(tf.test.TestCase):\r\n\r\n def test_subsample_trivial(self):\r\n \"\"\"Test subsampling masks.\"\"\"\r\n\r\n boxes = np.arange(4).reshape(4, 1) * np.ones((4, 4))\r\n masks = np.arange(4).reshape(4, 1, 1) * np.ones((4, 32, 32))\r\n weights = np.ones(4)\r\n classes = tf.one_hot(tf.range(4), depth=4)\r\n\r\n result = deepmac_meta_arch.subsample_instances(\r\n classes, weights, boxes, masks, 4)\r\n self.assertAllClose(result[0], classes)\r\n self.assertAllClose(result[1], weights)\r\n self.assertAllClose(result[2], boxes)\r\n self.assertAllClose(result[3], masks)\r\n\r\n\r\[email protected](tf_version.is_tf1(), 'Skipping TF2.X only test.')\r\nclass DeepMACMetaArchTest(tf.test.TestCase):\r\n\r\n def setUp(self): # pylint:disable=g-missing-super-call\r\n self.model = build_meta_arch()\r\n\r\n def test_mask_network(self):\r\n net = deepmac_meta_arch.MaskHeadNetwork('hourglass10', 8)\r\n\r\n out = net(tf.zeros((2, 4)), tf.zeros((2, 32, 32, 16)), training=True)\r\n self.assertEqual(out.shape, (2, 32, 32))\r\n\r\n def test_mask_network_hourglass20(self):\r\n net = 
deepmac_meta_arch.MaskHeadNetwork('hourglass20', 8)\r\n\r\n out = net(tf.zeros((2, 4)), tf.zeros((2, 32, 32, 16)), training=True)\r\n self.assertEqual(out.shape, (2, 32, 32))\r\n\r\n def test_mask_network_resnet(self):\r\n\r\n net = deepmac_meta_arch.MaskHeadNetwork('resnet4')\r\n\r\n out = net(tf.zeros((2, 4)), tf.zeros((2, 32, 32, 16)), training=True)\r\n self.assertEqual(out.shape, (2, 32, 32))\r\n\r\n def test_mask_network_resnet_tf_function(self):\r\n\r\n net = deepmac_meta_arch.MaskHeadNetwork('resnet8')\r\n call_func = tf.function(net.__call__)\r\n\r\n out = call_func(tf.zeros((2, 4)), tf.zeros((2, 32, 32, 16)), training=True)\r\n self.assertEqual(out.shape, (2, 32, 32))\r\n\r\n def test_get_mask_head_input(self):\r\n\r\n boxes = tf.constant([[0., 0., 0.25, 0.25], [0.75, 0.75, 1.0, 1.0]],\r\n dtype=tf.float32)\r\n\r\n pixel_embedding = np.zeros((32, 32, 4), dtype=np.float32)\r\n pixel_embedding[:16, :16] = 1.0\r\n pixel_embedding[16:, 16:] = 2.0\r\n pixel_embedding = tf.constant(pixel_embedding)\r\n\r\n mask_inputs = self.model._get_mask_head_input(boxes, pixel_embedding)\r\n self.assertEqual(mask_inputs.shape, (2, 16, 16, 6))\r\n\r\n y_grid, x_grid = tf.meshgrid(np.linspace(-1.0, 1.0, 16),\r\n np.linspace(-1.0, 1.0, 16), indexing='ij')\r\n for i in range(2):\r\n mask_input = mask_inputs[i]\r\n self.assertAllClose(y_grid, mask_input[:, :, 0])\r\n self.assertAllClose(x_grid, mask_input[:, :, 1])\r\n pixel_embedding = mask_input[:, :, 2:]\r\n self.assertAllClose(np.zeros((16, 16, 4)) + i + 1, pixel_embedding)\r\n\r\n def test_get_mask_head_input_no_crop_resize(self):\r\n\r\n model = build_meta_arch(predict_full_resolution_masks=True)\r\n boxes = tf.constant([[0., 0., 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\r\n dtype=tf.float32)\r\n\r\n pixel_embedding_np = np.random.randn(32, 32, 4).astype(np.float32)\r\n pixel_embedding = tf.constant(pixel_embedding_np)\r\n\r\n mask_inputs = model._get_mask_head_input(boxes, pixel_embedding)\r\n self.assertEqual(mask_inputs.shape, (2, 32, 32, 6))\r\n\r\n y_grid, x_grid = tf.meshgrid(np.linspace(-1.0, 1.0, 32),\r\n np.linspace(-1.0, 1.0, 32), indexing='ij')\r\n for i in range(2):\r\n mask_input = mask_inputs[i]\r\n self.assertAllClose(y_grid, mask_input[:, :, 0])\r\n self.assertAllClose(x_grid, mask_input[:, :, 1])\r\n pixel_embedding = mask_input[:, :, 2:]\r\n self.assertAllClose(pixel_embedding_np, pixel_embedding)\r\n\r\n def test_get_instance_embeddings(self):\r\n\r\n embeddings = np.zeros((32, 32, 2))\r\n embeddings[8, 8] = 1.0\r\n embeddings[24, 16] = 2.0\r\n embeddings = tf.constant(embeddings)\r\n\r\n boxes = tf.constant([[0., 0., 0.5, 0.5], [0.5, 0.0, 1.0, 1.0]])\r\n\r\n center_embeddings = self.model._get_instance_embeddings(boxes, embeddings)\r\n\r\n self.assertAllClose(center_embeddings, [[1.0, 1.0], [2.0, 2.0]])\r\n\r\n def test_get_groundtruth_mask_output(self):\r\n\r\n boxes = tf.constant([[0., 0., 0.25, 0.25], [0.75, 0.75, 1.0, 1.0]],\r\n dtype=tf.float32)\r\n masks = np.zeros((2, 32, 32), dtype=np.float32)\r\n masks[0, :16, :16] = 0.5\r\n masks[1, 16:, 16:] = 0.1\r\n masks = self.model._get_groundtruth_mask_output(boxes, masks)\r\n self.assertEqual(masks.shape, (2, 16, 16))\r\n\r\n self.assertAllClose(masks[0], np.zeros((16, 16)) + 0.5)\r\n self.assertAllClose(masks[1], np.zeros((16, 16)) + 0.1)\r\n\r\n def test_get_groundtruth_mask_output_crop_resize(self):\r\n\r\n model = build_meta_arch(predict_full_resolution_masks=True)\r\n boxes = tf.constant([[0., 0., 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\r\n dtype=tf.float32)\r\n masks = tf.ones((2, 32, 
32))\r\n masks = model._get_groundtruth_mask_output(boxes, masks)\r\n self.assertAllClose(masks, np.ones((2, 32, 32)))\r\n\r\n def test_per_instance_loss(self):\r\n\r\n model = build_meta_arch()\r\n model._mask_net = MockMaskNet()\r\n boxes = tf.constant([[0.0, 0.0, 0.25, 0.25], [0.75, 0.75, 1.0, 1.0]])\r\n masks = np.zeros((2, 32, 32), dtype=np.float32)\r\n masks[0, :16, :16] = 1.0\r\n masks[1, 16:, 16:] = 1.0\r\n masks = tf.constant(masks)\r\n\r\n loss = model._compute_per_instance_mask_loss(\r\n boxes, masks, tf.zeros((32, 32, 2)), tf.zeros((32, 32, 2)))\r\n self.assertAllClose(\r\n loss, np.zeros(2) - tf.math.log(tf.nn.sigmoid(0.9)))\r\n\r\n def test_per_instance_loss_no_crop_resize(self):\r\n\r\n model = build_meta_arch(predict_full_resolution_masks=True)\r\n model._mask_net = MockMaskNet()\r\n boxes = tf.constant([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]])\r\n masks = np.ones((2, 128, 128), dtype=np.float32)\r\n masks = tf.constant(masks)\r\n\r\n loss = model._compute_per_instance_mask_loss(\r\n boxes, masks, tf.zeros((32, 32, 2)), tf.zeros((32, 32, 2)))\r\n self.assertAllClose(\r\n loss, np.zeros(2) - tf.math.log(tf.nn.sigmoid(0.9)))\r\n\r\n def test_per_instance_loss_no_crop_resize_dice(self):\r\n\r\n model = build_meta_arch(predict_full_resolution_masks=True,\r\n use_dice_loss=True)\r\n model._mask_net = MockMaskNet()\r\n boxes = tf.constant([[0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0]])\r\n masks = np.ones((2, 128, 128), dtype=np.float32)\r\n masks = tf.constant(masks)\r\n\r\n loss = model._compute_per_instance_mask_loss(\r\n boxes, masks, tf.zeros((32, 32, 2)), tf.zeros((32, 32, 2)))\r\n pred = tf.nn.sigmoid(0.9)\r\n expected = (1.0 - ((2.0 * pred) / (1.0 + pred)))\r\n self.assertAllClose(loss, [expected, expected], rtol=1e-3)\r\n\r\n def test_empty_masks(self):\r\n boxes = tf.zeros([0, 4])\r\n masks = tf.zeros([0, 128, 128])\r\n\r\n loss = self.model._compute_per_instance_mask_loss(\r\n boxes, masks, tf.zeros((32, 32, 2)), tf.zeros((32, 32, 2)))\r\n self.assertEqual(loss.shape, (0,))\r\n\r\n def test_postprocess(self):\r\n\r\n model = build_meta_arch()\r\n model._mask_net = MockMaskNet()\r\n boxes = np.zeros((2, 3, 4), dtype=np.float32)\r\n boxes[:, :, [0, 2]] = 0.0\r\n boxes[:, :, [1, 3]] = 8.0\r\n boxes = tf.constant(boxes)\r\n\r\n masks = model._postprocess_masks(\r\n boxes, tf.zeros((2, 32, 32, 2)), tf.zeros((2, 32, 32, 2)))\r\n prob = tf.nn.sigmoid(0.9).numpy()\r\n self.assertAllClose(masks, prob * np.ones((2, 3, 16, 16)))\r\n\r\n def test_postprocess_no_crop_resize_shape(self):\r\n\r\n model = build_meta_arch(predict_full_resolution_masks=True)\r\n model._mask_net = MockMaskNet()\r\n boxes = np.zeros((2, 3, 4), dtype=np.float32)\r\n boxes[:, :, [0, 2]] = 0.0\r\n boxes[:, :, [1, 3]] = 8.0\r\n boxes = tf.constant(boxes)\r\n\r\n masks = model._postprocess_masks(\r\n boxes, tf.zeros((2, 32, 32, 2)), tf.zeros((2, 32, 32, 2)))\r\n prob = tf.nn.sigmoid(0.9).numpy()\r\n self.assertAllClose(masks, prob * np.ones((2, 3, 128, 128)))\r\n\r\n def test_crop_masks_within_boxes(self):\r\n masks = np.zeros((2, 32, 32))\r\n masks[0, :16, :16] = 1.0\r\n masks[1, 16:, 16:] = 1.0\r\n boxes = tf.constant([[0.0, 0.0, 15.0 / 32, 15.0 / 32],\r\n [0.5, 0.5, 1.0, 1]])\r\n masks = deepmac_meta_arch.crop_masks_within_boxes(\r\n masks, boxes, 128)\r\n masks = (masks.numpy() > 0.0).astype(np.float32)\r\n self.assertAlmostEqual(masks.sum(), 2 * 128 * 128)\r\n\r\n def test_transform_boxes_to_feature_coordinates(self):\r\n batch_size = 2\r\n model = build_meta_arch()\r\n model._mask_net = 
MockMaskNet()\r\n boxes = np.zeros((batch_size, 3, 4), dtype=np.float32)\r\n boxes[:, :, [0, 2]] = 0.1\r\n boxes[:, :, [1, 3]] = 0.5\r\n boxes = tf.constant(boxes)\r\n true_image_shapes = tf.constant([\r\n [64, 32, 3], # Image 1 is padded during resizing.\r\n [64, 64, 3], # Image 2 is not padded.\r\n ])\r\n resized_image_height = 64\r\n resized_image_width = 64\r\n resized_image_shape = [\r\n batch_size, resized_image_height, resized_image_width, 3\r\n ]\r\n\r\n feature_map_height = 32\r\n feature_map_width = 32\r\n instance_embedding = tf.zeros(\r\n (batch_size, feature_map_height, feature_map_width, 2))\r\n\r\n expected_boxes = np.array([\r\n [ # Image 1\r\n # 0.1 * (64 / resized_image_height) * feature_map_height -> 3.2\r\n # 0.5 * (32 / resized_image_width) * feature_map_width -> 8.0\r\n [3.2, 8., 3.2, 8.],\r\n [3.2, 8., 3.2, 8.],\r\n [3.2, 8., 3.2, 8.],\r\n ],\r\n [ # Image 2\r\n # 0.1 * (64 / resized_image_height) * feature_map_height -> 3.2\r\n # 0.5 * (64 / resized_image_width) * feature_map_width -> 16\r\n [3.2, 16., 3.2, 16.],\r\n [3.2, 16., 3.2, 16.],\r\n [3.2, 16., 3.2, 16.],\r\n ],\r\n ])\r\n\r\n box_strided = model._transform_boxes_to_feature_coordinates(\r\n boxes, true_image_shapes, resized_image_shape, instance_embedding)\r\n self.assertAllClose(box_strided, expected_boxes)\r\n\r\n def test_fc_tf_function(self):\r\n\r\n net = deepmac_meta_arch.MaskHeadNetwork('fully_connected', 8, mask_size=32)\r\n call_func = tf.function(net.__call__)\r\n\r\n out = call_func(tf.zeros((2, 4)), tf.zeros((2, 32, 32, 8)), training=True)\r\n self.assertEqual(out.shape, (2, 32, 32))\r\n\r\n\r\[email protected](tf_version.is_tf1(), 'Skipping TF2.X only test.')\r\nclass FullyConnectedMaskHeadTest(tf.test.TestCase):\r\n\r\n def test_fc_mask_head(self):\r\n head = deepmac_meta_arch.FullyConnectedMaskHead(512, 16)\r\n inputs = tf.random.uniform([100, 16, 16, 512])\r\n output = head(inputs)\r\n self.assertAllEqual([100, 16, 16, 1], output.numpy().shape)\r\n\r\n\r\[email protected](tf_version.is_tf1(), 'Skipping TF2.X only test.')\r\nclass ResNetMaskHeadTest(tf.test.TestCase, parameterized.TestCase):\r\n\r\n @parameterized.parameters(['resnet4', 'resnet8', 'resnet20'])\r\n def test_pass(self, name):\r\n net = deepmac_meta_arch.ResNetMaskNetwork(name, 8)\r\n out = net(tf.zeros((3, 32, 32, 16)))\r\n self.assertEqual(out.shape[:3], (3, 32, 32))\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.test.main()\r\n" ]
[ [ "tensorflow.TensorShape", "tensorflow.keras.constraints.get", "tensorflow.keras.activations.serialize", "tensorflow.keras.constraints.serialize", "tensorflow.keras.regularizers.get", "tensorflow.keras.initializers.serialize", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.keras.regularizers.serialize", "tensorflow.einsum", "tensorflow.keras.activations.get", "tensorflow.keras.initializers.get" ], [ "tensorflow.identity", "tensorflow.keras.utils.register_keras_serializable" ], [ "tensorflow.constant", "tensorflow.nn.sigmoid", "tensorflow.range", "tensorflow.zeros", "numpy.linspace", "numpy.arange", "tensorflow.random.uniform", "tensorflow.ones", "tensorflow.test.main", "numpy.ones", "tensorflow.zeros_like", "tensorflow.function", "numpy.random.randn", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.7", "2.6", "2.4", "2.3", "2.5", "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
DIAGNijmegen/nnUNet
[ "40093c30e0115a1d77736a6dfd302ffa2ff08a68" ]
[ "nnunet/evaluation/model_selection/ensemble.py" ]
[ "# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport shutil\nimport nnunet.utilities.shutil_sol as shutil_sol\nfrom multiprocessing.pool import Pool\n\nimport numpy as np\nfrom batchgenerators.utilities.file_and_folder_operations import *\nfrom nnunet.configuration import default_num_threads\nfrom nnunet.evaluation.evaluator import aggregate_scores\nfrom nnunet.inference.segmentation_export import save_segmentation_nifti_from_softmax\nfrom nnunet.paths import network_training_output_dir, preprocessing_output_dir\nfrom nnunet.postprocessing.connected_components import determine_postprocessing\n\n\ndef merge(args):\n file1, file2, properties_file, out_file = args\n if not isfile(out_file):\n res1 = np.load(file1)['softmax']\n res2 = np.load(file2)['softmax']\n props = load_pickle(properties_file)\n mn = np.mean((res1, res2), 0)\n # Softmax probabilities are already at target spacing so this will not do any resampling (resampling parameters\n # don't matter here)\n save_segmentation_nifti_from_softmax(mn, out_file, props, 3, None, None, None, force_separate_z=None,\n interpolation_order_z=0)\n\n\ndef ensemble(training_output_folder1, training_output_folder2, output_folder, task, validation_folder, folds, allow_ensembling: bool = True):\n print(\"\\nEnsembling folders\\n\", training_output_folder1, \"\\n\", training_output_folder2)\n\n output_folder_base = output_folder\n output_folder = join(output_folder_base, \"ensembled_raw\")\n\n # only_keep_largest_connected_component is the same for all stages\n dataset_directory = join(preprocessing_output_dir, task)\n plans = load_pickle(join(training_output_folder1, \"plans.pkl\")) # we need this only for the labels\n\n files1 = []\n files2 = []\n property_files = []\n out_files = []\n gt_segmentations = []\n\n folder_with_gt_segs = join(dataset_directory, \"gt_segmentations\")\n # in the correct shape and we need the original geometry to restore the niftis\n\n for f in folds:\n validation_folder_net1 = join(training_output_folder1, \"fold_%d\" % f, validation_folder)\n validation_folder_net2 = join(training_output_folder2, \"fold_%d\" % f, validation_folder)\n\n if not isdir(validation_folder_net1):\n raise AssertionError(\"Validation directory missing: %s. Please rerun validation with `nnUNet_train CONFIG TRAINER TASK FOLD -val --npz`\" % validation_folder_net1)\n if not isdir(validation_folder_net2):\n raise AssertionError(\"Validation directory missing: %s. Please rerun validation with `nnUNet_train CONFIG TRAINER TASK FOLD -val --npz`\" % validation_folder_net2)\n\n # we need to ensure the validation was successful. We can verify this via the presence of the summary.json file\n if not isfile(join(validation_folder_net1, 'summary.json')):\n raise AssertionError(\"Validation directory incomplete: %s. 
Please rerun validation with `nnUNet_train CONFIG TRAINER TASK FOLD -val --npz`\" % validation_folder_net1)\n if not isfile(join(validation_folder_net2, 'summary.json')):\n raise AssertionError(\"Validation directory missing: %s. Please rerun validation with `nnUNet_train CONFIG TRAINER TASK FOLD -val --npz`\" % validation_folder_net2)\n\n patient_identifiers1_npz = [i[:-4] for i in subfiles(validation_folder_net1, False, None, 'npz', True)]\n patient_identifiers2_npz = [i[:-4] for i in subfiles(validation_folder_net2, False, None, 'npz', True)]\n\n # we don't do postprocessing anymore so there should not be any of that noPostProcess\n patient_identifiers1_nii = [i[:-7] for i in subfiles(validation_folder_net1, False, None, suffix='nii.gz', sort=True) if not i.endswith(\"noPostProcess.nii.gz\") and not i.endswith('_postprocessed.nii.gz')]\n patient_identifiers2_nii = [i[:-7] for i in subfiles(validation_folder_net2, False, None, suffix='nii.gz', sort=True) if not i.endswith(\"noPostProcess.nii.gz\") and not i.endswith('_postprocessed.nii.gz')]\n\n if not all([i in patient_identifiers1_npz for i in patient_identifiers1_nii]):\n raise AssertionError(\"Missing npz files in folder %s. Please run the validation for all models and folds with the '--npz' flag.\" % (validation_folder_net1))\n if not all([i in patient_identifiers2_npz for i in patient_identifiers2_nii]):\n raise AssertionError(\"Missing npz files in folder %s. Please run the validation for all models and folds with the '--npz' flag.\" % (validation_folder_net2))\n\n patient_identifiers1_npz.sort()\n patient_identifiers2_npz.sort()\n\n assert all([i == j for i, j in zip(patient_identifiers1_npz, patient_identifiers2_npz)]), \"npz filenames do not match. This should not happen.\"\n\n maybe_mkdir_p(output_folder)\n\n for p in patient_identifiers1_npz:\n files1.append(join(validation_folder_net1, p + '.npz'))\n files2.append(join(validation_folder_net2, p + '.npz'))\n property_files.append(join(validation_folder_net1, p) + \".pkl\")\n out_files.append(join(output_folder, p + \".nii.gz\"))\n gt_segmentations.append(join(folder_with_gt_segs, p + \".nii.gz\"))\n\n p = Pool(default_num_threads)\n p.map(merge, zip(files1, files2, property_files, out_files))\n p.close()\n p.join()\n\n if not isfile(join(output_folder, \"summary.json\")) and len(out_files) > 0:\n aggregate_scores(tuple(zip(out_files, gt_segmentations)), labels=plans['all_classes'],\n json_output_file=join(output_folder, \"summary.json\"), json_task=task,\n json_name=task + \"__\" + output_folder_base.split(\"/\")[-1], num_threads=default_num_threads)\n\n if allow_ensembling and not isfile(join(output_folder_base, \"postprocessing.json\")):\n # now lets also look at postprocessing. 
We cannot just take what we determined in cross-validation and apply it\n # here because things may have changed and may also be too inconsistent between the two networks\n determine_postprocessing(output_folder_base, folder_with_gt_segs, \"ensembled_raw\", \"temp\",\n \"ensembled_postprocessed\", default_num_threads, dice_threshold=0)\n\n out_dir_all_json = join(network_training_output_dir, \"summary_jsons\")\n json_out = load_json(join(output_folder_base, \"ensembled_postprocessed\", \"summary.json\"))\n\n json_out[\"experiment_name\"] = output_folder_base.split(\"/\")[-1]\n save_json(json_out, join(output_folder_base, \"ensembled_postprocessed\", \"summary.json\"))\n\n maybe_mkdir_p(out_dir_all_json)\n shutil_sol.copyfile(join(output_folder_base, \"ensembled_postprocessed\", \"summary.json\"),\n join(out_dir_all_json, \"%s__%s.json\" % (task, output_folder_base.split(\"/\")[-1])))\n" ]
[ [ "numpy.load", "numpy.mean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
TuKJet/EAST
[ "82979147ae44bd8a97a49357c94674590b13648b" ]
[ "pyicdartools/TL_iou.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nsys.path.append('.')\nfrom collections import namedtuple\nfrom pyicdartools import rrc_evaluation_funcs\nimport importlib\nfrom shapely.geometry import Polygon as plg\nimport numpy as np\n\ndef evaluation_imports():\n \"\"\"\n evaluation_imports: Dictionary ( key = module name , value = alias ) with python modules used in the evaluation.\n \"\"\"\n return {\n 'Polygon':'plg',\n 'numpy':'np'\n }\n\ndef default_evaluation_params():\n \"\"\"\n default_evaluation_params: Default parameters to use for the validation and evaluation.\n \"\"\"\n return {\n 'IOU_CONSTRAINT' : 0.5,\n 'AREA_PRECISION_CONSTRAINT' : 0.5,\n 'GT_SAMPLE_NAME_2_ID':'X([0-9]+).txt',#'gt_img_([0-9]+).txt',\n 'DET_SAMPLE_NAME_2_ID':'X([0-9]+).txt',#'res_img_([0-9]+).txt',\n 'LTRB':False, #LTRB:2points(left,top,right,bottom) or 4 points(x1,y1,x2,y2,x3,y3,x4,y4)\n 'CRLF':False, # Lines are delimited by Windows CRLF format\n 'CONFIDENCES':False, #Detections must include confidence value. AP will be calculated\n 'PER_SAMPLE_RESULTS':True #Generate per sample results and produce data for visualization\n }\n\ndef validate_data(gtFilePath, submFilePath,evaluationParams):\n \"\"\"\n Method validate_data: validates that all files in the results folder are correct (have the correct name contents).\n Validates also that there are no missing files in the folder.\n If some error detected, the method raises the error\n \"\"\"\n gt = rrc_evaluation_funcs.load_zip_file(gtFilePath,evaluationParams['GT_SAMPLE_NAME_2_ID'])\n\n subm = rrc_evaluation_funcs.load_zip_file(submFilePath,evaluationParams['DET_SAMPLE_NAME_2_ID'],True)\n\n #Validate format of GroundTruth\n for k in gt:\n rrc_evaluation_funcs.validate_lines_in_file(k,gt[k],evaluationParams['CRLF'],evaluationParams['LTRB'],True)\n\n #Validate format of results\n for k in subm:\n if (k in gt) == False :\n raise Exception(\"The sample %s not present in GT\" %k)\n\n rrc_evaluation_funcs.validate_lines_in_file(k,subm[k],evaluationParams['CRLF'],evaluationParams['LTRB'],False,evaluationParams['CONFIDENCES'])\n\n\ndef evaluate_method(gtFilePath, submFilePath, evaluationParams):\n \"\"\"\n Method evaluate_method: evaluate method and returns the results\n Results. Dictionary with the following values:\n - method (required) Global method metrics. Ex: { 'Precision':0.8,'Recall':0.9 }\n - samples (optional) Per sample metrics. 
Ex: {'sample1' : { 'Precision':0.8,'Recall':0.9 } , 'sample2' : { 'Precision':0.8,'Recall':0.9 }\n \"\"\"\n \"\"\"\n for module,alias in evaluation_imports().items():\n globals()[alias] = importlib.import_module(module)\n \"\"\"\n def polygon_from_points(points):\n \"\"\"\n Returns a Polygon object to use with the Polygon2 class from a list of 8 points: x1,y1,x2,y2,x3,y3,x4,y4\n \"\"\"\n resBoxes=np.empty([1,8],dtype='int32')\n resBoxes[0,0]=int(points[0])\n resBoxes[0,4]=int(points[1])\n resBoxes[0,1]=int(points[2])\n resBoxes[0,5]=int(points[3])\n resBoxes[0,2]=int(points[4])\n resBoxes[0,6]=int(points[5])\n resBoxes[0,3]=int(points[6])\n resBoxes[0,7]=int(points[7])\n pointMat = resBoxes[0].reshape([2,4]).T\n return plg( pointMat)\n\n def rectangle_to_polygon(rect):\n resBoxes=np.empty([1,8],dtype='int32')\n resBoxes[0,0]=int(rect.xmin)\n resBoxes[0,4]=int(rect.ymax)\n resBoxes[0,1]=int(rect.xmin)\n resBoxes[0,5]=int(rect.ymin)\n resBoxes[0,2]=int(rect.xmax)\n resBoxes[0,6]=int(rect.ymin)\n resBoxes[0,3]=int(rect.xmax)\n resBoxes[0,7]=int(rect.ymax)\n\n pointMat = resBoxes[0].reshape([2,4]).T\n\n return plg( pointMat)\n\n def rectangle_to_points(rect):\n points = [int(rect.xmin), int(rect.ymax), int(rect.xmax), int(rect.ymax), int(rect.xmax), int(rect.ymin), int(rect.xmin), int(rect.ymin)]\n return points\n\n def get_union(pD,pG):\n areaA = pD.area;\n areaB = pG.area;\n return areaA + areaB - get_intersection(pD, pG);\n\n def get_intersection_over_union(pD,pG):\n try:\n return get_intersection(pD, pG) / get_union(pD, pG);\n except:\n return 0\n\n def get_intersection(pD,pG):\n pInt = pD & pG\n try:\n if len(pInt) == 0:\n return 0\n except:\n return pInt.area\n\n def compute_ap(confList, matchList,numGtCare):\n correct = 0\n AP = 0\n if len(confList)>0:\n confList = np.array(confList)\n matchList = np.array(matchList)\n sorted_ind = np.argsort(-confList)\n confList = confList[sorted_ind]\n matchList = matchList[sorted_ind]\n for n in range(len(confList)):\n match = matchList[n]\n if match:\n correct += 1\n AP += float(correct)/(n + 1)\n\n if numGtCare>0:\n AP /= numGtCare\n\n return AP\n\n perSampleMetrics = {}\n\n matchedSum = 0\n\n Rectangle = namedtuple('Rectangle', 'xmin ymin xmax ymax')\n\n gt = rrc_evaluation_funcs.load_zip_file(gtFilePath,evaluationParams['GT_SAMPLE_NAME_2_ID'])\n subm = rrc_evaluation_funcs.load_zip_file(submFilePath,evaluationParams['DET_SAMPLE_NAME_2_ID'],True)\n\n numGlobalCareGt = 0;\n numGlobalCareDet = 0;\n\n arrGlobalConfidences = [];\n arrGlobalMatches = [];\n\n for ids, resFile in enumerate(gt):\n\n gtFile = rrc_evaluation_funcs.decode_utf8(gt[resFile])\n recall = 0\n precision = 0\n hmean = 0\n\n detMatched = 0\n\n iouMat = np.empty([1,1])\n\n gtPols = []\n detPols = []\n\n gtPolPoints = []\n detPolPoints = []\n\n #Array of Ground Truth Polygons' keys marked as don't Care\n gtDontCarePolsNum = []\n #Array of Detected Polygons' matched with a don't Care GT\n detDontCarePolsNum = []\n\n pairs = []\n detMatchedNums = []\n\n arrSampleConfidences = [];\n arrSampleMatch = [];\n sampleAP = 0;\n\n evaluationLog = \"\"\n\n pointsList,_,transcriptionsList = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(gtFile,evaluationParams['CRLF'],evaluationParams['LTRB'],True,False)\n for n in range(len(pointsList)):\n points = pointsList[n]\n transcription = transcriptionsList[n]\n dontCare = transcription == \"###\"\n if evaluationParams['LTRB']:\n gtRect = Rectangle(*points)\n gtPol = rectangle_to_polygon(gtRect)\n else:\n gtPol = polygon_from_points(points)\n 
gtPols.append(gtPol)\n gtPolPoints.append(points)\n if dontCare:\n gtDontCarePolsNum.append( len(gtPols)-1 )\n\n evaluationLog += \"GT polygons: \" + str(len(gtPols)) + (\" (\" + str(len(gtDontCarePolsNum)) + \" don't care)\\n\" if len(gtDontCarePolsNum)>0 else \"\\n\")\n\n if resFile in subm:\n\n detFile = rrc_evaluation_funcs.decode_utf8(subm[resFile])\n\n pointsList,confidencesList,_ = rrc_evaluation_funcs.get_tl_line_values_from_file_contents(detFile,evaluationParams['CRLF'],evaluationParams['LTRB'],False,evaluationParams['CONFIDENCES'])\n for n in range(len(pointsList)):\n points = pointsList[n]\n\n if evaluationParams['LTRB']:\n detRect = Rectangle(*points)\n detPol = rectangle_to_polygon(detRect)\n else:\n detPol = polygon_from_points(points)\n detPols.append(detPol)\n detPolPoints.append(points)\n if len(gtDontCarePolsNum)>0 :\n for dontCarePol in gtDontCarePolsNum:\n dontCarePol = gtPols[dontCarePol]\n intersected_area = get_intersection(dontCarePol,detPol)\n pdDimensions = detPol.area\n precision = 0 if pdDimensions == 0 else intersected_area / pdDimensions\n if (precision > evaluationParams['AREA_PRECISION_CONSTRAINT'] ):\n detDontCarePolsNum.append( len(detPols)-1 )\n break\n\n evaluationLog += \"DET polygons: \" + str(len(detPols)) + (\" (\" + str(len(detDontCarePolsNum)) + \" don't care)\\n\" if len(detDontCarePolsNum)>0 else \"\\n\")\n\n if len(gtPols)>0 and len(detPols)>0:\n #Calculate IoU and precision matrixs\n outputShape=[len(gtPols),len(detPols)]\n iouMat = np.empty(outputShape)\n gtRectMat = np.zeros(len(gtPols),np.int8)\n detRectMat = np.zeros(len(detPols),np.int8)\n for gtNum in range(len(gtPols)):\n for detNum in range(len(detPols)):\n pG = gtPols[gtNum]\n pD = detPols[detNum]\n iouMat[gtNum,detNum] = get_intersection_over_union(pD,pG)\n\n for gtNum in range(len(gtPols)):\n match = False;\n for detNum in range(len(detPols)):\n if gtRectMat[gtNum] == 0 and detRectMat[detNum] == 0 and gtNum not in gtDontCarePolsNum and detNum not in detDontCarePolsNum :\n if iouMat[gtNum,detNum]>evaluationParams['IOU_CONSTRAINT']:\n gtRectMat[gtNum] = 1\n detRectMat[detNum] = 1\n detMatched += 1\n pairs.append({'gt':gtNum,'det':detNum})\n detMatchedNums.append(detNum)\n evaluationLog += \"Match GT #\" + str(gtNum) + \" with Det #\" + str(detNum) + \"\\n\"\n match = True\n\n if evaluationParams['CONFIDENCES']:\n for detNum in range(len(detPols)):\n if detNum not in detDontCarePolsNum :\n #we exclude the don't care detections\n match = detNum in detMatchedNums\n\n arrSampleConfidences.append(confidencesList[detNum])\n arrSampleMatch.append(match)\n\n arrGlobalConfidences.append(confidencesList[detNum]);\n arrGlobalMatches.append(match);\n\n numGtCare = (len(gtPols) - len(gtDontCarePolsNum))\n numDetCare = (len(detPols) - len(detDontCarePolsNum))\n if numGtCare == 0:\n recall = float(1)\n precision = float(0) if numDetCare >0 else float(1)\n sampleAP = precision\n else:\n recall = float(detMatched) / numGtCare\n precision = 0 if numDetCare==0 else float(detMatched) / numDetCare\n if evaluationParams['CONFIDENCES'] and evaluationParams['PER_SAMPLE_RESULTS']:\n sampleAP = compute_ap(arrSampleConfidences, arrSampleMatch, numGtCare )\n\n hmean = 0 if (precision + recall)==0 else 2.0 * precision * recall / (precision + recall)\n print('=='*28)\n print('ID:{:3d} P {:3d}% R {:3d}% Hmean {:3d}% Matched:{:2d} GT:{:2d} Det:{:2d}'.format(ids+1, int(precision*100), int(recall*100), int(hmean*100),detMatched, numGtCare, numDetCare))\n matchedSum += detMatched\n numGlobalCareGt += numGtCare\n 
numGlobalCareDet += numDetCare\n\n if evaluationParams['PER_SAMPLE_RESULTS']:\n perSampleMetrics[resFile] = {\n 'precision':precision,\n 'recall':recall,\n 'hmean':hmean,\n 'pairs':pairs,\n 'AP':sampleAP,\n 'iouMat':[] if len(detPols)>100 else iouMat.tolist(),\n 'gtPolPoints':gtPolPoints,\n 'detPolPoints':detPolPoints,\n 'gtDontCare':gtDontCarePolsNum,\n 'detDontCare':detDontCarePolsNum,\n 'evaluationParams': evaluationParams,\n 'evaluationLog': evaluationLog\n }\n\n # Compute MAP and MAR\n AP = 0\n if evaluationParams['CONFIDENCES']:\n AP = compute_ap(arrGlobalConfidences, arrGlobalMatches, numGlobalCareGt)\n\n methodRecall = 0 if numGlobalCareGt == 0 else float(matchedSum)/numGlobalCareGt\n methodPrecision = 0 if numGlobalCareDet == 0 else float(matchedSum)/numGlobalCareDet\n methodHmean = 0 if methodRecall + methodPrecision==0 else 2* methodRecall * methodPrecision / (methodRecall + methodPrecision)\n\n methodMetrics = {'precision':methodPrecision, 'recall':methodRecall,'hmean': methodHmean, 'AP': AP }\n\n resDict = {'calculated':True,'Message':'','method': methodMetrics,'per_sample': perSampleMetrics}\n\n\n return resDict;\n\n\n\nif __name__=='__main__':\n rrc_evaluation_funcs.main_evaluation(None,default_evaluation_params,validate_data,evaluate_method)\n" ]
[ [ "numpy.argsort", "numpy.array", "numpy.empty" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
morgannewellsun/Reverse-Conway
[ "1750bf6ab96b8d6ccc1d8905264097e091adb82c" ]
[ "src/data/tests/verify_kaggle_training.py" ]
[ "# Run this script from the dir Reverse-Conway/src.\nimport sys\nsys.path.append('.')\n\nimport pandas as pd\nimport numpy as np\nimport time\nfrom components.binary_conway_forward_prop_fn import BinaryConwayForwardPropFn\n\ndef timing(msg):\n global prev_t\n t = time.time()\n t_sec = round(t - prev_t)\n (t_min, t_sec) = divmod(t_sec, 60)\n (t_hour, t_min) = divmod(t_min, 60)\n prev_t = t\n print('{} - {}:{}:{}'.format(msg, t_hour, t_min, t_sec))\n\n\n\ndef verify_by_arr_rep():\n binary_prop = BinaryConwayForwardPropFn(numpy_mode=True)\n board_size = 25 * 25\n for idx, row in data.iterrows():\n delta = row[0]\n s_arr = row[1:(board_size+1)]\n k_arr = row[(board_size+1):]\n s_ten = np.array(s_arr).reshape((1, 25, 25, 1))\n k_ten = np.array(k_arr)\n m_ten = binary_prop(s_ten, delta)\n model_res = m_ten.flatten().astype(int)\n if sum(abs(k_ten - model_res)) > 0:\n raise Exception('Failed to match game {}:'.format(idx))\n timing('All loaded training data are verified.')\n\n\n\nmax_csv_rows = 100000\nkaggle_root = '../../gamelife_data/kaggle/'\ndata = pd.read_csv(kaggle_root + 'train.csv', index_col=0, dtype='int', nrows=max_csv_rows)\n\nprev_t = time.time()\nverify_by_arr_rep()\n" ]
[ [ "numpy.array", "pandas.read_csv" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0", "1.4", "1.1", "1.5", "1.2", "1.3" ], "scipy": [], "tensorflow": [] } ]
qzhu2017/PyXtal
[ "c9c9b109f0a258ca992f59455eb57c9577ef80bc" ]
[ "pyxtal/representation.py" ]
[ "import os\nimport numpy as np\nfrom pyxtal.symmetry import Group\nfrom pyxtal.lattice import Lattice\nfrom pyxtal.wyckoff_site import mol_site\nfrom pyxtal.molecule import find_id_from_smile\n \nclass representation():\n \"\"\"\n A class to handle the 1D representation of molecular crystal \n Works for Zprime > 1\n\n Args:\n x: a list of [cell, site_1, site_2, ...]\n smiles: a list of [smile_1, smile_2, ...]\n \"\"\"\n\n def __init__(self, x, smiles=None):\n if smiles is not None:\n self.smiles = []\n for i, smile in enumerate(smiles):\n if smile.endswith('.smi'): \n smile = smile[:-4]\n self.smiles.append(smile)\n else:\n self.smiles = None\n self.x = x\n\n def __str__(self):\n return self.to_string()\n\n @classmethod\n def from_pyxtal(cls, struc):\n \"\"\"\n Initialize 1D rep. from the pyxtal object\n\n Args:\n struc: pyxtal object\n \"\"\"\n symmetry = [struc.group.number, struc.diag]\n lat = struc.lattice.encode()\n vector = [symmetry + lat]\n smiles = []\n for site in struc.mol_sites:\n vector.append(site.encode())\n smiles.append(site.molecule.smile)\n x = vector\n return cls(x, smiles)\n \n @classmethod\n def from_string(cls, inputs, smiles, composition=None):\n \"\"\"\n Initialize 1D rep. from the string\n\n Args:\n inputs: input string \n smiles: list of smiles\n \"\"\"\n #parse the cell\n if composition is None:\n composition = [1] * len(smiles)\n\n inputs = [float(tmp) for tmp in inputs.split()]\n g, diag = int(inputs[0]), int(inputs[1])\n if g <= 2:\n n_cell = 8\n elif g <= 15:\n n_cell = 6\n elif g <= 74:\n n_cell = 5\n elif g <= 194:\n n_cell = 4\n else:\n n_cell = 3 #cubic\n cell = [g, diag] + inputs[2:n_cell]\n \n x = [cell]\n n_site = int(inputs[n_cell])\n if n_site != sum(composition):\n msg = \"Composition is inconsistent: {:d}/{:d}\\n\".format(sum(composition), n_site)\n msg += str(inputs)\n raise ValueError(msg)\n n_cell += 1\n\n for i, smile in enumerate(smiles):\n if smile.endswith('.smi'): \n smile=smile[:-4]\n for c in range(composition[i]):\n if smile in [\"Cl-\"]:\n n_mol = 4\n else:\n n_torsion = len(find_id_from_smile(smile))\n n_mol = 7 + n_torsion\n inputs[n_cell+n_mol-1] = int(inputs[n_cell+n_mol-1])\n x.append(inputs[n_cell:n_cell+n_mol])\n n_cell += n_mol\n return cls(x, smiles)\n \n def to_pyxtal(self, smiles=None, composition=None):\n \"\"\"\n Export the pyxtal structure\n\n Args:\n smiles: list of smiles\n compoisition: list of composition\n \"\"\"\n from pyxtal import pyxtal\n if smiles is None:\n smiles = self.smiles\n\n if composition is None:\n composition = [1] * len(smiles)\n\n if sum(composition) + 1 != len(self.x):\n msg = \"Composition is inconsistent:\\n\"\n msg += str(composition) + \"\\n\"\n msg += self.to_string()\n raise ValueError(msg)\n\n # symmetry\n v = self.x[0]\n struc = pyxtal(molecular=True)\n struc.group, number, struc.diag = Group(v[0]), v[0], v[1]\n \n # lattice\n ltype = struc.group.lattice_type\n if ltype == 'triclinic':\n a, b, c, alpha, beta, gamma = v[2], v[3], v[4], v[5], v[6], v[7]\n elif ltype == 'monoclinic':\n a, b, c, alpha, beta, gamma = v[2], v[3], v[4], 90, v[5], 90\n elif ltype == 'orthorhombic':\n a, b, c, alpha, beta, gamma = v[2], v[3], v[4], 90, 90, 90\n elif ltype == 'tetragonal':\n a, b, c, alpha, beta, gamma = v[2], v[2], v[3], 90, 90, 90\n elif ltype == 'hexagonal':\n a, b, c, alpha, beta, gamma = v[2], v[2], v[3], 90, 90, 120\n else:\n a, b, c, alpha, beta, gamma = v[2], v[2], v[2], 90, 90, 90\n try:\n struc.lattice = Lattice.from_para(a, b, c, alpha, beta, gamma, ltype=ltype)\n except:\n print(a, b, 
c, alpha, beta, gamma, ltype)\n raise ValueError(\"Problem in Lattice\")\n \n # sites\n struc.numMols = [0] * len(smiles) \n struc.molecules = []\n struc.mol_sites = [] \n\n count = 1\n for i, comp in enumerate(composition): \n smile = smiles[i]\n if smile.endswith('.smi'): smile=smile[:-4]\n for j in range(comp):\n v = self.x[count]\n dicts = {}\n dicts['smile'] = smile\n dicts['dim'] = 3\n dicts['PBC'] = [1, 1, 1]\n dicts['number'] = number\n dicts['diag'] = struc.diag\n dicts['index'] = 0\n dicts['lattice'] = struc.lattice.matrix\n dicts['lattice_type'] = ltype\n dicts['center'] = v[:3]\n if smile not in [\"Cl-\"]:\n dicts['orientation'] = np.array(v[3:6])\n dicts['rotor'] = v[6:-1]\n dicts['reflect'] = int(v[-1])\n site = mol_site.from_1D_dicts(dicts)\n struc.mol_sites.append(site)\n struc.numMols[i] += site.wp.multiplicity\n #move to next rep\n count += 1\n struc.molecules.append(site.molecule)\n\n struc._get_formula()\n struc.source = '1D rep.'\n struc.valid = True\n\n return struc\n \n def to_string(self, time=None, eng=None, tag=None):\n \"\"\"\n Export string representation\n\n Args:\n time: float\n eng: float\n tag: string\n \"\"\"\n x = self.x\n strs = \"{:3d} {:d} \".format(int(x[0][0]), int(x[0][1]))\n\n # data for cell\n if x[0][0] <= 74:\n num = 5\n elif x[0][0] <=194:\n num = 4\n else:\n num = 3\n\n for c in x[0][2:num]:\n strs += \"{:5.2f} \".format(c)\n for c in x[0][num:]:\n strs += \"{:5.1f} \".format(c)\n \n # data for molecule\n strs += \"{:d} \".format(len(x)-1)\n for i in range(1, len(x)):\n for v in x[i][:3]:\n strs += \"{:4.2f} \".format(v) \n for v in x[i][3:-1]:\n strs += \"{:6.1f} \".format(v) \n strs += \"{:d} \".format(int(x[i][-1]))\n\n if time is not None:\n strs += \"{:5.2f}\".format(time)\n\n if eng is not None:\n strs += \"{:11.3f}\".format(eng)\n \n if tag is not None:\n strs += \" {:s}\".format(tag)\n \n return strs\n\n\n\nif __name__ == \"__main__\":\n\n #aspirin\n smiles = ['CC(=O)OC1=CC=CC=C1C(=O)O']\n x = [[14,False,11.43,6.49,11.19,83.31],[0.77,0.57,0.53,48.55,24.31,145.94,-77.85,-4.40,170.86,False]]\n #rep0 = representation(x, smiles)\n #print(rep0.to_string())\n rep1 = representation(x, smiles)\n xtal = rep1.to_pyxtal()\n print(xtal)\n rep2 = representation.from_pyxtal(xtal)\n print(rep2.to_pyxtal())\n print(rep2.to_string())\n string = \"14 0 11.43 6.49 11.19 83.31 1 0.77 0.57 0.53 48.55 24.31 145.9 -77.85 -4.40 170.9 0\"\n rep3 = representation.from_string(string, smiles)\n print(rep3.to_pyxtal())\n" ]
[ [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
ryuikaneko/tight_binding_shell_condition
[ "37ed5f1497b6e757873831ea515a29e4a9f2e50e" ]
[ "2d_triangular/filling_1over2_BC_P_P/2d_triangular.py" ]
[ "#!/usr/bin/env python\n\n# coding:utf-8\nfrom __future__ import print_function\nimport numpy as np\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\ndef ene(kx,ky):\n return -2.0*(np.cos(kx)+np.cos(ky)+np.cos(kx+ky))\n\ndef calc_k_ene(Lx,Ly,BCx,BCy):\n if BCx == 'AP' or BCx == 'antiperiodic':\n xshift = 0.5\n elif BCx == 'P' or BCx == 'periodic':\n xshift = 0.0\n else:\n xshift = 0.0\n if BCy == 'AP' or BCy == 'antiperiodic':\n yshift = 0.5\n elif BCy == 'P' or BCy == 'periodic':\n yshift = 0.0\n else:\n yshift = 0.0\n list_kx = np.array([2.0*np.pi*((x+xshift)/Lx-float(Lx//2)/Lx) for x in range(Lx)])\n list_ky = np.array([2.0*np.pi*((y+yshift)/Ly-float(Ly//2)/Ly) for y in range(Ly)])\n list_enekxky = np.array([ene(kx,ky) for ky in list_ky for kx in list_kx])\n list_intkxky = np.array([Lx*y+x for y in range(Ly) for x in range(Lx)])\n return list_enekxky, list_intkxky, xshift, yshift\n\ndef calc_shell_cond(Lx,Ly,BCx,BCy,filling_numer,filling_denom):\n filling = float(filling_numer)/float(filling_denom)\n numel = Lx*Ly*filling_numer//filling_denom\n list_enekxky, list_intkxky, xshift, yshift = calc_k_ene(Lx,Ly,BCx,BCy)\n list_ind = np.argsort(list_enekxky)\n list_sorted_enekxky = list_enekxky[list_ind]\n list_sorted_intkxky = list_intkxky[list_ind]\n chemipo = 0.5*(list_sorted_enekxky[numel] + list_sorted_enekxky[numel-1])\n totene = np.sum(list_sorted_enekxky[:numel])\n gap = list_sorted_enekxky[numel] - list_sorted_enekxky[numel-1]\n if np.abs(gap)>1e-10:\n shellcond = 'closed'\n else:\n shellcond = 'open'\n return filling, numel, chemipo, totene, gap, shellcond, \\\n list_sorted_enekxky, list_sorted_intkxky, xshift, yshift\n\ndef main():\n BCx = 'P'\n BCy = 'P'\n filling_numer = 1\n filling_denom = 2\n list_L = []\n list_enedens = []\n file = open(\"dat_2d_triangular\",'w')\n file.write(\"# L filling(=n/2) BCx BCy num_electrons(=nup=ndown) chemi_potential ene ene_dens gap shell_cond\\n\")\n for L in range(2,50,2):\n Lx = L\n Ly = L\n filling, numel, chemipo, totene, gap, shellcond, \\\n list_enekxky, list_intkxky, xshift, yshift = \\\n calc_shell_cond(Lx,Ly,BCx,BCy,filling_numer,filling_denom)\n list_L.append(L)\n list_enedens.append(totene/Lx/Ly)\n file.write(\"{} {} {} {} {} {} {} {} {} {}\\n\".format(\\\n L,filling,BCx,BCy,numel,chemipo,totene,totene/Lx/Ly,gap,shellcond))\n file.close()\n\n list_L = np.array(list_L)\n list_enedens = np.array(list_enedens)\n plt.xlabel(\"1/L^2\")\n plt.ylabel(\"E/L^2\")\n plt.plot(1.0/list_L**2,list_enedens,color='blue',marker='o',markerfacecolor='white')\n plt.savefig(\"fig_2d_triangular_enedens.png\")\n plt.cla()\n plt.clf()\n\n L = 30\n Lx = L\n Ly = L\n filling, numel, chemipo, totene, gap, shellcond, \\\n list_enekxky, list_intkxky, xshift, yshift = \\\n calc_shell_cond(Lx,Ly,BCx,BCy,filling_numer,filling_denom)\n list_intkx = list_intkxky%Lx\n list_intky = list_intkxky//Lx\n list_kx = (list_intkx.astype(np.float64)+xshift)/Lx-float(Lx//2)/Lx\n list_ky = (list_intky.astype(np.float64)+yshift)/Ly-float(Ly//2)/Ly\n plt.xlabel(\"kx/pi\")\n plt.ylabel(\"ky/pi\")\n plt.xticks([-0.5,-0.25,0,0.25,0.5])\n plt.yticks([-0.5,-0.25,0,0.25,0.5])\n plt.xlim(-0.55,0.55)\n plt.ylim(-0.55,0.55)\n## https://stackoverflow.com/questions/17990845/how-to-equalize-the-scales-of-x-axis-and-y-axis-in-python-matplotlib\n# plt.axis('equal')\n plt.gca().set_aspect('equal',adjustable='box')\n plt.plot(list_kx,list_ky,color='blue',marker='o',\\\n markerfacecolor='white',linestyle='None')\n 
plt.plot(list_kx[:numel],list_ky[:numel],color='blue',marker='o',\\\n markerfacecolor='blue',linestyle='None')\n plt.savefig(\"fig_2d_triangular_fermisurface.png\")\n plt.cla()\n plt.clf()\n\n L = 2**9\n Lx = L\n Ly = L\n nbins = L//2\n filling, numel, chemipo, totene, gap, shellcond, \\\n list_enekxky, list_intkxky, xshift, yshift = \\\n calc_shell_cond(Lx,Ly,BCx,BCy,filling_numer,filling_denom)\n plt.xlabel(\"E\")\n plt.ylabel(\"DOS\")\n plt.hist(list_enekxky-chemipo,bins=nbins,density=True)\n plt.savefig(\"fig_2d_triangular_dos.png\")\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.yticks", "numpy.abs", "matplotlib.use", "matplotlib.pyplot.cla", "matplotlib.pyplot.ylim", "numpy.cos", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.hist", "numpy.argsort", "matplotlib.pyplot.xticks", "numpy.array", "numpy.sum", "matplotlib.pyplot.ylabel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
njzr/DadmaTools
[ "b26ad8aa834f642d49bd120bd7cf1fdf40741be1", "b26ad8aa834f642d49bd120bd7cf1fdf40741be1", "b26ad8aa834f642d49bd120bd7cf1fdf40741be1", "b26ad8aa834f642d49bd120bd7cf1fdf40741be1", "b26ad8aa834f642d49bd120bd7cf1fdf40741be1", "b26ad8aa834f642d49bd120bd7cf1fdf40741be1" ]
[ "dadmatools/models/flair/parser/modules/scalar_mix.py", "dadmatools/models/tokenization/utils.py", "dadmatools/models/flair/algorithms/.ipynb_checkpoints/maximum_spanning_tree-checkpoint.py", "dadmatools/models/flair/visual/training_curves.py", "dadmatools/models/flair/models/.ipynb_checkpoints/ensemble_model-checkpoint.py", "dadmatools/datasets/datasets/persent/persent.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\n\n\nclass ScalarMix(nn.Module):\n\n def __init__(self, n_layers, dropout=0):\n super(ScalarMix, self).__init__()\n\n self.n_layers = n_layers\n self.dropout = dropout\n\n self.weights = nn.Parameter(torch.zeros(n_layers))\n self.gamma = nn.Parameter(torch.tensor([1.0]))\n self.dropout = nn.Dropout(dropout)\n\n def extra_repr(self):\n s = f\"n_layers={self.n_layers}\"\n if self.dropout.p > 0:\n s += f\", dropout={self.dropout.p}\"\n\n return s\n\n def forward(self, tensors):\n normed_weights = self.dropout(self.weights.softmax(-1))\n weighted_sum = sum(w * h for w, h in zip(normed_weights, tensors))\n\n return self.gamma * weighted_sum\n", "from collections import Counter\nfrom copy import copy\nimport json\nimport numpy as np\nimport re\nimport logging\n\nfrom dadmatools.models.common.utils import ud_scores, harmonic_mean\nfrom dadmatools.utils.conll import CoNLL\nfrom dadmatools.models.common.doc import *\n\nlogger = logging.getLogger('stanza')\n\ndef load_mwt_dict(filename):\n if filename is not None:\n with open(filename, 'r') as f:\n mwt_dict0 = json.load(f)\n\n mwt_dict = dict()\n for item in mwt_dict0:\n (key, expansion), count = item\n\n if key not in mwt_dict or mwt_dict[key][1] < count:\n mwt_dict[key] = (expansion, count)\n\n return mwt_dict\n else:\n return\n\ndef process_sentence(sentence, mwt_dict=None):\n sent = []\n i = 0\n for tok, p, position_info in sentence:\n expansion = None\n if (p == 3 or p == 4) and mwt_dict is not None:\n # MWT found, (attempt to) expand it!\n if tok in mwt_dict:\n expansion = mwt_dict[tok][0]\n elif tok.lower() in mwt_dict:\n expansion = mwt_dict[tok.lower()][0]\n if expansion is not None:\n sent.append({ID: (i+1, i+len(expansion)), TEXT: tok})\n if position_info is not None:\n sent[-1][START_CHAR] = position_info[0]\n sent[-1][END_CHAR] = position_info[1]\n for etok in expansion:\n sent.append({ID: (i+1, ), TEXT: etok})\n i += 1\n else:\n if len(tok) <= 0:\n continue\n sent.append({ID: (i+1, ), TEXT: tok})\n if position_info is not None:\n sent[-1][START_CHAR] = position_info[0]\n sent[-1][END_CHAR] = position_info[1]\n if p == 3 or p == 4:# MARK\n sent[-1][MISC] = 'MWT=Yes'\n i += 1\n return sent\n\n\n# https://stackoverflow.com/questions/201323/how-to-validate-an-email-address-using-a-regular-expression\nEMAIL_RAW_RE = r\"\"\"(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\"(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21\\x23-\\x5b\\x5d-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])*\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\\[(?:(?:(?:2(?:5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\\.){3}(?:(?:2(?:5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x21-\\x5a\\x53-\\x7f]|\\\\[\\x01-\\x09\\x0b\\x0c\\x0e-\\x7f])+)\\])\"\"\"\n\n# https://stackoverflow.com/questions/3809401/what-is-a-good-regular-expression-to-match-a-url\n# modification: disallow \" as opposed to all ^\\s\nURL_RAW_RE = r\"\"\"(?:https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s\"]{2,}|www\\.[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\\.[^\\s\"]{2,}|https?:\\/\\/(?:www\\.|(?!www))[a-zA-Z0-9]+\\.[^\\s\"]{2,}|www\\.[a-zA-Z0-9]+\\.[^\\s\"]{2,})\"\"\"\n\nMASK_RE = re.compile(f\"(?:{EMAIL_RAW_RE}|{URL_RAW_RE})\")\n\ndef find_spans(raw):\n \"\"\"\n Return spans of text which don't contain <PAD> and are split by <PAD>\n \"\"\"\n pads = [idx for idx, char in enumerate(raw) if char == '<PAD>']\n if len(pads) 
== 0:\n spans = [(0, len(raw))]\n else:\n prev = 0\n spans = []\n for pad in pads:\n if pad != prev:\n spans.append( (prev, pad) )\n prev = pad + 1\n if prev < len(raw):\n spans.append( (prev, len(raw)) )\n return spans\n\ndef update_pred_regex(raw, pred):\n \"\"\"\n Update the results of a tokenization batch by checking the raw text against a couple regular expressions\n\n Currently, emails and urls are handled\n TODO: this might work better as a constraint on the inference\n\n for efficiency pred is modified in place\n \"\"\"\n spans = find_spans(raw)\n\n for span_begin, span_end in spans:\n text = \"\".join(raw[span_begin:span_end])\n for match in MASK_RE.finditer(text):\n match_begin, match_end = match.span()\n # first, update all characters touched by the regex to not split\n # with the exception of the last character...\n for char in range(match_begin+span_begin, match_end+span_begin-1):\n pred[char] = 0\n # if the last character is not currently a split, make it a word split\n if pred[match_end+span_begin-1] == 0:\n pred[match_end+span_begin-1] = 1\n\n return pred\n\nSPACE_RE = re.compile(r'\\s')\nSPACE_SPLIT_RE = re.compile(r'( *[^ ]+)')\n\ndef output_predictions(output_file, trainer, data_generator, vocab, mwt_dict, max_seqlen=1000, orig_text=None, no_ssplit=False, use_regex_tokens=True):\n paragraphs = []\n for i, p in enumerate(data_generator.sentences):\n start = 0 if i == 0 else paragraphs[-1][2]\n length = sum([len(x[0]) for x in p])\n paragraphs += [(i, start, start+length, length)] # para idx, start idx, end idx, length\n\n paragraphs = list(sorted(paragraphs, key=lambda x: x[3], reverse=True))\n\n all_preds = [None] * len(paragraphs)\n all_raw = [None] * len(paragraphs)\n\n eval_limit = max(3000, max_seqlen)\n\n batch_size = trainer.args['batch_size']\n skip_newline = trainer.args['skip_newline']\n batches = int((len(paragraphs) + batch_size - 1) / batch_size)\n\n for i in range(batches):\n # At evaluation time, each paragraph is treated as a single \"sentence\", and a batch of `batch_size` paragraphs \n # are tokenized together. 
`offsets` here are used by the data generator to identify which paragraphs to use\n # for the next batch of evaluation.\n batchparas = paragraphs[i * batch_size : (i + 1) * batch_size]\n offsets = [x[1] for x in batchparas]\n\n batch = data_generator.next(eval_offsets=offsets)\n raw = batch[3]\n\n N = len(batch[3][0])\n if N <= eval_limit:\n pred = np.argmax(trainer.predict(batch), axis=2)\n else:\n idx = [0] * len(batchparas)\n adv = [0] * len(batchparas)\n Ns = [p[3] for p in batchparas]\n pred = [[] for _ in batchparas]\n while True:\n ens = [min(N - idx1, eval_limit) for idx1, N in zip(idx, Ns)]\n en = max(ens)\n batch1 = batch[0][:, :en], batch[1][:, :en], batch[2][:, :en], [x[:en] for x in batch[3]]\n pred1 = np.argmax(trainer.predict(batch1), axis=2)\n\n for j in range(len(batchparas)):\n sentbreaks = np.where((pred1[j] == 2) + (pred1[j] == 4))[0]\n if len(sentbreaks) <= 0 or idx[j] >= Ns[j] - eval_limit:\n advance = ens[j]\n else:\n advance = np.max(sentbreaks) + 1\n\n pred[j] += [pred1[j, :advance]]\n idx[j] += advance\n adv[j] = advance\n\n if all([idx1 >= N for idx1, N in zip(idx, Ns)]):\n break\n # once we've made predictions on a certain number of characters for each paragraph (recorded in `adv`),\n # we skip the first `adv` characters to make the updated batch\n batch = data_generator.next(eval_offsets=adv, old_batch=batch)\n\n pred = [np.concatenate(p, 0) for p in pred]\n\n for j, p in enumerate(batchparas):\n len1 = len([1 for x in raw[j] if x != '<PAD>'])\n if pred[j][len1-1] < 2:\n pred[j][len1-1] = 2\n elif pred[j][len1-1] > 2:\n pred[j][len1-1] = 4\n if use_regex_tokens:\n all_preds[p[0]] = update_pred_regex(raw[j], pred[j][:len1])\n else:\n all_preds[p[0]] = pred[j][:len1]\n all_raw[p[0]] = raw[j]\n\n offset = 0\n oov_count = 0\n doc = []\n\n text = SPACE_RE.sub(' ', orig_text) if orig_text is not None else None\n char_offset = 0\n use_la_ittb_shorthand = trainer.args['shorthand'] == 'la_ittb'\n\n UNK_ID = vocab.unit2id('<UNK>')\n\n # Once everything is fed through the tokenizer model, it's time to decode the predictions\n # into actual tokens and sentences that the rest of the pipeline uses\n for j in range(len(paragraphs)):\n raw = all_raw[j]\n pred = all_preds[j]\n\n current_tok = ''\n current_sent = []\n\n for t, p in zip(raw, pred):\n if t == '<PAD>':\n break\n # hack la_ittb\n if use_la_ittb_shorthand and t in (\":\", \";\"):\n p = 2\n offset += 1\n if vocab.unit2id(t) == UNK_ID:\n oov_count += 1\n\n current_tok += t\n if p >= 1:\n tok = vocab.normalize_token(current_tok)\n assert '\\t' not in tok, tok\n if len(tok) <= 0:\n current_tok = ''\n continue\n if orig_text is not None:\n st = -1\n tok_len = 0\n for part in SPACE_SPLIT_RE.split(current_tok):\n if len(part) == 0: continue\n if skip_newline:\n part_pattern = re.compile(r'\\s*'.join(re.escape(c) for c in part))\n match = part_pattern.search(text, char_offset)\n st0 = match.start(0) - char_offset\n partlen = match.end(0) - match.start(0)\n else:\n st0 = text.index(part, char_offset) - char_offset\n partlen = len(part)\n lstripped = part.lstrip()\n if st < 0:\n st = char_offset + st0 + (len(part) - len(lstripped))\n char_offset += st0 + partlen\n position_info = (st, char_offset)\n else:\n position_info = None\n current_sent.append((tok, p, position_info))\n current_tok = ''\n if (p == 2 or p == 4) and not no_ssplit:\n doc.append(process_sentence(current_sent, mwt_dict))\n current_sent = []\n\n assert(len(current_tok) == 0)\n if len(current_sent):\n doc.append(process_sentence(current_sent, mwt_dict))\n\n 
return doc\n# if output_file: CoNLL.dict2conll(doc, output_file)\n# return oov_count, offset, all_preds, doc\n\ndef eval_model(args, trainer, batches, vocab, mwt_dict):\n oov_count, N, all_preds, doc = output_predictions(args['conll_file'], trainer, batches, vocab, mwt_dict, args['max_seqlen'])\n\n all_preds = np.concatenate(all_preds, 0)\n labels = [y[1] for x in batches.data for y in x]\n counter = Counter(zip(all_preds, labels))\n\n def f1(pred, gold, mapping):\n pred = [mapping[p] for p in pred]\n gold = [mapping[g] for g in gold]\n\n lastp = -1; lastg = -1\n tp = 0; fp = 0; fn = 0\n for i, (p, g) in enumerate(zip(pred, gold)):\n if p == g > 0 and lastp == lastg:\n lastp = i\n lastg = i\n tp += 1\n elif p > 0 and g > 0:\n lastp = i\n lastg = i\n fp += 1\n fn += 1\n elif p > 0:\n # and g == 0\n lastp = i\n fp += 1\n elif g > 0:\n lastg = i\n fn += 1\n\n if tp == 0:\n return 0\n else:\n return 2 * tp / (2 * tp + fp + fn)\n\n f1tok = f1(all_preds, labels, {0:0, 1:1, 2:1, 3:1, 4:1})\n f1sent = f1(all_preds, labels, {0:0, 1:0, 2:1, 3:0, 4:1})\n f1mwt = f1(all_preds, labels, {0:0, 1:1, 2:1, 3:2, 4:2})\n logger.info(f\"{args['shorthand']}: token F1 = {f1tok*100:.2f}, sentence F1 = {f1sent*100:.2f}, mwt F1 = {f1mwt*100:.2f}\")\n return harmonic_mean([f1tok, f1sent, f1mwt], [1, 1, .01])\n\n", "from typing import List, Set, Tuple, Dict\nimport numpy\n\nfrom flair.utils.checks import ConfigurationError\n\ndef decode_mst(energy: numpy.ndarray,\n length: int,\n has_labels: bool = True) -> Tuple[numpy.ndarray, numpy.ndarray]:\n \"\"\"\n Note: Counter to typical intuition, this function decodes the _maximum_\n spanning tree.\n Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for\n maximum spanning arboresences on graphs.\n Parameters\n ----------\n energy : ``numpy.ndarray``, required.\n A tensor with shape (num_labels, timesteps, timesteps)\n containing the energy of each edge. 
If has_labels is ``False``,\n the tensor should have shape (timesteps, timesteps) instead.\n length : ``int``, required.\n The length of this sequence, as the energy may have come\n from a padded batch.\n has_labels : ``bool``, optional, (default = True)\n Whether the graph has labels or not.\n \"\"\"\n if has_labels and energy.ndim != 3:\n raise ConfigurationError(\"The dimension of the energy array is not equal to 3.\")\n elif not has_labels and energy.ndim != 2:\n raise ConfigurationError(\"The dimension of the energy array is not equal to 2.\")\n input_shape = energy.shape\n max_length = input_shape[-1]\n\n # Our energy matrix might have been batched -\n # here we clip it to contain only non padded tokens.\n if has_labels:\n energy = energy[:, :length, :length]\n # get best label for each edge.\n label_id_matrix = energy.argmax(axis=0)\n energy = energy.max(axis=0)\n else:\n energy = energy[:length, :length]\n label_id_matrix = None\n # get original score matrix\n original_score_matrix = energy\n # initialize score matrix to original score matrix\n score_matrix = numpy.array(original_score_matrix, copy=True)\n\n old_input = numpy.zeros([length, length], dtype=numpy.int32)\n old_output = numpy.zeros([length, length], dtype=numpy.int32)\n current_nodes = [True for _ in range(length)]\n representatives: List[Set[int]] = []\n\n for node1 in range(length):\n original_score_matrix[node1, node1] = 0.0\n score_matrix[node1, node1] = 0.0\n representatives.append({node1})\n\n for node2 in range(node1 + 1, length):\n old_input[node1, node2] = node1\n old_output[node1, node2] = node2\n\n old_input[node2, node1] = node2\n old_output[node2, node1] = node1\n\n final_edges: Dict[int, int] = {}\n\n # The main algorithm operates inplace.\n chu_liu_edmonds(length, score_matrix, current_nodes,\n final_edges, old_input, old_output, representatives)\n\n heads = numpy.zeros([max_length], numpy.int32)\n if has_labels:\n head_type = numpy.ones([max_length], numpy.int32)\n else:\n head_type = None\n\n for child, parent in final_edges.items():\n heads[child] = parent\n if has_labels:\n head_type[child] = label_id_matrix[parent, child]\n\n return heads, head_type\n\n\ndef chu_liu_edmonds(length: int,\n score_matrix: numpy.ndarray,\n current_nodes: List[bool],\n final_edges: Dict[int, int],\n old_input: numpy.ndarray,\n old_output: numpy.ndarray,\n representatives: List[Set[int]]):\n \"\"\"\n Applies the chu-liu-edmonds algorithm recursively\n to a graph with edge weights defined by score_matrix.\n Note that this function operates in place, so variables\n will be modified.\n Parameters\n ----------\n length : ``int``, required.\n The number of nodes.\n score_matrix : ``numpy.ndarray``, required.\n The score matrix representing the scores for pairs\n of nodes.\n current_nodes : ``List[bool]``, required.\n The nodes which are representatives in the graph.\n A representative at it's most basic represents a node,\n but as the algorithm progresses, individual nodes will\n represent collapsed cycles in the graph.\n final_edges: ``Dict[int, int]``, required.\n An empty dictionary which will be populated with the\n nodes which are connected in the maximum spanning tree.\n old_input: ``numpy.ndarray``, required.\n a map from an edge to its head node.\n Key: The edge is a tuple, and elements in a tuple\n could be a node or a representative of a cycle.\n old_output: ``numpy.ndarray``, required.\n representatives : ``List[Set[int]]``, required.\n A list containing the nodes that a particular node\n is representing at this 
iteration in the graph.\n Returns\n -------\n Nothing - all variables are modified in place.\n \"\"\"\n # Set the initial graph to be the greedy best one.\n # Node '0' is always the root node.\n parents = [-1]\n for node1 in range(1, length):\n # Init the parent of each node to be the root node.\n parents.append(0)\n if current_nodes[node1]:\n # If the node is a representative,\n # find the max outgoing edge to other non-root representative,\n # and update its parent.\n max_score = score_matrix[0, node1]\n for node2 in range(1, length):\n if node2 == node1 or not current_nodes[node2]:\n continue\n\n new_score = score_matrix[node2, node1]\n if new_score > max_score:\n max_score = new_score\n parents[node1] = node2\n\n # Check if this solution has a cycle.\n has_cycle, cycle = _find_cycle(parents, length, current_nodes)\n # If there are no cycles, find all edges and return.\n if not has_cycle:\n final_edges[0] = -1\n for node in range(1, length):\n if not current_nodes[node]:\n continue\n\n parent = old_input[parents[node], node]\n child = old_output[parents[node], node]\n final_edges[child] = parent\n return\n\n # Otherwise, we have a cycle so we need to remove an edge.\n # From here until the recursive call is the contraction stage of the algorithm.\n cycle_weight = 0.0\n # Find the weight of the cycle.\n index = 0\n for node in cycle:\n index += 1\n cycle_weight += score_matrix[parents[node], node]\n\n # For each node in the graph, find the maximum weight incoming\n # and outgoing edge into the cycle.\n cycle_representative = cycle[0]\n for node in range(length):\n # Nodes not in the cycle.\n if not current_nodes[node] or node in cycle:\n continue\n\n in_edge_weight = float(\"-inf\")\n in_edge = -1\n out_edge_weight = float(\"-inf\")\n out_edge = -1\n\n for node_in_cycle in cycle:\n if score_matrix[node_in_cycle, node] > in_edge_weight:\n in_edge_weight = score_matrix[node_in_cycle, node]\n in_edge = node_in_cycle\n\n # Add the new edge score to the cycle weight\n # and subtract the edge we're considering removing.\n score = (cycle_weight +\n score_matrix[node, node_in_cycle] -\n score_matrix[parents[node_in_cycle], node_in_cycle])\n\n if score > out_edge_weight:\n out_edge_weight = score\n out_edge = node_in_cycle\n\n score_matrix[cycle_representative, node] = in_edge_weight\n old_input[cycle_representative, node] = old_input[in_edge, node]\n old_output[cycle_representative, node] = old_output[in_edge, node]\n\n score_matrix[node, cycle_representative] = out_edge_weight\n old_output[node, cycle_representative] = old_output[node, out_edge]\n old_input[node, cycle_representative] = old_input[node, out_edge]\n\n # For the next recursive iteration, we want to consider the cycle as a\n # single node. Here we collapse the cycle into the first node in the\n # cycle (first node is arbitrary), set all the other nodes not be\n # considered in the next iteration. 
We also keep track of which\n # representatives we are considering this iteration because we need\n # them below to check if we're done.\n considered_representatives: List[Set[int]] = []\n for i, node_in_cycle in enumerate(cycle):\n considered_representatives.append(set())\n if i > 0:\n # We need to consider at least one\n # node in the cycle, arbitrarily choose\n # the first.\n current_nodes[node_in_cycle] = False\n\n for node in representatives[node_in_cycle]:\n considered_representatives[i].add(node)\n if i > 0:\n representatives[cycle_representative].add(node)\n\n chu_liu_edmonds(length, score_matrix, current_nodes, final_edges, old_input, old_output, representatives)\n\n # Expansion stage.\n # check each node in cycle, if one of its representatives\n # is a key in the final_edges, it is the one we need.\n # The node we are looking for is the node which is the child\n # of the incoming edge to the cycle.\n found = False\n key_node = -1\n for i, node in enumerate(cycle):\n for cycle_rep in considered_representatives[i]:\n if cycle_rep in final_edges:\n key_node = node\n found = True\n break\n if found:\n break\n\n # break the cycle.\n previous = parents[key_node]\n while previous != key_node:\n child = old_output[parents[previous], previous]\n parent = old_input[parents[previous], previous]\n final_edges[child] = parent\n previous = parents[previous]\n\n\ndef _find_cycle(parents: List[int],\n length: int,\n current_nodes: List[bool]) -> Tuple[bool, List[int]]:\n \"\"\"\n :return:\n has_cycle: whether the graph has at least a cycle.\n cycle: a list of nodes which form a cycle in the graph.\n \"\"\"\n\n # 'added' means that the node has been visited.\n added = [False for _ in range(length)]\n added[0] = True\n cycle = set()\n has_cycle = False\n for i in range(1, length):\n if has_cycle:\n break\n # don't redo nodes we've already\n # visited or aren't considering.\n if added[i] or not current_nodes[i]:\n continue\n # Initialize a new possible cycle.\n this_cycle = set()\n this_cycle.add(i)\n added[i] = True\n has_cycle = True\n next_node = i\n while parents[next_node] not in this_cycle:\n next_node = parents[next_node]\n # If we see a node we've already processed,\n # we can stop, because the node we are\n # processing would have been in that cycle.\n # Note that in the first pass of the for loop,\n # every node except that the root has been assigned\n # a head, if there's no cycle, the while loop\n # will finally arrive at the root\n if added[next_node]:\n has_cycle = False\n break\n added[next_node] = True\n this_cycle.add(next_node)\n\n if has_cycle:\n original = next_node\n cycle.add(original)\n next_node = parents[original]\n while next_node != original:\n cycle.add(next_node)\n next_node = parents[next_node]\n break\n\n return has_cycle, list(cycle)\n\n\ndef decode_mst_with_coreference(\n energy: numpy.ndarray,\n coreference: List[int],\n length: int,\n has_labels: bool = True) -> Tuple[numpy.ndarray, numpy.ndarray]:\n \"\"\"\n Note: Counter to typical intuition, this function decodes the _maximum_\n spanning tree.\n Decode the optimal MST tree with the Chu-Liu-Edmonds algorithm for\n maximum spanning arboresences on graphs.\n Parameters\n ----------\n energy : ``numpy.ndarray``, required.\n A tensor with shape (num_labels, timesteps, timesteps)\n containing the energy of each edge. 
If has_labels is ``False``,\n the tensor should have shape (timesteps, timesteps) instead.\n coreference: ``List[int]``, required.\n A list which maps a node to its first precedent.\n length : ``int``, required.\n The length of this sequence, as the energy may have come\n from a padded batch.\n has_labels : ``bool``, optional, (default = True)\n Whether the graph has labels or not.\n \"\"\"\n if has_labels and energy.ndim != 3:\n raise ConfigurationError(\"The dimension of the energy array is not equal to 3.\")\n elif not has_labels and energy.ndim != 2:\n raise ConfigurationError(\"The dimension of the energy array is not equal to 2.\")\n input_shape = energy.shape\n max_length = input_shape[-1]\n\n # Our energy matrix might have been batched -\n # here we clip it to contain only non padded tokens.\n if has_labels:\n energy = energy[:, :length, :length]\n # get best label for each edge.\n label_id_matrix = energy.argmax(axis=0)\n energy = energy.max(axis=0)\n else:\n energy = energy[:length, :length]\n label_id_matrix = None\n # get original score matrix\n original_score_matrix = energy\n # initialize score matrix to original score matrix\n score_matrix = numpy.array(original_score_matrix, copy=True)\n\n old_input = numpy.zeros([length, length], dtype=numpy.int32)\n old_output = numpy.zeros([length, length], dtype=numpy.int32)\n current_nodes = [True for _ in range(length)]\n representatives: List[Set[int]] = []\n\n for node1 in range(length):\n original_score_matrix[node1, node1] = 0.0\n score_matrix[node1, node1] = 0.0\n representatives.append({node1})\n\n for node2 in range(node1 + 1, length):\n old_input[node1, node2] = node1\n old_output[node1, node2] = node2\n\n old_input[node2, node1] = node2\n old_output[node2, node1] = node1\n\n final_edges: Dict[int, int] = {}\n\n # The main algorithm operates inplace.\n adapted_chu_liu_edmonds(\n length, score_matrix, coreference, current_nodes,\n final_edges, old_input, old_output, representatives)\n\n # Modify edges which are invalid according to coreference.\n _validate(final_edges, length, original_score_matrix, coreference)\n\n heads = numpy.zeros([max_length], numpy.int32)\n if has_labels:\n head_type = numpy.ones([max_length], numpy.int32)\n else:\n head_type = None\n\n for child, parent in final_edges.items():\n heads[child] = parent\n if has_labels:\n head_type[child] = label_id_matrix[parent, child]\n\n return heads, head_type\n\n\ndef adapted_chu_liu_edmonds(length: int,\n score_matrix: numpy.ndarray,\n coreference: List[int],\n current_nodes: List[bool],\n final_edges: Dict[int, int],\n old_input: numpy.ndarray,\n old_output: numpy.ndarray,\n representatives: List[Set[int]]):\n \"\"\"\n Applies the chu-liu-edmonds algorithm recursively\n to a graph with edge weights defined by score_matrix.\n Note that this function operates in place, so variables\n will be modified.\n Parameters\n ----------\n length : ``int``, required.\n The number of nodes.\n score_matrix : ``numpy.ndarray``, required.\n The score matrix representing the scores for pairs\n of nodes.\n coreference: ``List[int]``, required.\n A list which maps a node to its first precedent.\n current_nodes : ``List[bool]``, required.\n The nodes which are representatives in the graph.\n A representative at it's most basic represents a node,\n but as the algorithm progresses, individual nodes will\n represent collapsed cycles in the graph.\n final_edges: ``Dict[int, int]``, required.\n An empty dictionary which will be populated with the\n nodes which are connected in the maximum 
spanning tree.\n old_input: ``numpy.ndarray``, required.\n a map from an edge to its head node.\n Key: The edge is a tuple, and elements in a tuple\n could be a node or a representative of a cycle.\n old_output: ``numpy.ndarray``, required.\n representatives : ``List[Set[int]]``, required.\n A list containing the nodes that a particular node\n is representing at this iteration in the graph.\n Returns\n -------\n Nothing - all variables are modified in place.\n \"\"\"\n # Set the initial graph to be the greedy best one.\n # Node '0' is always the root node.\n parents = [-1]\n for node1 in range(1, length):\n # Init the parent of each node to be the root node.\n parents.append(0)\n if current_nodes[node1]:\n # If the node is a representative,\n # find the max outgoing edge to other non-root representative,\n # and update its parent.\n max_score = score_matrix[0, node1]\n for node2 in range(1, length):\n if node2 == node1 or not current_nodes[node2]:\n continue\n\n # Exclude edges formed by two coreferred nodes\n _parent = old_input[node1, node2]\n _child = old_output[node1, node2]\n if coreference[_parent] == coreference[_child]:\n continue\n\n new_score = score_matrix[node2, node1]\n if new_score > max_score:\n max_score = new_score\n parents[node1] = node2\n\n # Check if this solution has a cycle.\n has_cycle, cycle = _find_cycle(parents, length, current_nodes)\n # If there are no cycles, find all edges and return.\n if not has_cycle:\n final_edges[0] = -1\n for node in range(1, length):\n if not current_nodes[node]:\n continue\n\n parent = old_input[parents[node], node]\n child = old_output[parents[node], node]\n final_edges[child] = parent\n return\n\n # Otherwise, we have a cycle so we need to remove an edge.\n # From here until the recursive call is the contraction stage of the algorithm.\n cycle_weight = 0.0\n # Find the weight of the cycle.\n index = 0\n for node in cycle:\n index += 1\n cycle_weight += score_matrix[parents[node], node]\n\n # For each node in the graph, find the maximum weight incoming\n # and outgoing edge into the cycle.\n cycle_representative = cycle[0]\n for node in range(length):\n # Nodes not in the cycle.\n if not current_nodes[node] or node in cycle:\n continue\n\n in_edge_weight = float(\"-inf\")\n in_edge = -1\n out_edge_weight = float(\"-inf\")\n out_edge = -1\n\n for node_in_cycle in cycle:\n # Exclude edges formed by two coreferred nodes.\n _parent = old_input[node_in_cycle, node]\n _child = old_output[node_in_cycle, node]\n if coreference[_parent] != coreference[_child]:\n if score_matrix[node_in_cycle, node] > in_edge_weight:\n in_edge_weight = score_matrix[node_in_cycle, node]\n in_edge = node_in_cycle\n\n # Exclude edges formed by two coreferred nodes.\n _parent = old_input[node, node_in_cycle]\n _child = old_output[node, node_in_cycle]\n if coreference[_parent] != coreference[_child]:\n # Add the new edge score to the cycle weight\n # and subtract the edge we're considering removing.\n score = (cycle_weight +\n score_matrix[node, node_in_cycle] -\n score_matrix[parents[node_in_cycle], node_in_cycle])\n\n if score > out_edge_weight:\n out_edge_weight = score\n out_edge = node_in_cycle\n\n score_matrix[cycle_representative, node] = in_edge_weight\n old_input[cycle_representative, node] = old_input[in_edge, node]\n old_output[cycle_representative, node] = old_output[in_edge, node]\n\n score_matrix[node, cycle_representative] = out_edge_weight\n old_output[node, cycle_representative] = old_output[node, out_edge]\n old_input[node, cycle_representative] = 
old_input[node, out_edge]\n\n # For the next recursive iteration, we want to consider the cycle as a\n # single node. Here we collapse the cycle into the first node in the\n # cycle (first node is arbitrary), set all the other nodes not be\n # considered in the next iteration. We also keep track of which\n # representatives we are considering this iteration because we need\n # them below to check if we're done.\n considered_representatives: List[Set[int]] = []\n for i, node_in_cycle in enumerate(cycle):\n considered_representatives.append(set())\n if i > 0:\n # We need to consider at least one\n # node in the cycle, arbitrarily choose\n # the first.\n current_nodes[node_in_cycle] = False\n\n for node in representatives[node_in_cycle]:\n considered_representatives[i].add(node)\n if i > 0:\n representatives[cycle_representative].add(node)\n\n adapted_chu_liu_edmonds(length, score_matrix, coreference, current_nodes, final_edges, old_input, old_output, representatives)\n\n # Expansion stage.\n # check each node in cycle, if one of its representatives\n # is a key in the final_edges, it is the one we need.\n # The node we are looking for is the node which is the child\n # of the incoming edge to the cycle.\n found = False\n key_node = -1\n for i, node in enumerate(cycle):\n for cycle_rep in considered_representatives[i]:\n if cycle_rep in final_edges:\n key_node = node\n found = True\n break\n if found:\n break\n\n # break the cycle.\n previous = parents[key_node]\n while previous != key_node:\n child = old_output[parents[previous], previous]\n parent = old_input[parents[previous], previous]\n final_edges[child] = parent\n previous = parents[previous]\n\n\ndef _validate(final_edges, length, original_score_matrix, coreference):\n # Count how many edges have been modified by this function.\n modified = 0\n\n # Make a constant used by _find_cycle.\n current_nodes = [True for _ in range(length)]\n\n # Group nodes by coreference.\n group_by_precedent = {}\n for node, precedent in enumerate(coreference):\n if precedent not in group_by_precedent:\n group_by_precedent[precedent] = []\n group_by_precedent[precedent].append(node)\n\n # Validate parents of nodes in each group.\n for group in group_by_precedent.values():\n # Skip if only one node in the group.\n if len(group) == 1:\n continue\n # Group conflicting nodes by parent.\n conflicts_by_parent = {}\n for child in group:\n parent = final_edges[child]\n if parent not in conflicts_by_parent:\n conflicts_by_parent[parent] = []\n conflicts_by_parent[parent].append(child)\n\n # Keep the parents which have already been taken.\n reserved_parents = set(conflicts_by_parent.keys())\n for parent, conflicts in conflicts_by_parent.items():\n # Skip if no conflict.\n if len(conflicts) == 1:\n continue\n # Find the node that has the maximum edge with the parent.\n winner = max(conflicts, key=lambda _child: original_score_matrix[parent, _child])\n # Modify other nodes' parents.\n for child in conflicts:\n # Skip the winner.\n if child == winner:\n continue\n # Sort its candidate parents by score.\n parent_scores = original_score_matrix[:, child]\n for _parent in numpy.argsort(parent_scores)[::-1]:\n # Skip its current parent and the reserved parents.\n if _parent == parent or _parent in reserved_parents:\n continue\n # Check if there's any cycle if we use this parent.\n parents = final_edges.copy()\n parents[child] = _parent\n has_cycle, _ = _find_cycle(parents, length, current_nodes)\n if has_cycle:\n continue\n # Add it to the reserved parents.\n 
reserved_parents.add(_parent)\n # Update its parent.\n final_edges[child] = _parent\n # Update the counter.\n modified += 1\n break\n # else:\n # print('* Could not find another parent. Use the old one.')\n # if modified > 0:\n # print('* Validate')\n return modified\n", "import logging\nfrom collections import defaultdict\nfrom pathlib import Path\nfrom typing import Union, List\n\nimport numpy as np\nimport csv\n\nimport matplotlib\nimport math\n\n# to enable %matplotlib inline if running in ipynb\nfrom IPython import get_ipython\n\nipy = get_ipython()\nif ipy is not None:\n ipy.run_line_magic(\"matplotlib\", \"inline\")\n\n# change from Agg to TkAgg for interative mode\ntry:\n # change from Agg to TkAgg for interative mode\n matplotlib.use(\"TkAgg\")\nexcept:\n pass\n\n\nimport matplotlib\nmatplotlib.use('pdf') # shows in the browser\nimport matplotlib.pyplot as plt\n\n\n# header for 'weights.txt'\nWEIGHT_NAME = 1\nWEIGHT_NUMBER = 2\nWEIGHT_VALUE = 3\n\nlog = logging.getLogger(\"flair\")\n\n\nclass Plotter(object):\n \"\"\"\n Plots training parameters (loss, f-score, and accuracy) and training weights over time.\n Input files are the output files 'loss.tsv' and 'weights.txt' from training either a sequence tagger or text\n classification model.\n \"\"\"\n\n @staticmethod\n def _extract_evaluation_data(file_name: Path, score: str = \"F1\") -> dict:\n training_curves = {\n \"train\": {\"loss\": [], \"score\": []},\n \"test\": {\"loss\": [], \"score\": []},\n \"dev\": {\"loss\": [], \"score\": []},\n }\n\n with open(file_name, \"r\") as tsvin:\n tsvin = csv.reader(tsvin, delimiter=\"\\t\")\n\n # determine the column index of loss, f-score and accuracy for train, dev and test split\n row = next(tsvin, None)\n\n score = score.upper()\n\n if f\"TEST_{score}\" not in row:\n log.warning(\"-\" * 100)\n log.warning(f\"WARNING: No {score} found for test split in this data.\")\n log.warning(\n f\"Are you sure you want to plot {score} and not another value?\"\n )\n log.warning(\"-\" * 100)\n\n TRAIN_SCORE = (\n row.index(f\"TRAIN_{score}\") if f\"TRAIN_{score}\" in row else None\n )\n DEV_SCORE = row.index(f\"DEV_{score}\") if f\"DEV_{score}\" in row else None\n TEST_SCORE = row.index(f\"TEST_{score}\")\n\n # then get all relevant values from the tsv\n for row in tsvin:\n\n if TRAIN_SCORE is not None:\n if row[TRAIN_SCORE] != \"_\":\n training_curves[\"train\"][\"score\"].append(\n float(row[TRAIN_SCORE])\n )\n\n if DEV_SCORE is not None:\n if row[DEV_SCORE] != \"_\":\n training_curves[\"dev\"][\"score\"].append(float(row[DEV_SCORE]))\n\n if row[TEST_SCORE] != \"_\":\n training_curves[\"test\"][\"score\"].append(float(row[TEST_SCORE]))\n\n return training_curves\n\n @staticmethod\n def _extract_weight_data(file_name: Path) -> dict:\n weights = defaultdict(lambda: defaultdict(lambda: list()))\n\n with open(file_name, \"r\") as tsvin:\n tsvin = csv.reader(tsvin, delimiter=\"\\t\")\n\n for row in tsvin:\n name = row[WEIGHT_NAME]\n param = row[WEIGHT_NUMBER]\n value = float(row[WEIGHT_VALUE])\n\n weights[name][param].append(value)\n\n return weights\n\n @staticmethod\n def _extract_learning_rate(file_name: Path):\n lrs = []\n losses = []\n\n with open(file_name, \"r\") as tsvin:\n tsvin = csv.reader(tsvin, delimiter=\"\\t\")\n row = next(tsvin, None)\n LEARNING_RATE = row.index(\"LEARNING_RATE\")\n TRAIN_LOSS = row.index(\"TRAIN_LOSS\")\n\n # then get all relevant values from the tsv\n for row in tsvin:\n if row[TRAIN_LOSS] != \"_\":\n losses.append(float(row[TRAIN_LOSS]))\n if row[LEARNING_RATE] != 
\"_\":\n lrs.append(float(row[LEARNING_RATE]))\n\n return lrs, losses\n\n def plot_weights(self, file_name: Union[str, Path]):\n if type(file_name) is str:\n file_name = Path(file_name)\n\n weights = self._extract_weight_data(file_name)\n\n total = len(weights)\n columns = 2\n rows = max(2, int(math.ceil(total / columns)))\n # print(rows)\n\n # figsize = (16, 16)\n if rows != columns:\n figsize = (8, rows + 0)\n\n fig = plt.figure()\n f, axarr = plt.subplots(rows, columns, figsize=figsize)\n\n c = 0\n r = 0\n for name, values in weights.items():\n # plot i\n axarr[r, c].set_title(name, fontsize=6)\n for _, v in values.items():\n axarr[r, c].plot(np.arange(0, len(v)), v, linewidth=0.35)\n axarr[r, c].set_yticks([])\n axarr[r, c].set_xticks([])\n c += 1\n if c == columns:\n c = 0\n r += 1\n\n while r != rows and c != columns:\n axarr[r, c].set_yticks([])\n axarr[r, c].set_xticks([])\n c += 1\n if c == columns:\n c = 0\n r += 1\n\n # save plots\n f.subplots_adjust(hspace=0.5)\n plt.tight_layout(pad=1.0)\n path = file_name.parent / \"weights.png\"\n plt.savefig(path, dpi=300)\n print(\n f\"Weights plots are saved in {path}\"\n ) # to let user know the path of the save plots\n plt.close(fig)\n\n def plot_training_curves(\n self, file_name: Union[str, Path], plot_values: List[str] = [\"loss\", \"F1\"]\n ):\n if type(file_name) is str:\n file_name = Path(file_name)\n\n fig = plt.figure(figsize=(15, 10))\n\n for plot_no, plot_value in enumerate(plot_values):\n\n training_curves = self._extract_evaluation_data(file_name, plot_value)\n\n plt.subplot(len(plot_values), 1, plot_no + 1)\n if training_curves[\"train\"][\"score\"]:\n x = np.arange(0, len(training_curves[\"train\"][\"score\"]))\n plt.plot(\n x, training_curves[\"train\"][\"score\"], label=f\"training {plot_value}\"\n )\n if training_curves[\"dev\"][\"score\"]:\n x = np.arange(0, len(training_curves[\"dev\"][\"score\"]))\n plt.plot(\n x, training_curves[\"dev\"][\"score\"], label=f\"validation {plot_value}\"\n )\n if training_curves[\"test\"][\"score\"]:\n x = np.arange(0, len(training_curves[\"test\"][\"score\"]))\n plt.plot(\n x, training_curves[\"test\"][\"score\"], label=f\"test {plot_value}\"\n )\n plt.legend(bbox_to_anchor=(1.04, 0), loc=\"lower left\", borderaxespad=0)\n plt.ylabel(plot_value)\n plt.xlabel(\"epochs\")\n\n # save plots\n plt.tight_layout(pad=1.0)\n path = file_name.parent / \"training.png\"\n plt.savefig(path, dpi=300)\n print(\n f\"Loss and F1 plots are saved in {path}\"\n ) # to let user know the path of the save plots\n plt.show(block=False) # to have the plots displayed when user run this module\n plt.close(fig)\n\n def plot_learning_rate(\n self, file_name: Union[str, Path], skip_first: int = 10, skip_last: int = 5\n ):\n if type(file_name) is str:\n file_name = Path(file_name)\n\n lrs, losses = self._extract_learning_rate(file_name)\n lrs = lrs[skip_first:-skip_last] if skip_last > 0 else lrs[skip_first:]\n losses = losses[skip_first:-skip_last] if skip_last > 0 else losses[skip_first:]\n\n fig, ax = plt.subplots(1, 1)\n ax.plot(lrs, losses)\n ax.set_ylabel(\"Loss\")\n ax.set_xlabel(\"Learning Rate\")\n ax.set_xscale(\"log\")\n ax.xaxis.set_major_formatter(plt.FormatStrFormatter(\"%.0e\"))\n\n # plt.show()\n\n # save plot\n plt.tight_layout(pad=1.0)\n path = file_name.parent / \"learning_rate.png\"\n plt.savefig(path, dpi=300)\n print(\n f\"Learning_rate plots are saved in {path}\"\n ) # to let user know the path of the save plots\n plt.show(block=True) # to have the plots displayed when user run this module\n 
plt.close(fig)\n", "import warnings\r\nimport logging\r\nfrom pathlib import Path\r\n\r\nimport torch.nn\r\nfrom torch.nn.parameter import Parameter\r\nimport torch.nn.functional as F\r\nimport torch.autograd as autograd\r\nimport models.flair.nn\r\nimport models.flair as flair\r\nimport torch\r\n\r\nfrom models.flair.data import Dictionary, Sentence, Token, Label\r\nfrom models.flair.datasets import DataLoader\r\nfrom models.flair.embeddings import TokenEmbeddings\r\nfrom models.flair.file_utils import cached_path\r\n\r\nfrom typing import List, Tuple, Union\r\n\r\nfrom models.flair.training_utils import Metric, Result, store_embeddings\r\nfrom tqdm import tqdm\r\nfrom tabulate import tabulate\r\nimport numpy as np\r\nimport pdb\r\nimport copy\r\nimport time\r\n\r\nimport sys\r\n# sys.path.insert(0,'/home/wangxy/workspace/flair/parser')\r\n# sys.path.append('./flair/parser/modules')\r\n\r\n# from flair.parser.utils.fn import ispunct\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.nn.utils.rnn import (pack_padded_sequence, pad_packed_sequence,\r\n\t\t\t\t\t\t\t\tpad_sequence)\r\n\r\n\r\nimport uuid\r\nuid = uuid.uuid4().hex[:6]\r\n \r\n\r\n\r\nlog = logging.getLogger(\"flair\")\r\n\r\nSTART_TAG: str = \"<START>\"\r\nSTOP_TAG: str = \"<STOP>\"\r\n\r\n\r\ndef to_scalar(var):\r\n\treturn var.view(-1).detach().tolist()[0]\r\n\r\n\r\ndef argmax(vec):\r\n\t_, idx = torch.max(vec, 1)\r\n\treturn to_scalar(idx)\r\n\r\n\r\ndef log_sum_exp(vec):\r\n\tmax_score = vec[0, argmax(vec)]\r\n\tmax_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1])\r\n\treturn max_score + torch.log(torch.sum(torch.exp(vec - max_score_broadcast)))\r\n\r\n\r\ndef argmax_batch(vecs):\r\n\t_, idx = torch.max(vecs, 1)\r\n\treturn idx\r\n\r\n\r\ndef log_sum_exp_batch(vecs):\r\n\tmaxi = torch.max(vecs, 1)[0]\r\n\tmaxi_bc = maxi[:, None].repeat(1, vecs.shape[1])\r\n\trecti_ = torch.log(torch.sum(torch.exp(vecs - maxi_bc), 1))\r\n\treturn maxi + recti_\r\n\r\ndef log_sum_exp_vb(vec, m_size):\r\n\t\"\"\"\r\n\tcalculate log of exp sum\r\n\r\n\targs:\r\n\t\tvec (batch_size, vanishing_dim, hidden_dim) : input tensor\r\n\t\tm_size : hidden_dim\r\n\treturn:\r\n\t\tbatch_size, hidden_dim\r\n\t\"\"\"\r\n\t_, idx = torch.max(vec, 1) # B * 1 * M\r\n\tmax_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M\r\n\r\n\treturn max_score.view(-1, m_size) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)).view(-1,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tm_size) # B * M\r\n\r\ndef pad_tensors(tensor_list):\r\n\tml = max([x.shape[0] for x in tensor_list])\r\n\tshape = [len(tensor_list), ml] + list(tensor_list[0].shape[1:])\r\n\ttemplate = torch.zeros(*shape, dtype=torch.long, device=flair.device)\r\n\tlens_ = [x.shape[0] for x in tensor_list]\r\n\tfor i, tensor in enumerate(tensor_list):\r\n\t\ttemplate[i, : lens_[i]] = tensor\r\n\r\n\treturn template, lens_\r\n\r\n\r\nclass EnsembleModel(flair.nn.Model):\r\n\tdef __init__(\r\n\t\tself,\r\n\t\tembeddings: TokenEmbeddings,\r\n\t\ttag_dictionary: Dictionary,\r\n\t\ttag_type: str,\r\n\t\thidden_size: int = 256,\r\n\t\tuse_crf: bool = False,\r\n\t\tuse_rnn: bool = False,\r\n\t\ttrain_initial_hidden_state: bool = False,\r\n\t\trnn_layers: int = 3,\r\n\t\tlstm_dropout: float = 0.0,\r\n\t\tdropout: float = 0.0,\r\n\t\tword_dropout: float = 0.0,\r\n\t\tlocked_dropout: float = 0.0,\r\n\t\tpickle_module: str = \"pickle\",\r\n\t\tconfig = None,\r\n\t\tuse_decoder_timer = True,\r\n\t\tdebug = False,\r\n\t\tword_map = 
None,\r\n\t\tchar_map = None,\r\n\t\trelearn_embeddings = False,\r\n\t\ttesting = False,\r\n\t\tcandidates = -1,\r\n\t\ttarget_languages = -1,\r\n\t\tbinary = False,\r\n\t):\r\n\t\t\"\"\"\r\n\t\tInitializes a SequenceTagger\r\n\t\t:param hidden_size: number of hidden states in RNN\r\n\t\t:param embeddings: word embeddings used in tagger\r\n\t\t:param tag_dictionary: dictionary of tags you want to predict\r\n\t\t:param tag_type: string identifier for tag type\r\n\t\t:param use_crf: if True use CRF decoder, else project directly to tag space\r\n\t\t:param use_rnn: if True use RNN layer, otherwise use word embeddings directly\r\n\t\t:param rnn_layers: number of RNN layers\r\n\t\t:param dropout: dropout probability\r\n\t\t:param word_dropout: word dropout probability\r\n\t\t:param locked_dropout: locked dropout probability\r\n\t\t:param distill_crf: CRF information distillation\r\n\t\t:param crf_attention: use CRF distillation weights\r\n\t\t:param biaf_attention: use bilinear attention for word-KD distillation\r\n\t\t\"\"\"\r\n\r\n\t\tsuper(EnsembleModel, self).__init__()\r\n\t\tself.debug = False\r\n\t\tself.use_crf = use_crf\r\n\t\tself.use_rnn = use_rnn\r\n\t\tself.hidden_size = hidden_size\r\n\t\tself.embeddings = embeddings\r\n\t\tself.config = config\r\n\t\tself.binary = binary\r\n\r\n\t\tself.rnn_layers: int = rnn_layers\r\n\t\t# set the dictionaries\r\n\t\tself.tag_dictionary: Dictionary = tag_dictionary\r\n\t\tself.tag_type: str = tag_type\r\n\t\tself.tagset_size: int = len(tag_dictionary)\r\n\r\n\t\tself.word_map = word_map\r\n\t\tself.char_map = char_map\r\n\r\n\t\t# initialize the network architecture\r\n\t\tself.nlayers: int = rnn_layers\r\n\t\tself.hidden_word = None\r\n\t\t# dropouts\r\n\t\tself.use_dropout: float = dropout\r\n\t\tself.use_word_dropout: float = word_dropout\r\n\t\tself.use_locked_dropout: float = locked_dropout\r\n\r\n\t\tself.pickle_module = pickle_module\r\n\r\n\t\tif dropout > 0.0:\r\n\t\t\tself.dropout = torch.nn.Dropout(dropout)\r\n\r\n\t\tif word_dropout > 0.0:\r\n\t\t\tself.word_dropout = flair.nn.WordDropout(word_dropout)\r\n\r\n\t\tif locked_dropout > 0.0:\r\n\t\t\tself.locked_dropout = flair.nn.LockedDropout(locked_dropout)\r\n\r\n\t\trnn_input_dim: int = self.embeddings.embedding_length\r\n\r\n\t\tself.relearn_embeddings: bool = relearn_embeddings\r\n\r\n\t\tif self.relearn_embeddings:\r\n\t\t\tself.embedding2nn = torch.nn.Linear(rnn_input_dim + candidates, rnn_input_dim + candidates)\r\n\t\tif candidates == -1:\r\n\t\t\tpdb.set_trace()\r\n\t\tself.candidates = candidates\r\n\t\tself.hidden2score = torch.nn.Linear(rnn_input_dim + candidates, candidates)\r\n\r\n\t\tself.bidirectional = True\r\n\t\tself.rnn_type = \"LSTM\"\r\n\t\tif not self.use_rnn:\r\n\t\t\tself.bidirectional = False\r\n\t\t# bidirectional LSTM on top of embedding layer\r\n\t\tnum_directions = 1\r\n\r\n\t\tif self.use_rnn:\r\n\t\t\tself.rnn = BiLSTM(input_size=rnn_input_dim,\r\n\t\t\t\t\t\t\t hidden_size=hidden_size,\r\n\t\t\t\t\t\t\t num_layers=self.nlayers,\r\n\t\t\t\t\t\t\t dropout=self.lstm_dropout)\r\n\t\t\tself.lstm_dropout_func = SharedDropout(p=self.lstm_dropout)\r\n\r\n\t\t\tmlp_input_hidden = hidden_size * 2\r\n\t\telse:\r\n\t\t\tmlp_input_hidden = rnn_input_dim\r\n\r\n\t\t\r\n\t\t# self.criterion = nn.CrossEntropyLoss()\r\n\t\tself.criterion = nn.BCEWithLogitsLoss(reduction='none')\r\n\t\tif not testing:\r\n\t\t\tself.to(flair.device)\r\n\r\n\r\n\tdef _init_model_with_state_dict(state, testing = False):\r\n\t\tuse_dropout = 0.0 if not \"use_dropout\" in state.keys() else 
state[\"use_dropout\"]\r\n\t\tuse_word_dropout = (\r\n\t\t\t0.0 if not \"use_word_dropout\" in state.keys() else state[\"use_word_dropout\"]\r\n\t\t)\r\n\t\tuse_locked_dropout = (\r\n\t\t\t0.0\r\n\t\t\tif not \"use_locked_dropout\" in state.keys()\r\n\t\t\telse state[\"use_locked_dropout\"]\r\n\t\t)\r\n\t\tuse_cnn=state[\"use_cnn\"] if 'use_cnn' in state else False\r\n\t\tmodel = EnsembleModel(\r\n\t\t\thidden_size=state[\"hidden_size\"],\r\n\t\t\tembeddings=state[\"embeddings\"],\r\n\t\t\ttag_dictionary=state[\"tag_dictionary\"],\r\n\t\t\ttag_type=state[\"tag_type\"],\r\n\t\t\tuse_crf=state[\"use_crf\"],\r\n\t\t\tuse_rnn=state[\"use_rnn\"],\r\n\t\t\trnn_layers=state[\"rnn_layers\"],\r\n\t\t\tdropout=use_dropout,\r\n\t\t\tword_dropout=use_word_dropout,\r\n\t\t\tlocked_dropout=use_locked_dropout,\r\n\t\t\tconfig=state['config'] if \"config\" in state else None,\r\n\t\t\tword_map=None if 'word_map' not in state else state['word_map'],\r\n\t\t\tchar_map=None if 'char_map' not in state else state['char_map'],\r\n\t\t\trelearn_embeddings = True if 'relearn_embeddings' not in state else state['relearn_embeddings'],\r\n\t\t\ttesting = testing,\r\n\t\t\tcandidates = state['candidates']\r\n\t\t)\r\n\t\tmodel.load_state_dict(state[\"state_dict\"])\r\n\t\treturn model\r\n\tdef _get_state_dict(self):\r\n\t\tmodel_state = {\r\n\t\t\t\"state_dict\": self.state_dict(),\r\n\t\t\t\"embeddings\": self.embeddings,\r\n\t\t\t\"hidden_size\": self.hidden_size,\r\n\t\t\t\"tag_dictionary\":self.tag_dictionary,\r\n\t\t\t\"tag_type\":self.tag_type,\r\n\t\t\t\"use_crf\": self.use_crf,\r\n\t\t\t\"use_rnn\":self.use_rnn,\r\n\t\t\t\"rnn_layers\": self.rnn_layers,\r\n\t\t\t\"dropout\": self.use_dropout,\r\n\t\t\t\"word_dropout\": self.use_word_dropout,\r\n\t\t\t\"locked_dropout\": self.use_locked_dropout,\r\n\t\t\t\"config\": self.config,\r\n\t\t\t\"word_map\": self.word_map,\r\n\t\t\t\"char_map\": self.char_map,\r\n\t\t\t\"relearn_embeddings\": self.relearn_embeddings,\r\n\t\t\t\"candidates\": self.candidates,\r\n\t\t}\r\n\t\treturn model_state\r\n\tdef forward(self, sentences: List[Sentence], prediction_mode = False):\r\n\t\t# self.zero_grad()\r\n\t\t# pdb.set_trace()\r\n\t\tlengths: List[int] = [len(sentence.tokens) for sentence in sentences]\r\n\r\n\t\tlongest_token_sequence_in_batch: int = max(lengths)\r\n\r\n\t\tself.embeddings.embed(sentences)\r\n\t\tsentence_tensor = torch.cat([sentences.features[x].to(flair.device) for x in sorted(sentences.features.keys())],-1)\r\n\t\tsentence_tensor = torch.zeros_like(sentence_tensor)\r\n\t\tsystem_preds=torch.stack([getattr(sentence,self.tag_type+'_system_scores').to(flair.device) for sentence in sentences],0).float()\r\n\t\tsentence_tensor = torch.cat([sentence_tensor,system_preds],-1)\r\n\r\n\t\t# sentence_tensor=torch.stack([getattr(sentence,self.tag_type+'_system_scores').to(flair.device) for sentence in sentences],0).float()\r\n\t\t# if self.use_dropout > 0.0:\r\n\t\t# sentence_tensor = self.dropout(sentence_tensor)\r\n\t\tif self.use_word_dropout > 0.0:\r\n\t\t sentence_tensor = self.word_dropout(sentence_tensor)\r\n\t\t# if self.use_locked_dropout > 0.0:\r\n\t\t# sentence_tensor = self.locked_dropout(sentence_tensor)\r\n\r\n\t\tif self.relearn_embeddings:\r\n\t\t\tsentence_tensor = self.embedding2nn(sentence_tensor)\r\n\t\t\t# sentence_tensor = self.embedding2nn(sentence_tensor)\r\n\r\n\t\t# get the mask and lengths of given 
batch\r\n\t\tmask=self.sequence_mask(torch.tensor(lengths),longest_token_sequence_in_batch).cuda().type_as(sentence_tensor)\r\n\t\tself.mask=mask\r\n\t\t# convert hidden state to score of each embedding candidates\r\n\t\tscores = self.hidden2score(sentence_tensor)\r\n\t\treturn scores\r\n\r\n\tdef forward_loss(\r\n\t\tself, data_points: Union[List[Sentence], Sentence], sort=True\r\n\t) -> torch.tensor:\r\n\t\tscores = self.forward(data_points)\r\n\t\t# lengths = [len(sentence.tokens) for sentence in data_points]\r\n\t\t# longest_token_sequence_in_batch: int = max(lengths)\r\n\r\n\t\t# max_len = features.shape[1]\r\n\t\t# mask=self.sequence_mask(torch.tensor(lengths), max_len).cuda().type_as(features)\r\n\t\tloss = self._calculate_loss(scores, data_points, self.mask)\r\n\t\treturn loss\r\n\r\n\r\n\tdef sequence_mask(self, lengths, max_len=None):\r\n\t\t\"\"\"\r\n\t\tCreates a boolean mask from sequence lengths.\r\n\t\t\"\"\"\r\n\t\tbatch_size = lengths.numel()\r\n\t\tmax_len = max_len or lengths.max()\r\n\t\treturn (torch.arange(0, max_len)\r\n\t\t\t\t.type_as(lengths)\r\n\t\t\t\t.repeat(batch_size, 1)\r\n\t\t\t\t.lt(lengths.unsqueeze(1)))\r\n\r\n\tdef _calculate_loss(\r\n\t\tself, scores: torch.tensor, sentences: List[Sentence], mask: torch.tensor, return_arc_rel = False,\r\n\t) -> float:\r\n\r\n\t\tif self.binary:\r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\t# the system preds represents whether the tag is correct\r\n\t\t\tif hasattr(sentences,self.tag_type+'_system_preds'):\r\n\t\t\t\tsystem_preds=getattr(sentences,self.tag_type+'_system_preds').to(flair.device).long()\r\n\t\t\telse:\r\n\t\t\t\tsystem_preds=torch.stack([getattr(sentence,self.tag_type+'_system_preds').to(flair.device) for sentence in sentences],0).long()\r\n\t\t\t\r\n\t\t\tmask = mask.bool()\r\n\t\t\t\r\n\t\tloss = self.criterion(scores, system_preds.float()) * mask.unsqueeze(-1)\r\n\t\tloss = loss.sum()/mask.sum()\r\n\t\t# bce_loss = -(torch.log(torch.sigmoid(scores)) * system_preds + torch.log(1-torch.sigmoid(scores)) * (1-system_preds))\r\n\t\t# loss = 2 * ((1-self.interpolation) * arc_loss + self.interpolation * rel_loss)\r\n\r\n\r\n\t\t# score = torch.nn.functional.cross_entropy(features.view(-1,features.shape[-1]), tag_list.view(-1,), reduction='none') * mask.view(-1,)\r\n\r\n\r\n\r\n\t\t# if self.sentence_level_loss or self.use_crf:\r\n\t\t# score = score.sum()/features.shape[0]\r\n\t\t# else:\r\n\t\t# score = score.sum()/mask.sum()\r\n\t\t\t\r\n\t\t# score = (1-self.posterior_interpolation) * score + self.posterior_interpolation * posterior_score\r\n\t\treturn loss\r\n\r\n\tdef evaluate(\r\n\t\tself,\r\n\t\tdata_loader: DataLoader,\r\n\t\tout_path: Path = None,\r\n\t\tembeddings_storage_mode: str = \"cpu\",\r\n\t\tprediction_mode: bool = False,\r\n\t) -> (Result, float):\r\n\t\teval_loss = 0\r\n\t\tbatch_no = 0\r\n\t\tdata_loader.assign_embeddings()\r\n\t\tif out_path is not None:\r\n\t\t\toutfile = open(out_path, \"w\", encoding=\"utf-8\")\r\n\t\tif not self.binary:\r\n\t\t\tmetric = Metric(\"Evaluation\")\r\n\t\twith torch.no_grad():\r\n\t\t\tfor batch in data_loader:\r\n\t\t\t\tbatch_no+=1\r\n\t\t\t\tscores = self.forward(batch, prediction_mode=prediction_mode)\r\n\t\t\t\tloss = self._calculate_loss(scores, batch, self.mask)\r\n\t\t\t\teval_loss += loss\r\n\t\t\t\tif self.binary:\r\n\t\t\t\t\tpdb.set_trace()\r\n\t\t\t\t\tresult = Result(\r\n\t\t\t\t\t\tmain_score=LF1,\r\n\t\t\t\t\t\tlog_line=f\"\\nUF1: {UF1} - LF1 {LF1}\",\r\n\t\t\t\t\t\tlog_header=\"PRECISION\\tRECALL\\tF1\",\r\n\t\t\t\t\t\tdetailed_results=f\"\\nUF1: 
{UF1} - LF1 {LF1}\",\r\n\t\t\t\t\t)\r\n\t\t\t\telse:\r\n\t\t\t\t\t# if prediction_mode:\r\n\t\t\t\t\t# eval_loss, metric=self.dependency_evaluate(data_loader,out_path=out_path,prediction_mode=prediction_mode)\r\n\t\t\t\t\t# return eval_loss, metric\r\n\t\t\t\t\t# else: \r\n\r\n\t\t\t\t\ttags, _ = self._obtain_labels(scores, batch)\r\n\t\t\t\t\tfor (sentence, sent_tags) in zip(batch, tags):\r\n\t\t\t\t\t\tfor (token, tag) in zip(sentence.tokens, sent_tags):\r\n\t\t\t\t\t\t\ttoken: Token = token\r\n\t\t\t\t\t\t\ttoken.add_tag_label(\"predicted\", tag)\r\n\r\n\t\t\t\t\t\t\t# append both to file for evaluation\r\n\t\t\t\t\t\t\teval_line = \"{} {} {} {}\\n\".format(\r\n\t\t\t\t\t\t\t\ttoken.text,\r\n\t\t\t\t\t\t\t\ttoken.get_tag(self.tag_type).value,\r\n\t\t\t\t\t\t\t\ttag.value,\r\n\t\t\t\t\t\t\t\ttag.score,\r\n\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\t# lines.append(eval_line)\r\n\t\t\t\t\t\t\tif out_path is not None:\r\n\t\t\t\t\t\t\t\toutfile.write(eval_line)\r\n\t\t\t\t\t\t# lines.append(\"\\n\")\r\n\t\t\t\t\t\tif out_path is not None:\r\n\t\t\t\t\t\t\toutfile.write(\"\\n\")\r\n\t\t\t\t\tfor sentence in batch:\r\n\t\t\t\t\t\t# make list of gold tags\r\n\t\t\t\t\t\tgold_tags = [\r\n\t\t\t\t\t\t\t(tag.tag, str(tag)) for tag in sentence.get_spans(self.tag_type)\r\n\t\t\t\t\t\t]\r\n\t\t\t\t\t\t# make list of predicted tags\r\n\t\t\t\t\t\tpredicted_tags = [\r\n\t\t\t\t\t\t\t(tag.tag, str(tag)) for tag in sentence.get_spans(\"predicted\")\r\n\t\t\t\t\t\t]\r\n\r\n\t\t\t\t\t\t# check for true positives, false positives and false negatives\r\n\t\t\t\t\t\tfor tag, prediction in predicted_tags:\r\n\t\t\t\t\t\t\tif (tag, prediction) in gold_tags:\r\n\t\t\t\t\t\t\t\tmetric.add_tp(tag)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tmetric.add_fp(tag)\r\n\r\n\t\t\t\t\t\tfor tag, gold in gold_tags:\r\n\t\t\t\t\t\t\tif (tag, gold) not in predicted_tags:\r\n\t\t\t\t\t\t\t\tmetric.add_fn(tag)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tmetric.add_tn(tag)\r\n\t\teval_loss /= batch_no\r\n\t\tif out_path is not None:\r\n\t\t\toutfile.close()\r\n\t\tdetailed_result = (\r\n\t\t\tf\"\\nMICRO_AVG: acc {metric.micro_avg_accuracy()} - f1-score {metric.micro_avg_f_score()}\"\r\n\t\t\tf\"\\nMACRO_AVG: acc {metric.macro_avg_accuracy()} - f1-score {metric.macro_avg_f_score()}\"\r\n\t\t)\r\n\t\tfor class_name in metric.get_classes():\r\n\t\t\tdetailed_result += (\r\n\t\t\t\tf\"\\n{class_name:<10} tp: {metric.get_tp(class_name)} - fp: {metric.get_fp(class_name)} - \"\r\n\t\t\t\tf\"fn: {metric.get_fn(class_name)} - tn: {metric.get_tn(class_name)} - precision: \"\r\n\t\t\t\tf\"{metric.precision(class_name):.4f} - recall: {metric.recall(class_name):.4f} - \"\r\n\t\t\t\tf\"accuracy: {metric.accuracy(class_name):.4f} - f1-score: \"\r\n\t\t\t\tf\"{metric.f_score(class_name):.4f}\"\r\n\t\t\t)\r\n\r\n\t\tresult = Result(\r\n\t\t\tmain_score=metric.micro_avg_f_score(),\r\n\t\t\tlog_line=f\"{metric.precision()}\\t{metric.recall()}\\t{metric.micro_avg_f_score()}\",\r\n\t\t\tlog_header=\"PRECISION\\tRECALL\\tF1\",\r\n\t\t\tdetailed_results=detailed_result,\r\n\t\t)\r\n\t\treturn result, eval_loss\r\n\r\n\tdef _obtain_labels(\r\n\t\tself, system_preds, sentences, get_all_tags: bool = False\r\n\t) -> (List[List[Label]], List[List[List[Label]]]):\r\n\t\t\"\"\"\r\n\t\tReturns a tuple of two lists:\r\n\t\t - The first list corresponds to the most likely `Label` per token in each sentence.\r\n\t\t - The second list contains a probability distribution over all `Labels` for each token\r\n\t\t in a sentence for all sentences.\r\n\t\t\"\"\"\r\n\t\tlengths: 
List[int] = [len(sentence.tokens) for sentence in sentences]\r\n\t\ttags = []\r\n\t\tall_tags = []\r\n\t\tfeature = system_preds.argmax(-1)\r\n\t\tconfidences = system_preds.softmax(-1)\r\n\t\tfor i, vals in enumerate(zip(feature, lengths)):\r\n\t\t\tfeats, length=vals\r\n\t\t\t\r\n\t\r\n\t\t\ttag_list = [Label(token.system_preds[feats[token_id]], confidences[i][token_id][feats[token_id]]) for token_id, token in enumerate(sentences[i])]\r\n\t\t\ttags.append(tag_list.copy())\r\n\r\n\t\t\tif get_all_tags:\r\n\t\t\t\tpdb.set_trace()\r\n\t\t\t\tall_tags.append(\r\n\t\t\t\t\t[\r\n\t\t\t\t\t\t[\r\n\t\t\t\t\t\t\tLabel(\r\n\t\t\t\t\t\t\t\tself.tag_dictionary.get_item_for_index(score_id), score\r\n\t\t\t\t\t\t\t)\r\n\t\t\t\t\t\t\tfor score_id, score in enumerate(score_dist)\r\n\t\t\t\t\t\t]\r\n\t\t\t\t\t\tfor score_dist in scores\r\n\t\t\t\t\t]\r\n\t\t\t\t)\r\n\r\n\t\treturn tags, all_tags\r\n\r\n\r\n\tdef compute_F1(self, tp, fp, fn):\r\n\t\tprecision = tp/(tp+fp + 1e-12)\r\n\t\trecall = tp/(tp+fn + 1e-12)\r\n\t\treturn 2 * (precision * recall) / (precision + recall+ 1e-12)\r\n\r\n\r\n\[email protected]_grad()\r\n\tdef dependency_evaluate(self, loader, out_path=None, prediction_mode=False):\r\n\t\t# self.model.eval()\r\n\r\n\t\tloss, metric = 0, Metric()\r\n\t\t# total_start_time=time.time()\r\n\t\t# forward_time=0\r\n\t\t# loss_time=0\r\n\t\t# decode_time=0\r\n\t\t# punct_time=0\r\n\t\tlines=[]\r\n\t\tfor batch in loader:\r\n\t\t\tforward_start=time.time()\r\n\t\t\tarc_scores, rel_scores = self.forward(batch)\r\n\t\t\t# forward_end=time.time()\r\n\t\t\tmask = self.mask\r\n\t\t\tif not prediction_mode:\r\n\t\t\t\tloss += self._calculate_loss(arc_scores, rel_scores, batch, mask)\r\n\t\t\t# loss_end=time.time()\r\n\t\t\t# forward_time+=forward_end-forward_start\r\n\t\t\t# loss_time+=loss_end-forward_end\r\n\t\t\tmask=mask.bool()\r\n\t\t\t# decode_start=time.time()\r\n\t\t\tarc_preds, rel_preds = self.decode(arc_scores, rel_scores, mask)\r\n\t\t\t# decode_end=time.time()\r\n\t\t\t# decode_time+=decode_end-decode_start\r\n\t\t\t# ignore all punctuation if not specified\r\n\t\t\t# if out_path is not None:\r\n\t\t\t# pdb.set_trace()\r\n\t\t\tif not self.punct:\r\n\t\t\t\tfor sent_id,sentence in enumerate(batch):\r\n\t\t\t\t\tfor token_id, token in enumerate(sentence):\r\n\t\t\t\t\t\tupos=token.get_tag('upos').value\r\n\t\t\t\t\t\txpos=token.get_tag('pos').value\r\n\t\t\t\t\t\tword=token.text\r\n\t\t\t\t\t\tif is_punctuation(word,upos,self.punct_list) or is_punctuation(word,upos,self.punct_list):\r\n\t\t\t\t\t\t\tmask[sent_id][token_id]=0\r\n\t\t\t\t# mask &= words.unsqueeze(-1).ne(self.puncts).all(-1)\r\n\t\t\tif out_path is not None:\r\n\t\t\t\tfor (sent_idx, sentence) in enumerate(batch):\r\n\t\t\t\t\tfor token_idx, token in enumerate(sentence):\r\n\t\t\t\t\t\tif token_idx == 0:\r\n\t\t\t\t\t\t\tcontinue\r\n\r\n\t\t\t\t\t\t# append both to file for evaluation\r\n\t\t\t\t\t\teval_line = \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(\r\n\t\t\t\t\t\t\ttoken_idx,\r\n\t\t\t\t\t\t\ttoken.text,\r\n\t\t\t\t\t\t\t'X',\r\n\t\t\t\t\t\t\t'X',\r\n\t\t\t\t\t\t\t'X',\r\n\t\t\t\t\t\t\t'X',\r\n\t\t\t\t\t\t\tarc_preds[sent_idx,token_idx],\r\n\t\t\t\t\t\t\tself.tag_dictionary.get_item_for_index(rel_preds[sent_idx,token_idx]),\r\n\t\t\t\t\t\t\t'X',\r\n\t\t\t\t\t\t\t'X',\r\n\t\t\t\t\t\t)\r\n\t\t\t\t\t\tlines.append(eval_line)\r\n\t\t\t\t\tlines.append(\"\\n\")\r\n\t\t\t\t\r\n\t\t\t\r\n\t\t\tif not prediction_mode:\r\n\t\t\t\t# punct_end=time.time()\r\n\t\t\t\t# 
punct_time+=punct_end-decode_end\r\n\t\t\t\tmetric(arc_preds, rel_preds, self.arcs, self.rels, mask)\r\n\t\t# if out_path is not None:\r\n\t\t# with open(out_path, \"w\", encoding=\"utf-8\") as outfile:\r\n\t\t# outfile.write(\"\".join(lines))\r\n\t\tif prediction_mode:\r\n\t\t\treturn None, None\r\n\t\t# total_end_time=time.time()\r\n\t\t# print(total_start_time-total_end_time)\r\n\t\t# print(forward_time)\r\n\t\t# print(punct_time)\r\n\t\t# print(decode_time)\r\n\t\t\r\n\t\tloss /= len(loader)\r\n\r\n\t\treturn loss, metric\r\n\r\n\tdef decode(self, arc_scores, rel_scores, mask):\r\n\t\tarc_preds = arc_scores.argmax(-1)\r\n\t\tbad = [not istree(sequence, not self.is_mst)\r\n\t\t\t for sequence in arc_preds.tolist()]\r\n\t\tif self.tree and any(bad):\r\n\r\n\t\t\tarc_preds[bad] = eisner(arc_scores[bad], mask[bad])\r\n\t\t\t# if not hasattr(self,'dist') or self.is_mst:\r\n\t\t\t# dist = generate_tree(arc_scores,mask,is_mst=False)\r\n\t\t\t# else:\r\n\t\t\t# dist = self.dist\r\n\t\t\t# arc_preds=get_struct_predictions(dist)\r\n\t\t\t\r\n\r\n\t\t\t# deal with masking\r\n\t\t\t# if not (arc_preds*mask == result*mask).all():\r\n\t\t\t# pdb.set_trace()\r\n\r\n\t\trel_preds = rel_scores.argmax(-1)\r\n\t\trel_preds = rel_preds.gather(-1, arc_preds.unsqueeze(-1)).squeeze(-1)\r\n\r\n\t\treturn arc_preds, rel_preds\r\n\tdef get_state(self,):\r\n\t\treturn None", "import json\nimport os\n\nimport pandas as pd\nfrom dadmatools.datasets.base import BaseDataset, DatasetInfo, BaseIterator\nfrom dadmatools.datasets.dataset_utils import download_dataset, unzip_dataset, is_exist_dataset, DEFAULT_CACHE_DIR\n\nURL = 'https://www.gelbukh.com/resources/persent/v1/PerSent.xlsx'\nDATASET_NAME = \"PerSent\"\n\ndef PerSentLexicon(dest_dir=DEFAULT_CACHE_DIR):\n base_addr = os.path.dirname(__file__)\n info_addr = os.path.join(base_addr, 'info.py')\n DATASET_INFO = json.load(open(info_addr))\n dest_dir = os.path.join(dest_dir, DATASET_NAME)\n\n def get_persent_lexicon(addr):\n addr = os.path.join(addr, 'PerSent.xlsx')\n df = pd.read_excel(addr, 'Dataset')\n for index, row_cells in df.iterrows():\n yield {'word': row_cells[0], 'pos': row_cells[1], 'sentiment':row_cells[2]}\n\n if not is_exist_dataset(DATASET_INFO, dest_dir):\n downloaded_file = download_dataset(URL, dest_dir)\n dest_dir = unzip_dataset(downloaded_file, dest_dir)\n info = DatasetInfo(info_addr=info_addr)\n lexicon_size = DATASET_INFO['size']\n iterator = BaseIterator(get_persent_lexicon(dest_dir), num_lines=lexicon_size)\n dataset = BaseDataset(info)\n dataset.set_iterators(iterator)\n return dataset\n" ]
[ [ "torch.tensor", "torch.nn.Dropout", "torch.zeros" ], [ "numpy.concatenate", "numpy.max", "numpy.where" ], [ "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.ones" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.tight_layout", "matplotlib.use", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "matplotlib.pyplot.FormatStrFormatter", "matplotlib.pyplot.figure" ], [ "torch.nn.Dropout", "torch.max", "torch.zeros", "torch.cat", "torch.zeros_like", "torch.tensor", "torch.exp", "torch.nn.Linear", "torch.nn.BCEWithLogitsLoss", "torch.no_grad", "torch.arange" ], [ "pandas.read_excel" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "0.19", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
uesleisutil/model2roms
[ "813f889ca3dc0c5b71a7daf171b4a4bd72f85507" ]
[ "grd.py" ]
[ "from datetime import datetime\nfrom netCDF4 import Dataset\nimport numpy as np\n\nimport IOverticalGrid\n\ntry:\n import ESMF\nexcept ImportError:\n print(\"Could not find module ESMF\")\n pass\n\n\"\"\"\nTrond Kristiansen\nhttps://github.com/trondkr/model2roms\n\"\"\"\n\nclass Grd:\n\n def __init__(self, grdtype, confM2R):\n \"\"\"\n The object is initialised and created through the __init__ method\n As an example of how to use, these lines return a grid object called grdTEST:\n => import grd\n => grdTEST = grd.grdClass(\"grdfilename\",\"ROMS\")\n \"\"\"\n self.type = grdtype\n self.grdName = confM2R.outgrid\n self.realm = confM2R.realm\n self.grdfilename = None\n\n def opennetcdf(self, grdfilename):\n \n self.grdfilename = grdfilename\n \"\"\"Open the netCDF file and store the contents in arrays associated with variable names\"\"\"\n try:\n self.cdf = Dataset(self.grdfilename, \"r\")\n\n except IOError:\n print('Could not open file {}'.format(self.grdfilename))\n print('Exception caught in: opennetcdf(grdfilename)')\n\n def createobject(self, confM2R):\n \"\"\"\n This method creates a new object by reading the grd input file. All\n dimensions (eta, xi, lon, lat etc.) are defined here and used througout these scripts.\n Also, the depth matrix is calculated in this function by calling IOverticalGrid.py (ROMS grid only). For\n input model depths, the depth array is a one dimensional. If input data has 2 or 3 dimensions, this\n has to be accounted for througout the soda2roms package as one dimension is currently only supported.\n \"\"\"\n if self.type == 'FORCINGDATA':\n self.lon = self.cdf.variables[str(confM2R.lonname)][:]\n self.lat = self.cdf.variables[str(confM2R.latname)][:]\n self.h = self.cdf.variables[str(confM2R.depthname)][:]\n self.nlevels = len(self.h)\n self.fillval = -9.99e+33\n\n if self.lon.ndim == 1:\n self.lon, self.lat = np.meshgrid(self.lon, self.lat)\n\n # Create grid for ESMF interpolation\n if confM2R.useesmf:\n self.esmfgrid = ESMF.Grid(filename=self.grdfilename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True, coord_names=[str(confM2R.lonname), str(confM2R.latname)],\n add_mask=False)\n self.esmfgrid_u = ESMF.Grid(filename=self.grdfilename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True, coord_names=[str(confM2R.lonname_u), str(confM2R.latname_u)],\n add_mask=False)\n self.esmfgrid_v = ESMF.Grid(filename=self.grdfilename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True, coord_names=[str(confM2R.lonname_v), str(confM2R.latname_v)],\n add_mask=False)\n\n if confM2R.indatatype == 'SODA3':\n self.fillval = -1.e+20\n if confM2R.indatatype == 'GLORYS':\n self.fillval = 9.96921e+36\n \n IOverticalGrid.get_z_levels(self)\n\n if self.type in ['ROMS']:\n\n self.write_clim = True\n self.write_bry = True\n self.write_init = True\n\n self.lonname = 'lon_rho'\n self.latname = 'lat_rho'\n\n \"\"\"Set initTime to 1 if you dont want the first timestep to be\n the initial field (no ubar and vbar if time=0)\"\"\"\n\n self.inittime = 0\n self.ocean_time = 0\n self.NT = 2\n self.tracer = self.NT\n\n self.message = None\n self.time = 0\n self.reftime = 0\n self.grdtype = 'regular'\n self.mask_rho = self.cdf.variables[\"mask_rho\"][:, :]\n self.lon_rho = self.cdf.variables[\"lon_rho\"][:, :]\n self.lat_rho = self.cdf.variables[\"lat_rho\"][:, :]\n self.h = self.cdf.variables[\"h\"][:, :]\n self.hmin = self.h[self.h > 0].min()\n self.vtransform = confM2R.vtransform\n self.nlevels = confM2R.nlevels\n self.vstretching = confM2R.vstretching \n self.theta_s = confM2R.theta_s\n 
self.theta_b = confM2R.theta_b\n self.tcline = confM2R.tcline\n self.hc = confM2R.hc\n if self.vtransform == 1:\n self.hc = min(self.hmin, self.tcline)\n self.hc = self.tcline\n if (self.tcline > self.hmin):\n print('Vertical transformation parameters are not defined correctly in either gridid.txt or in the history files: \\n Tc\\\nline = %d and hmin = %d. \\n You need to make sure that tcline <= hmin when using transformation 1.' % (\n self.tcline, self.hmin))\n else:\n self.hc = self.tcline\n\n zeta = None\n if zeta is None:\n self.zeta = np.zeros(self.h.shape)\n else:\n self.zeta = zeta\n\n # for findvar in self.cdf.variables:\n # if findvar==\"hraw\":\n # self.hraw = self.cdf.variables[\"hraw\"][:,:,:]\n\n self.lon_u = self.cdf.variables[\"lon_u\"][:, :]\n self.lat_u = self.cdf.variables[\"lat_u\"][:, :]\n self.mask_u = self.cdf.variables[\"mask_u\"][:, :]\n for findvar in self.cdf.variables:\n if findvar == \"lon_vert\":\n self.lon_vert = self.cdf.variables[\"lon_vert\"][:, :]\n self.lat_vert = self.cdf.variables[\"lat_vert\"][:, :]\n\n for findvar in self.cdf.variables:\n if findvar == \"x_rho\":\n self.x_rho = self.cdf.variables[\"x_rho\"][:, :]\n self.y_rho = self.cdf.variables[\"y_rho\"][:, :]\n\n for findvar in self.cdf.variables:\n if findvar == \"x_u\":\n self.x_u = self.cdf.variables[\"x_u\"][:, :]\n self.y_u = self.cdf.variables[\"y_u\"][:, :]\n\n for findvar in self.cdf.variables:\n if findvar == \"x_v\":\n self.x_v = self.cdf.variables[\"x_v\"][:, :]\n self.y_v = self.cdf.variables[\"y_v\"][:, :]\n\n for findvar in self.cdf.variables:\n if findvar == \"x_psi\":\n self.x_psi = self.cdf.variables[\"x_psi\"][:, :]\n self.y_psi = self.cdf.variables[\"y_psi\"][:, :]\n\n for findvar in self.cdf.variables:\n if findvar == \"x_vert\":\n self.x_vert = self.cdf.variables[\"x_vert\"][:, :]\n self.y_vert = self.cdf.variables[\"y_vert\"][:, :]\n\n for findvar in self.cdf.variables:\n if findvar == \"xl\":\n self.xl = self.cdf.variables[\"xl\"][:]\n self.el = self.cdf.variables[\"el\"][:]\n\n for findvar in self.cdf.variables:\n if findvar == \"dmde\":\n self.dmde = self.cdf.variables[\"dmde\"][:, :]\n self.dndx = self.cdf.variables[\"dndx\"][:, :]\n\n self.lon_v = self.cdf.variables[\"lon_v\"][:, :]\n self.lat_v = self.cdf.variables[\"lat_v\"][:, :]\n self.mask_v = self.cdf.variables[\"mask_v\"][:, :]\n\n self.spherical = self.cdf.variables[\"spherical\"][:]\n\n self.lon_psi = self.lon_u[:-1, :]\n self.lat_psi = self.lat_v[:, :-1]\n self.mask_psi = self.mask_v[:, :-1]\n\n self.f = self.cdf.variables[\"f\"][:, :]\n self.angle = self.cdf.variables[\"angle\"][:, :]\n\n self.pm = self.cdf.variables[\"pm\"][:, :]\n self.invpm = 1.0 / np.asarray(self.cdf.variables[\"pm\"][:, :])\n self.pn = self.cdf.variables[\"pn\"][:, :]\n self.invpn = 1.0 / np.asarray(self.cdf.variables[\"pn\"][:, :])\n\n self.Lp = len(self.lat_rho[1, :])\n self.Mp = len(self.lat_rho[:, 1])\n\n self.fillval = -9.99e33\n\n self.eta_rho = self.Mp\n self.eta_u = self.Mp\n self.eta_v = self.Mp - 1\n self.eta_psi = self.Mp - 1\n self.xi_rho = self.Lp\n self.xi_u = self.Lp - 1\n self.xi_v = self.Lp\n self.xi_psi = self.Lp - 1\n\n \"\"\"Boolean to check if we need to initialize the CLIM file before writing\"\"\"\n self.ioClimInitialized = False\n self.ioInitInitialized = False\n\n if self.lon_rho.ndim == 1:\n self.lon_rho, self.lat_rho = np.meshgrid(self.lon_rho, self.lat_rho)\n self.lon_u, self.lat_u = np.meshgrid(self.lon_u, self.lat_u)\n self.lon_v, self.lat_v = np.meshgrid(self.lon_v, self.lat_v)\n\n \"\"\"Setup the vertical 
coordinate system\"\"\"\n IOverticalGrid.calculateVgrid(self)\n\n if (confM2R.useesmf):\n self.esmfgrid_u = ESMF.Grid(filename=self.grdfilename, filetype=ESMF.FileFormat.GRIDSPEC,\n coord_names=['lon_u', 'lat_u'], add_mask=False)\n self.esmfgrid_v = ESMF.Grid(filename=self.grdfilename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True, coord_names=['lon_v', 'lat_v'], add_mask=False)\n self.esmfgrid = ESMF.Grid(filename=self.grdfilename, filetype=ESMF.FileFormat.GRIDSPEC,\n is_sphere=True, coord_names=[self.lonname, self.latname], add_mask=False)\n\n def getdims(self):\n if self.type in [\"ROMS\"]:\n self.Lp = len(self.lat_rho[1, :])\n self.Mp = len(self.lat_rho[:, 1])\n\n if self.type in ['FORCINGDATA']:\n self.Lp = len(self.lat[1, :])\n self.Mp = len(self.lat[:, 1])\n\n self.M = self.Mp - 1\n self.L = self.Lp - 1\n" ]
[ [ "numpy.asarray", "numpy.meshgrid", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mghcdac/SAH_DCI_Prediction_EEG
[ "a8851bab67d6d3bd5ded39a9d5fb9c3ec1d99607" ]
[ "step1_process_each_file_bipolar.py" ]
[ "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis program is free software: you can redistribute it and/or modify\r\nit under the terms of the GNU General Public License as published by\r\nthe Free Software Foundation, either version 3 of the License, or\r\n(at your option) any later version.\r\nThis program is distributed in the hope that it will be useful,\r\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\r\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\nGNU General Public License for more details.\r\nYou should have received a copy of the GNU General Public License\r\nalong with this program. If not, see <http://www.gnu.org/licenses/>.\r\n\"\"\"\r\n\r\n\r\nimport pdb\r\nimport datetime\r\nimport time\r\nfrom collections import Counter#, deque\r\nimport os\r\nimport os.path\r\nimport pickle\r\nimport sys\r\nimport subprocess\r\nimport scipy\r\n#import matlab.engine\r\nimport h5py\r\nimport hdf5storage as hs\r\nimport scipy.io as sio\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom segment_EEG import *\r\nfrom segment_EEG_without_detection import *\r\n#from mne import *\r\nimport mne as mne\r\nimport math\r\n\r\nFs = 200.\r\n#assess_time_before = 1800 # [s]\r\n#assess_time_after = 1800 # [s]\r\nwindow_time = 5 # [s]\r\nwindow_step = 5 # [s]\r\n#sub_window_time = 5 # [s] for calculating features\r\n#sub_window_step = 1 # [s]\r\nstart_end_remove_window_num = 0\r\namplitude_thres = 500 #500 # [uV]\r\nline_freq = 60. # [Hz]\r\nbandpass_freq = [0.5, 30.] # [Hz]\r\ntostudy_freq = [0.5, 30.] # [Hz]\r\n#available_channels = ['Fp1', 'F3', 'C3', 'P3', 'F7', 'T7', 'P7', 'O1', 'Fz', 'Cz', 'Pz', 'Fp2', 'F4', 'C4', 'P4', 'F8', 'T8', 'P8', 'O2']\r\navailable_channels = ['EEG Fp1', 'EEG F3', 'EEG C3', 'EEG P3', 'EEG F7', 'EEG T3', 'EEG T5', 'EEG O1', 'EEG Fz', 'EEG Cz', 'EEG Pz', 'EEG Fp2', 'EEG F4', 'EEG C4', 'EEG P4', 'EEG F8', 'EEG T4', 'EEG T6', 'EEG O2'] # MGH BWH ULB\r\nbipolar_channels = ['Fp1-F7', 'F7-T3', 'T3-T5', 'T5-O1', 'Fp2-F8', 'F8-T4', 'T4-T6', 'T6-O2', 'Fp1-F3', 'F3-C3', 'C3-P3', 'P3-O1', 'Fp2-F4', 'F4-C4', 'C4-P4', 'P4-O2', 'Fz-Cz', 'Cz-Pz']\r\n#available_channels = ['EEGFP1_', 'EEGFP2_', 'EEGFPZ_', 'EEGF7__', 'EEGF8__']\r\n#eeg_channels = ['C3', 'C4', 'O1', 'O2', 'CZ', 'F3', 'F4', 'F7', 'F8', 'FZ', 'FP1', 'FP2', 'FPZ', 'P3', 'P4', 'PZ', 'T3', 'T4', 'T5', 'T6']#['Fp1-F7','Fp2-F8','Fp1-Fp2','F7-F8']#'Fp1','Fp2','F7','F8',\r\n#algorithms = ['cnn_lstm_ae', 'lstm', 'dnn_clf', 'dnn_ord', 'moe_dnn']#'RandomForest','SVM','ELM']'blr', 'dnn_reg', 'logreg', \r\nrandom_state = 1\r\n#normal_only = False\r\n#labeled_only = False\r\n\r\nseg_mask_explanation = np.array([\r\n 'normal',\r\n 'NaN in EEG', #_[1,3] (append channel ids)\r\n 'overly high/low amplitude',\r\n 'flat signal',\r\n 'NaN in feature',\r\n 'NaN in spectrum',\r\n 'overly high/low total power',\r\n 'muscle artifact',\r\n 'multiple assessment scores',\r\n 'spurious spectrum',\r\n 'fast rising decreasing',\r\n '1Hz artifact',])\r\n\r\nif __name__=='__main__':\r\n #\"\"\"\r\n ##################\r\n # use data_list_paths to specify what data to use\r\n # data_list.txt:\r\n # data_path spec_path feature_path state\r\n # eeg1.mat specs1.mat Features1.mat good\r\n # ...\r\n # note: eeg segments are contained in features\r\n ##################\r\n #file = \"D:\\\\Research\\\\Cardiac_arrest_EEG\\\\EEG_weiLong_arrest_test_artifact\\\\bwh_7_1_2_20130201_205011.edf\"\r\n# file_path = \"D:\\\\Research\\\\Cardiac_arrest_EEG\\\\EEG_weiLong_arrest_test_artifact\\\\forWeilong\\\\\"\r\n# save_path 
= \"D:\\\\Research\\\\Cardiac_arrest_EEG\\\\EEG_weiLong_arrest_test_artifact\\\\Preprocessed\\\\\"\r\n file_path = \"Z:\\\\Datasets_ConvertedData\\\\ARed4Jenn\\\\ARed_output_2017_new\\\\\"\r\n save_path = \"Z:\\\\Projects\\\\Weilong\\\\SAH_DCI\\\\Preprocessed\\\\\"\r\n file_list = [f for f in os.listdir(file_path)]\r\n\r\n #file_list = sorted(file_list, reverse=True)\r\n #import pdb;pdb.set_trace()\r\n #file_list = os.listdir(file_path)\r\n file_list = file_list[727:]\r\n \r\n for ifile in file_list:\r\n file = file_path + ifile + \"\\\\\" + ifile + \"_ARed.edf\"\r\n print(file)\r\n #import pdb;pdb.set_trace()\r\n# if os.path.isfile(save_path+ifile+'.mat'):\r\n# continue\r\n# else:\r\n# try:\r\n #data = mne.io.read_raw_edf(file,preload=True)\r\n data = mne.io.read_raw_edf(file,stim_channel=None,exclude='EDF Annotations',preload=True)\r\n raw_data = data.get_data(picks=range(23))\r\n #import pdb;pdb.set_trace()\r\n info = data.info\r\n fs = info['sfreq']\r\n #raw_data = scipy.signal.resample(raw_data, int(math.floor(raw_data.shape[1]*Fs/fs)),axis=1)\r\n if fs!=Fs:\r\n raw_data = scipy.signal.resample_poly(raw_data, Fs, fs, axis=1)\r\n #raw_data = mne.filter.resample(raw_data, down=fs/Fs, npad='auto')\r\n raw_data = raw_data*10e5 # V->uV\r\n \r\n channels = data.ch_names\r\n channels = [x.upper() for x in channels]\r\n chan_index = list()\r\n for chNo in available_channels:\r\n chan_index.append(channels.index(chNo.upper()))\r\n raw_data = raw_data[chan_index,:]\r\n \r\n \r\n \r\n ## Bipolar reference\r\n bipolar_data = np.zeros((18,raw_data.shape[1]))\r\n bipolar_data[8,:] = raw_data[0,:] - raw_data[1,:]; # Fp1-F3\r\n bipolar_data[9,:] = raw_data[1,:] - raw_data[2,:]; # F3-C3\r\n bipolar_data[10,:] = raw_data[2,:] - raw_data[3,:]; # C3-P3\r\n bipolar_data[11,:] = raw_data[3,:] - raw_data[7,:]; # P3-O1\r\n \r\n bipolar_data[12,:] = raw_data[11,:] - raw_data[12,:]; # Fp2-F4\r\n bipolar_data[13,:] = raw_data[12,:] - raw_data[13,:]; # F4-C4\r\n bipolar_data[14,:] = raw_data[13,:] - raw_data[14,:]; # C4-P4\r\n bipolar_data[15,:] = raw_data[14,:] - raw_data[18,:]; # P4-O2\r\n \r\n bipolar_data[0,:] = raw_data[0,:] - raw_data[4,:]; # Fp1-F7\r\n bipolar_data[1,:] = raw_data[4,:] - raw_data[5,:]; # F7-T3\r\n bipolar_data[2,:] = raw_data[5,:] - raw_data[6,:]; # T3-T5\r\n bipolar_data[3,:] = raw_data[6,:] - raw_data[7,:]; # T5-O1\r\n \r\n bipolar_data[4,:] = raw_data[11,:] - raw_data[15,:]; # Fp2-F8\r\n bipolar_data[5,:] = raw_data[15,:] - raw_data[16,:]; # F8-T4\r\n bipolar_data[6,:] = raw_data[16,:] - raw_data[17,:]; # T4-T6\r\n bipolar_data[7,:] = raw_data[17,:] - raw_data[18,:]; # T6-O2\r\n \r\n bipolar_data[16,:] = raw_data[8,:] - raw_data[9,:]; # Fz-Cz\r\n bipolar_data[17,:] = raw_data[9,:] - raw_data[10,:]; # Cz-Pz\r\n \r\n ## save 5s monopolar/bipolar epoches using notch/band pass/artifact detection/resampling\r\n segs_monpolar = segment_EEG_without_detection(raw_data,available_channels,window_time, window_step, Fs,\r\n notch_freq=line_freq, bandpass_freq=bandpass_freq,\r\n to_remove_mean=False, amplitude_thres=amplitude_thres, n_jobs=-1, start_end_remove_window_num=start_end_remove_window_num)\r\n del raw_data\r\n segs_, bs_, seg_start_ids_, seg_mask, specs_, freqs_ = segment_EEG(bipolar_data,bipolar_channels,window_time, window_step, Fs,\r\n notch_freq=line_freq, bandpass_freq=bandpass_freq,\r\n to_remove_mean=False, amplitude_thres=amplitude_thres, n_jobs=-1, start_end_remove_window_num=start_end_remove_window_num)\r\n \r\n if len(segs_) <= 0:\r\n raise ValueError('No segments')\r\n \r\n 
seg_mask2 = map(lambda x:x.split('_')[0], seg_mask)\r\n sm = Counter(seg_mask2)\r\n for ex in seg_mask_explanation:\r\n if ex in sm:\r\n print('%s: %d/%d, %g%%'%(ex,sm[ex],len(seg_mask),sm[ex]*100./len(seg_mask)))\r\n \r\n if segs_.shape[0]<=0:\r\n raise ValueError('No EEG signal')\r\n if segs_.shape[1]!=len(bipolar_channels):\r\n raise ValueError('Incorrect #chanels')\r\n \r\n fd = os.path.split(save_path)[0]\r\n if not os.path.exists(fd):\r\n os.mkdir(fd)\r\n res = {'EEG_segs_bipolar':segs_.astype('float16'),\r\n 'EEG_segs_monopolar':segs_monpolar.astype('float16'),\r\n 'EEG_specs':specs_.astype('float16'),\r\n 'burst_suppression':bs_.astype('float16'),\r\n 'EEG_frequency':freqs_,\r\n 'seg_start_ids':seg_start_ids_,\r\n 'Fs':Fs,\r\n 'seg_masks':seg_mask,\r\n 'channel_names':bipolar_channels}\r\n sio.savemat(save_path+ifile, res, do_compression=True)\r\n \r\n# except Exception as e:\r\n# continue\r\n \r\n# \r\n# data_list_paths = ['data/data_list.txt']\r\n# subject_files = np.zeros((0,5))\r\n# for data_list_path in data_list_paths:\r\n# subject_files = np.r_[subject_files, np.loadtxt(data_list_path, dtype=str, delimiter='\\t', skiprows=1)]\r\n# subject_files = subject_files[subject_files[:,4]=='good',:4]\r\n# patient_ids = np.array([[x for x in xx.split('/') if x.startswith('icused')][0] for xx in subject_files[:,0]])\r\n# t0s = np.array([datenum(t0str, '%Y-%m-%d %H:%M:%S.%f', return_seconds=True) for t0str in subject_files[:,2]])\r\n# t1s = np.array([datenum(t1str, '%Y-%m-%d %H:%M:%S.%f', return_seconds=True) for t1str in subject_files[:,3]])\r\n# \"\"\"\r\n# # get the recording interval distribution\r\n# dists = []\r\n# for pid in np.unique(patient_ids):\r\n# tt0 = t0s[patient_ids==pid]\r\n# tt1 = t1s[patient_ids==pid]\r\n# ids = np.argsort(tt0)\r\n# tt0 = tt0[ids]\r\n# tt1 = tt1[ids]\r\n# assert np.all(np.diff(tt1)>0)\r\n# assert np.all(tt1-tt0>0)\r\n# dists.extend(tt0[1:]-tt1[:-1])\r\n# plt.hist(np.log1p(dists),bins=50);plt.show()\r\n# \"\"\"\r\n# record_num = subject_files.shape[0]\r\n# \r\n# subject_err_path = 'data/err_subject_reason.txt'\r\n# if os.path.isfile(subject_err_path):\r\n# err_subject_reason = []\r\n# with open(subject_err_path,'r') as f:\r\n# for row in f:\r\n# if row.strip()=='':\r\n# continue\r\n# i = row.split(':::')\r\n# err_subject_reason.append([i[0].strip(), i[1].strip()])\r\n# err_subject = [i[0] for i in err_subject_reason]\r\n# else:\r\n# err_subject_reason = []\r\n# err_subject = []\r\n#\r\n# all_rass_times = np.loadtxt('data/rass_times.txt', dtype=str, delimiter='\\t', skiprows=1)\r\n# all_camicu_times = pd.read_csv('data/vICU_Sed_CamICU.csv', sep=',')\r\n# for si in range(record_num):\r\n# data_path = subject_files[si,0]\r\n# feature_path = subject_files[si,1]\r\n# t0 = t0s[si]\r\n# t1 = t1s[si]\r\n# #assert t1 == t0+res['data'].shape[1]*1./Fs\r\n# patient_id = patient_ids[si]\r\n# subject_file_name = os.path.join(patient_id, data_path.split('/')[-1])\r\n# if subject_file_name in err_subject:\r\n# continue\r\n# if os.path.isfile(feature_path):\r\n# print('\\n[(%d)/%d] %s %s'%(si+1,record_num,subject_file_name.replace('.mat',''),datetime.datetime.now()))\r\n# else:\r\n# print('\\n[%d/%d] %s %s'%(si+1,record_num,subject_file_name.replace('.mat',''),datetime.datetime.now()))\r\n# try:\r\n# # check and load dataset\r\n# res = read_delirium_mat(data_path, channel_names=available_channels)#, with_time=False)\r\n# if res['Fs']<Fs-1 or res['Fs']>Fs+1:\r\n# raise ValueError('Fs is not %gHz.'%Fs)\r\n# #if res['data'].shape[1]<Fs*3600*0.5:\r\n# # raise 
ValueError('Recording is less than 30min.')\r\n# #dt = (t1-t0)-res['data'].shape[1]*1./Fs\r\n# #if np.abs(dt) >= 300:\r\n# # raise TypeError('Miss-matched t0 and t1 in %s: %gs'%(subject_file_name,dt))\r\n#\r\n# # segment EEG\r\n# segs_, bs_, labels_, assessment_times_, seg_start_ids_, seg_mask, specs_, freqs_ = segment_EEG(res['data'],\r\n# all_rass_times[all_rass_times[:,1]==patient_id,:],\r\n# all_camicu_times[all_camicu_times.PatientID==patient_id].values,\r\n# assess_time_before, assess_time_after,\r\n# [t0,t1], t0s[patient_ids==patient_id], t1s[patient_ids==patient_id], window_time, window_step, Fs,\r\n# notch_freq=line_freq, bandpass_freq=bandpass_freq,\r\n# to_remove_mean=False, amplitude_thres=amplitude_thres, n_jobs=-1, start_end_remove_window_num=start_end_remove_window_num)\r\n# if len(segs_) <= 0:\r\n# raise ValueError('No segments')\r\n# #bs_ = (bs_<=5).sum(axis=2)/1000.\r\n# \r\n# if labeled_only:\r\n# raise NotImplementedError('labeled_only==True')\r\n# good_ids = np.where(np.logical_not(np.isnan(labels_)))[0]\r\n# segs_ = segs_[good_ids]\r\n# bs_ = bs_[good_ids]\r\n# labels_ = labels_[good_ids]\r\n# assessment_times_ = [assessment_times_[ii] for ii in good_ids]\r\n# seg_start_ids_ = seg_start_ids_[good_ids]\r\n# seg_mask = [seg_mask[ii] for ii in good_ids]\r\n# specs_ = specs_[good_ids]\r\n# #specs_matlab = specs_matlab[:,:,good_ids]\r\n# \r\n# \"\"\"\r\n# # muscle artifacts\r\n# specs_matlab = 10*np.log(specs_.T)\r\n# specs_matlab[np.isinf(specs_matlab)] = np.nan\r\n# specs_matlab_mean = np.nanmean(specs_matlab, axis=2, keepdims=True)\r\n# specs_matlab = specs_matlab - specs_matlab_mean\r\n# sio.savemat('segs.mat', {'segs':segs_.transpose(1,2,0), 'Fs':Fs, 'specs':specs_matlab})#, 'specs_orig':specs_.T\r\n# with open('matlab_output.txt','w') as ff:\r\n# subprocess.check_call([MATLAB_BIN_PATH, '<', MATLAB_CODE_PATH], stdout=ff)\r\n# muscle_rej_ch2d = sio.loadmat('rej.mat')['rejE'].T==1 # (#sample, #channel) \r\n# muscle_rej_ch1d = np.where(np.any(muscle_rej_ch2d,axis=1))[0]\r\n# for i in muscle_rej_ch1d:\r\n# seg_mask[i] = '%s_%s'%(seg_mask_explanation[7], np.where(muscle_rej_ch2d[i])[0])\r\n# \"\"\"\r\n# \r\n# #segs_ = segs_[:3]\r\n# #features_, feature_names = extract_features(segs_, Fs, sub_window_time, tostudy_freq,\r\n# # sub_window_time, sub_window_step, data_path,\r\n# # seg_start_ids_,\r\n# # return_feature_names=True, n_jobs=-1)#, specs_, freqs_\r\n#\r\n# print('\\n%s\\n'%Counter(labels_[np.logical_not(np.isnan(labels_))]))\r\n# #bsp = features_[:,-1] \r\n# #print('BSP\\nmax: %g\\nmin: %g\\n'%(np.max(bsp),np.min(bsp)))\r\n# #if np.max(bsp)-np.min(bsp)<=1e-3:\r\n# # raise ValueError('Flat BSP')\r\n# \r\n# #features_[np.isinf(features_)] = np.nan\r\n# #nan1d = np.where(np.any(np.isnan(features_),axis=1))[0]\r\n# #for i in nan1d:\r\n# # seg_mask[i] = seg_mask_explanation[4]\r\n# \r\n# #seg_mask = np.array(seg_mask)\r\n# seg_mask2 = map(lambda x:x.split('_')[0], seg_mask)\r\n# sm = Counter(seg_mask2)\r\n# for ex in seg_mask_explanation:\r\n# if ex in sm:\r\n# print('%s: %d/%d, %g%%'%(ex,sm[ex],len(seg_mask),sm[ex]*100./len(seg_mask)))\r\n# \r\n# if normal_only:\r\n# good_ids = np.where(np.array(seg_mask)=='normal')[0]\r\n# segs_ = segs_[good_ids]\r\n# bs_ = bs_[good_ids]\r\n# labels_ = labels_[good_ids]\r\n# assessment_times_ = [assessment_times_[ii] for ii in good_ids]\r\n# seg_start_ids_ = seg_start_ids_[good_ids]\r\n# seg_mask = [seg_mask[ii] for ii in good_ids]\r\n# specs_ = specs_[good_ids]\r\n# #specs_matlab = specs_matlab[:,:,good_ids]\r\n# \r\n# if 
segs_.shape[0]<=0:\r\n# raise ValueError('No EEG signal')\r\n# if segs_.shape[1]!=len(eeg_channels):\r\n# raise ValueError('Incorrect #chanels')\r\n#\r\n# except Exception as e:\r\n# \"\"\"\r\n# err_info = e.message.split('\\n')[0].strip()\r\n# print('\\n%s.\\nSubject %s is IGNORED.\\n'%(err_info,subject_file_name))\r\n# err_subject_reason.append([subject_file_name,err_info])\r\n# err_subject.append(subject_file_name)\r\n#\r\n# with open(subject_err_path,'a') as f:\r\n# msg_ = '%s::: %s\\n'%(subject_file_name,err_info)\r\n# f.write(msg_)\r\n# \"\"\"\r\n# continue\r\n#\r\n# fd = os.path.split(feature_path)[0]\r\n# if not os.path.exists(fd):\r\n# os.mkdir(fd)\r\n# res = {'EEG_segs':segs_.astype('float32'),\r\n# 'EEG_specs':specs_.astype('float32'),\r\n# 'burst_suppression':bs_.astype('float32'),\r\n# 'EEG_frequency':freqs_,\r\n# #'EEG_features':features_,\r\n# #'feature_names':feature_names\r\n# 't0':subject_files[si,2],\r\n# 't1':subject_files[si,3],\r\n# 'labels':labels_,\r\n# 'assess_times':assessment_times_,\r\n# 'seg_start_ids':seg_start_ids_,\r\n# 'subject':subject_file_name,\r\n# 'Fs':Fs}\r\n# if not normal_only:\r\n# res['seg_masks'] = seg_mask\r\n# sio.savemat(feature_path, res, do_compression=True)\r\n# res = sio.loadmat(feature_path)\r\n# os.remove(feature_path)\r\n# time.sleep(1)\r\n# hs.savemat(feature_path, res)\r\n# \r\n#\r\n" ]
[ [ "numpy.array", "numpy.zeros", "scipy.signal.resample_poly", "scipy.io.savemat" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "0.19", "1.5", "0.18", "1.7", "1.0", "1.2", "1.8" ], "tensorflow": [] } ]
drcut/QPyTorch
[ "63c293178e8ce9e6e5b218dee96536e9c4ad1e5c" ]
[ "test/test_device.py" ]
[ "import torch\nimport unittest\nimport qtorch\nfrom qtorch.quant import block_quantize, fixed_point_quantize, float_quantize\nfrom qtorch import FixedPoint, BlockFloatingPoint, FloatingPoint\n\nclass TestDevice(unittest.TestCase):\n \"\"\"\n invariant: cuda and cpp implementation should behave the same\n \"\"\"\n def error(self, cuda_t, cpu_t):\n return ((cuda_t.cpu()-cpu_t)**2).sum().item()\n\n def test_fixed_point(self):\n for wl, fl in [(5,4), (3,2)]:\n for rounding in [\"nearest\"]:\n t_max = 1-(2**(-fl))\n to_quantize_cuda = torch.linspace(-t_max, t_max, steps=20, device='cuda')\n to_quantize_cpu = to_quantize_cuda.clone().to(\"cpu\")\n fixed_quantized_cuda = fixed_point_quantize(to_quantize_cuda, wl=wl, fl=fl, rounding=rounding)\n fixed_quantized_cpu = fixed_point_quantize(to_quantize_cpu, wl=wl, fl=fl, rounding=rounding)\n mse = self.error(fixed_quantized_cuda, fixed_quantized_cpu)\n self.assertTrue(mse<1e-15)\n # self.assertTrue(torch.eq(fixed_quantized_cuda.cpu(), fixed_quantized_cpu).all().item())\n\n def test_block_floating_point(self):\n for wl in [5, 3]:\n for rounding in [\"nearest\"]:\n for dim in [-1, 0, 1]:\n t_max = 1-(2**(-4))\n to_quantize_cuda = torch.linspace(-t_max, t_max, steps=20, device='cuda')\n to_quantize_cpu = to_quantize_cuda.clone().to(\"cpu\")\n block_quantized_cuda = block_quantize(to_quantize_cuda, wl=wl, rounding=rounding)\n block_quantized_cpu = block_quantize(to_quantize_cpu, wl=wl, rounding=rounding)\n mse = self.error(block_quantized_cuda, block_quantized_cpu)\n self.assertTrue(mse<1e-15)\n # self.assertTrue(torch.eq(block_quantized_cuda.cpu(), block_quantized_cpu).all().item())\n\n def test_floating_point(self):\n for man, exp in [(2, 5), (6, 9)]:\n for rounding in [\"nearest\"]:\n to_quantize_cuda = torch.rand(20).cuda()\n to_quantize_cpu = to_quantize_cuda.clone().to(\"cpu\")\n float_quantized_cuda = float_quantize(to_quantize_cuda, man=man, exp=exp, rounding=rounding)\n float_quantized_cpu = float_quantize(to_quantize_cpu, man=man, exp=exp, rounding=rounding)\n mse = self.error(float_quantized_cuda, float_quantized_cpu)\n self.assertTrue(mse<1e-15)\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "torch.linspace", "torch.rand" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
karllark/beast_metal
[ "35b87f4f168e86f262612ee86537ab822382b656" ]
[ "setup_batch_beast_refit.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nCode to setup the xsede slurm inputs for results that are incomplete\n incomplete means 1D pPDF results done with the old linear mass spacing\n or missing full 1D pPDF or lnp files\n\n.. history::\n Written 4jul15 by KDG.\n\n\"\"\"\n\nfrom __future__ import print_function\nimport os\nimport glob\nimport argparse\nimport tables\n#####\nimport numpy as np\nfrom astropy.table import Table\nfrom astropy.io import fits\n#####\n\nif __name__ == '__main__':\n\n basename = '14675_LMC-5665ne-12232.gst_with_sourceden_goodbands'\n datapath = 'data'\n basepath = 'LMC-5665ne-12232_beast'\n cat_files = np.array(glob.glob(datapath+'/'+basename + '*_sub*.fits'))\n\n n_cat_files = len(cat_files)\n n_pernode_files = 1\n\n # setup the subdirectory for the xsede slurm and log files\n job_path = basepath+'/refit_batch_jobs/'\n if not os.path.isdir(job_path):\n os.mkdir(job_path)\n\n log_path = job_path+'logs/'\n if not os.path.isdir(log_path):\n os.mkdir(log_path)\n\n pf_open = False\n cur_f = 0\n cur_total_size = 0.0\n j = -1\n\n #cat_files = cat_files[0:2]\n\n for i, cat_file in enumerate(cat_files):\n # get the sd number\n bpos = cat_file.find('obscat/')\n dpos = cat_file.find('SD_')\n spos = cat_file.find('sub')\n ppos = cat_file.rfind('.')\n sd_num = cat_file[dpos+3:spos-1]\n sub_num = cat_file[spos+3:ppos]\n\n # read the stats file and see if this subregion is done yet\n results_path = basepath + 'results/'\n stats_file = results_path+'http_sd'+sd_num+'_sub'+sub_num+'_stats.fits'\n pdf1d_file = results_path+'http_sd'+sd_num+'_sub'+sub_num+'_pdf1d.fits'\n lnp_file = results_path+'http_sd'+sd_num+'_sub'+sub_num+'_lnp.hd5'\n\n reg_run = False\n run_done = False\n if not os.path.isfile(stats_file):\n reg_run = True\n print('no stats file')\n if not os.path.isfile(pdf1d_file):\n reg_run = True\n print('no pdf1d file')\n if not os.path.isfile(lnp_file):\n reg_run = True\n print('no lnp file')\n\n # first check if the pdf1d mass spacing is correct\n if not reg_run:\n hdulist = fits.open(pdf1d_file)\n delta1 = hdulist['M_ini'].data[-1,1] - hdulist['M_ini'].data[-1,0]\n if delta1 > 1.0: # old linear spacing\n print('pdf1d lin mass spacing - full refitting needed')\n old_mass_spacing = True\n else:\n old_mass_spacing = False\n print('pdf1d log mass spacing - ok')\n\n if old_mass_spacing:\n run_done = False\n reg_run = True\n\n # now check if the number of results is the same as \n # the number of observations\n if not reg_run:\n # get the observed catalog\n obs = Table.read(cat_file)\n\n # get the fit results catalog\n t = Table.read(stats_file)\n # get the number of stars that have been fit\n indxs, = np.where(t['Pmax'] != 0.0)\n\n # get the number of entries in the lnp file\n f = tables.openFile(lnp_file, 'r')\n nlnp = f.root._v_nchildren - 2\n f.close()\n\n print('# obs, stats, lnp = ',len(obs), len(indxs), nlnp)\n if (len(indxs) == len(obs)) & (nlnp == len(obs)):\n\n # final check, is the pdf1d file correctly populated\n tot_prob = np.sum(hdulist['M_ini'].data, axis=1)\n tindxs, = np.where(tot_prob > 0.0)\n print('# good pdf1d = ', len(tindxs) - 1)\n if len(tindxs) == (len(obs) + 1):\n run_done = True\n\n if run_done:\n print(stats_file + ' done')\n else:\n\n j += 1\n if j%n_pernode_files == 0:\n cur_f += 1\n\n # close previous files\n if j != 0:\n pf.close()\n print('total sed_trim size [Gb] = ', \n cur_total_size/(1024.*1024.*1024.))\n cur_total_size = 0.0\n\n # open the slurm and param files\n pf_open = True\n joblist_file = job_path+'beast_batch_refit_'+str(cur_f) \\\n 
+'.joblist'\n pf = open(joblist_file,'w')\n \n\n ext_str = ''\n if reg_run:\n print(stats_file \n + ' does not exist - adding job as a regular fit job (not resume job)')\n else:\n print(stats_file \n + ' not done - adding to continue fitting list (' + \\\n str(len(indxs)) + '/' + str(len(t['Pmax'])) + ')')\n ext_str = '-r'\n\n job_command = './run_beast_production.py -f ' + ext_str + ' ' + \\\n sd_num + ' '+sub_num+' > ' \\\n + log_path+'beast_fit_resume_http' + \\\n '_sd'+sd_num+'_sub'+sub_num+'.log'\n\n pf.write(job_command+'\\n')\n\n if pf_open:\n pf.close()\n" ]
[ [ "numpy.where", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
billreus/HuaWei_CodeCraft2019
[ "68f6d464d7950e65385decf31d478bc945ec6c4f" ]
[ "B/CodeCraft-2019/src/map.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom reader import read_txt\nimport numpy as np\nimport pandas as pd\nfrom model import *\ndef Graph(Cross_count, Road_count, Road_isDuplex, Road_roadFrom, Road_roadTo, Road_length, Road_id):\n #对角矩阵,对角取0,设置每两个点间的距离,等于路径上的权值\n plan_roadLength = np.zeros([Cross_count, Cross_count]) + 10000 - 10000 * np.eye(Cross_count)\n plan_road = np.zeros([Cross_count, Cross_count])#36*36的每个起点到终点的路号,等于路径图\n\n for i in range(Road_count):\n if Road_isDuplex[i] == 1:\n plan_roadLength[Cross.dict[Road_roadFrom[i]]][Cross.dict[Road_roadTo[i]]] = plan_roadLength[Cross.dict[Road_roadTo[i]]][Cross.dict[Road_roadFrom[i]]] = Road_length[i]\n plan_road[Cross.dict[Road_roadFrom[i]]][Cross.dict[Road_roadTo[i]]] = plan_road[Cross.dict[Road_roadTo[i]]][Cross.dict[Road_roadFrom[i]]] = Road_id[i]\n else:\n plan_roadLength[Cross.dict[Road_roadFrom[i]]][Cross.dict[Road_roadTo[i]]] = Road_length[i]\n plan_road[Cross.dict[Road_roadFrom[i]]][Cross.dict[Road_roadTo[i]]] = Road_id[i]\n road_loss = plan_roadLength\n return plan_roadLength, plan_road, road_loss\n\n\ndef Dijkstra(origin, destination, planRoadLength):\n #inf = 10000 #死路\n origin = Cross.dict[origin]\n destination = Cross.dict[destination]\n path_array = []\n temp_array = []\n path_array.extend(planRoadLength[origin])\n temp_array.extend(planRoadLength[origin])\n #print(path_array)\n #temp_array[origin] = inf\n already_traversal = [origin]\n path_parent = [origin] * Cross.count #cross*cross\n\n i = origin\n while (i != destination):\n i = temp_array.index(min(temp_array))#最短的一条路的cross_id\n temp_array[i] = 10000\n path = [i]\n #path.append(i)#记录走过的路\n k = i\n while (path_parent[k] != origin):\n path.append(path_parent[k])\n k = path_parent[k]\n path.append(origin)\n path.reverse()\n already_traversal.append(i)\n for j in range(Cross.count):\n if j not in already_traversal:\n if (path_array[i] + planRoadLength[i][j]) < path_array[j]:\n path_array[j] = temp_array[j] = path_array[i] + planRoadLength[i][j]\n path_parent[j] = i\n #print(path)\n return path\n\n\n\nif __name__ == \"__main__\":\n read_car, read_cross, read_road = read_txt('../config/car.txt', '../config/cross.txt', '../config/road.txt')\n intiData(read_car, read_cross, read_road)\n plan_roadLength, plan_road, road_loss = Graph(Cross.count, Road.count, Road.isDuplex, Road.roadFrom, Road.roadTo, Road.length, Road.id)\n print(plan_roadLength)\n #print(graph.plan_roadLength)\n #print(graph.plan_road)\n #test = [3]\n #print([3] * 4)" ]
[ [ "numpy.eye", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Madhav1812/selective-inference
[ "7b8f232fdf19b43489d4f434c493cdd80ab8fc96" ]
[ "selection/randomized/randomization.py" ]
[ "\"\"\"\nDifferent randomization options for selective sampler.\nMain method used in selective sampler is the gradient method which\nshould be a gradient of the negative of the log-density. For a\nGaussian density, this will be a convex function, not a concave function.\n\"\"\"\nfrom __future__ import division, print_function\nimport numpy as np\nimport regreg.api as rr\nfrom scipy.stats import laplace, logistic, norm as ndist\n\nclass randomization(rr.smooth_atom):\n\n def __init__(self,\n shape,\n density,\n cdf,\n pdf,\n derivative_log_density,\n grad_negative_log_density,\n sampler,\n lipschitz=1,\n log_density=None,\n CGF=None, # cumulant generating function and gradient\n CGF_conjugate=None, # convex conjugate of CGF and gradient\n cov_prec=None # will have a covariance matrix if Gaussian\n ):\n\n rr.smooth_atom.__init__(self,\n shape)\n self._density = density\n self._cdf = cdf\n self._pdf = pdf\n self._derivative_log_density = derivative_log_density\n self._grad_negative_log_density = grad_negative_log_density\n self._sampler = sampler\n self.lipschitz = lipschitz\n\n if log_density is None:\n log_density = lambda x: np.log(density(x))\n\n self._log_density = log_density\n self.CGF = CGF\n self.CGF_conjugate = CGF_conjugate\n if cov_prec is not None:\n self.cov_prec = cov_prec\n\n def smooth_objective(self, perturbation, mode='both', check_feasibility=False):\n \"\"\"\n Compute the negative log-density and its gradient.\n \"\"\"\n if mode == 'func':\n return self.scale(-np.log(self._density(perturbation)))\n elif mode == 'grad':\n return self.scale(self._grad_negative_log_density(perturbation))\n elif mode == 'both':\n return self.scale(-np.log(self._density(perturbation))), self.scale(self._grad_negative_log_density(perturbation))\n else:\n raise ValueError(\"mode incorrectly specified\")\n\n def sample(self, size=()):\n return self._sampler(size=size)\n\n def gradient(self, perturbation):\n \"\"\"\n Evaluate the gradient of the log-density.\n Parameters\n ----------\n perturbation : np.float\n Returns\n -------\n gradient : np.float\n \"\"\"\n return self.smooth_objective(perturbation, mode='grad')\n\n def log_density(self, perturbation):\n \"\"\"\n Evaluate the log-density.\n Parameters\n ----------\n perturbation : np.float\n Returns\n -------\n value : float\n \"\"\"\n return np.squeeze(self._log_density(perturbation))\n\n def randomize(self, loss, epsilon=0, perturb=None):\n \"\"\"\n Randomize the loss.\n \"\"\"\n randomized_loss = rr.smooth_sum([loss])\n if perturb is None:\n perturb = self.sample()\n randomized_loss.quadratic = rr.identity_quadratic(epsilon, 0, -perturb, 0)\n return randomized_loss, perturb\n\n @staticmethod\n def isotropic_gaussian(shape, scale):\n \"\"\"\n Isotropic Gaussian with SD `scale`.\n Parameters\n ----------\n shape : tuple\n Shape of noise.\n scale : float\n SD of noise.\n \"\"\"\n if type(shape) == type(1):\n shape = (shape,)\n rv = ndist(scale=scale, loc=0.)\n p = np.product(shape)\n density = lambda x: np.product(rv.pdf(x))\n cdf = lambda x: rv.cdf(x)\n pdf = lambda x: rv.pdf(x)\n derivative_log_density = lambda x: -x/(scale**2)\n grad_negative_log_density = lambda x: x / scale**2\n sampler = lambda size: rv.rvs(size=shape + size)\n CGF = isotropic_gaussian_CGF(shape, scale)\n CGF_conjugate = isotropic_gaussian_CGF_conjugate(shape, scale)\n\n p = np.product(shape)\n constant = 0 # -0.5 * p * np.log(2 * np.pi * scale**2)\n return randomization(shape,\n density,\n cdf,\n pdf,\n derivative_log_density,\n grad_negative_log_density,\n sampler,\n 
lipschitz=1./scale**2,\n log_density = lambda x: -0.5 * (np.atleast_2d(x)**2).sum(1) / scale**2 + constant,\n CGF=CGF,\n CGF_conjugate=CGF_conjugate,\n cov_prec=(scale**2, 1. / scale**2)\n )\n\n @staticmethod\n def gaussian(covariance):\n \"\"\"\n Gaussian noise with a given covariance.\n Parameters\n ----------\n covariance : np.float((*,*))\n Positive definite covariance matrix. Non-negative definite\n will raise an error.\n \"\"\"\n precision = np.linalg.inv(covariance)\n sqrt_precision = np.linalg.cholesky(precision)\n _det = np.linalg.det(covariance)\n p = covariance.shape[0]\n _const = 1. # np.sqrt((2*np.pi)**p * _det)\n density = lambda x: np.exp(-(x * precision.dot(x)).sum() / 2) / _const\n cdf = lambda x: None\n pdf = lambda x: None\n derivative_log_density = lambda x: None\n grad_negative_log_density = lambda x: precision.dot(x)\n sampler = lambda size: covariance.dot(sqrt_precision.dot(np.random.standard_normal((p,) + size)))\n\n return randomization((p,),\n density,\n cdf,\n pdf,\n derivative_log_density,\n grad_negative_log_density,\n sampler,\n lipschitz=np.linalg.svd(precision)[1].max(),\n log_density = lambda x: -np.sum(sqrt_precision.dot(np.atleast_2d(x).T)**2, 0) * 0.5 - np.log(_const),\n cov_prec=(covariance, precision))\n\n @staticmethod\n def laplace(shape, scale):\n \"\"\"\n Standard Laplace noise multiplied by `scale`\n Parameters\n ----------\n shape : tuple\n Shape of noise.\n scale : float\n Scale of noise.\n \"\"\"\n rv = laplace(scale=scale, loc=0.)\n density = lambda x: np.product(rv.pdf(x))\n\n grad_negative_log_density = lambda x: np.sign(x) / scale\n sampler = lambda size: rv.rvs(size=shape + size)\n cdf = lambda x: laplace.cdf(x, loc=0., scale = scale)\n pdf = lambda x: laplace.pdf(x, loc=0., scale = scale)\n derivative_log_density = lambda x: -np.sign(x)/scale\n grad_negative_log_density = lambda x: np.sign(x) / scale\n sampler = lambda size: rv.rvs(size=shape + size)\n CGF = laplace_CGF(shape, scale)\n CGF_conjugate = laplace_CGF_conjugate(shape, scale)\n constant = -np.product(shape) * np.log(2 * scale)\n\n return randomization(shape,\n density,\n cdf,\n pdf,\n derivative_log_density,\n grad_negative_log_density,\n sampler,\n lipschitz=1./scale**2,\n log_density = lambda x: -np.fabs(np.atleast_2d(x)).sum(1) / scale - np.log(scale) + constant,\n CGF=CGF,\n CGF_conjugate=CGF_conjugate,)\n\n @staticmethod\n def logistic(shape, scale):\n \"\"\"\n Standard logistic noise multiplied by `scale`\n Parameters\n ----------\n shape : tuple\n Shape of noise.\n scale : float\n Scale of noise.\n \"\"\"\n # from http://docs.scipy.org/doc/numpy/reference/generated/numpy.random.logistic.html\n density = lambda x: (np.product(np.exp(-x / scale) /\n (1 + np.exp(-x / scale))**2)\n / scale**(np.product(x.shape)))\n cdf = lambda x: logistic.cdf(x, loc=0., scale = scale)\n pdf = lambda x: logistic.pdf(x, loc=0., scale = scale)\n derivative_log_density = lambda x: (np.exp(-x/scale)-1)/(scale*np.exp(-x/scale)+1)\n # negative log density is (with \\mu=0)\n # x/s + log(s) + 2 \\log (1 + e(-x/s))\n grad_negative_log_density = lambda x: (1 - np.exp(-x / scale)) / ((1 + np.exp(-x / scale)) * scale)\n sampler = lambda size: np.random.logistic(loc=0, scale=scale, size=shape + size)\n\n constant = - np.product(shape) * np.log(scale)\n return randomization(shape,\n density,\n cdf,\n pdf,\n derivative_log_density,\n grad_negative_log_density,\n sampler,\n lipschitz=.25/scale**2,\n log_density = lambda x: -np.atleast_2d(x).sum(1) / scale - 2 * np.log(1 + np.exp(-np.atleast_2d(x) / 
scale)).sum(1) + constant)\n\nclass split(randomization):\n\n def __init__(self, shape, subsample_size, total_size):\n\n self.subsample_size = subsample_size\n self.total_size = total_size\n\n rr.smooth_atom.__init__(self,\n shape)\n\n def get_covariance(self):\n if hasattr(self, \"_covariance\"):\n return self._covariance\n\n def set_covariance(self, covariance):\n \"\"\"\n Once covariance has been set, then\n the usual API of randomization will work.\n \"\"\"\n self._covariance = covariance\n precision = np.linalg.inv(covariance)\n self._cov_prec = (covariance, precision)\n sqrt_precision = np.linalg.cholesky(precision).T\n _det = np.linalg.det(covariance)\n p = covariance.shape[0]\n _const = 1. # np.sqrt((2*np.pi)**p * _det)\n self._density = lambda x: np.exp(-(x * precision.dot(x)).sum() / 2) / _const\n self._grad_negative_log_density = lambda x: precision.dot(x)\n self._sampler = lambda size: sqrt_precision.dot(np.random.standard_normal((p,) + size))\n self.lipschitz = np.linalg.svd(precision)[1].max()\n def _log_density(x):\n return -np.sum(sqrt_precision.dot(np.atleast_2d(x).T)**2, 0) * 0.5 - np.log(_const)\n self._log_density = _log_density\n\n covariance = property(get_covariance, set_covariance)\n\n @property\n def cov_prec(self):\n if hasattr(self, \"_covariance\"):\n return self._cov_prec\n\n def smooth_objective(self, perturbation, mode='both', check_feasibility=False):\n if not hasattr(self, \"_covariance\"):\n raise ValueError('first set the covariance')\n return randomization.smooth_objective(self, perturbation, mode=mode, check_feasibility=check_feasibility)\n\n def sample(self, size=()):\n if not hasattr(self, \"_covariance\"):\n raise ValueError('first set the covariance')\n return randomization.sample(self, size=size)\n\n def gradient(self, perturbation):\n if not hasattr(self, \"_covariance\"):\n raise ValueError('first set the covariance')\n return randomization.gradient(self, perturbation)\n\n def randomize(self, loss, epsilon):\n \"\"\"\n Parameters\n ----------\n loss : rr.glm\n A glm loss with a `subsample` method.\n epsilon : float\n Coefficient in front of quadratic term\n Returns\n -------\n Subsampled loss multiplied by `n / m` where\n m is the subsample size out of a total\n sample size of n.\n The quadratic term is not multiplied by `n / m`\n \"\"\"\n n, m = self.total_size, self.subsample_size\n inv_frac = n / m\n quadratic = rr.identity_quadratic(epsilon, 0, 0, 0)\n m, n = self.subsample_size, self.total_size # shorthand\n idx = np.zeros(n, np.bool)\n idx[:m] = 1\n np.random.shuffle(idx)\n\n randomized_loss = loss.subsample(idx)\n randomized_loss.coef *= inv_frac\n\n randomized_loss.quadratic = quadratic\n\n return randomized_loss, None\n\n# Conjugate generating function for Gaussian\n\ndef isotropic_gaussian_CGF(shape, scale): # scale = SD\n return cumulant(shape,\n lambda x: (x**2).sum() * scale**2 / 2.,\n lambda x: scale**2 * x)\n\ndef isotropic_gaussian_CGF_conjugate(shape, scale): # scale = SD\n return cumulant_conjugate(shape,\n lambda x: (x**2).sum() / (2 * scale**2),\n lambda x: x / scale**2)\n\n# Conjugate generating function for Laplace\n\ndef _standard_laplace_CGF_conjugate(u):\n \"\"\"\n sup_z uz + log(1 - z**2)\n \"\"\"\n _zeros = (u == 0)\n root = (-1 + np.sqrt(1 + u**2)) / (u + _zeros)\n value = (root * u + np.log(1 - root**2)).sum()\n return value\n\ndef _standard_laplace_CGF_conjugate_grad(u):\n \"\"\"\n sup_z uz + log(1 - z**2)\n \"\"\"\n _zeros = (u == 0)\n root = (-1 + np.sqrt(1 + u**2)) / (u + _zeros)\n return root\n\nBIG = 10**10\ndef 
laplace_CGF(shape, scale):\n return cumulant(shape,\n lambda x: -np.log(1 - (scale * x)**2).sum() + BIG * (np.abs(x) > 1),\n lambda x: 2 * x * scale**2 / (1 - (scale * x)**2))\n\ndef laplace_CGF_conjugate(shape, scale):\n return cumulant_conjugate(shape,\n lambda x: _standard_laplace_CGF_conjugate(x / scale),\n lambda x: _standard_laplace_CGF_conjugate_grad(x / scale) / scale)\n\nclass from_grad_func(rr.smooth_atom):\n\n \"\"\"\n take a (func, grad) pair and make a smooth_objective\n \"\"\"\n\n\n def __init__(self,\n shape,\n func,\n grad,\n coef=1.,\n offset=None,\n initial=None,\n quadratic=None):\n\n rr.smooth_atom.__init__(self,\n shape,\n offset=offset,\n quadratic=quadratic,\n initial=initial,\n coef=coef)\n\n self._func, self._grad = (func, grad)\n\n def smooth_objective(self, param, mode='both', check_feasibility=False):\n \"\"\"\n Evaluate the smooth objective, computing its value, gradient or both.\n Parameters\n ----------\n mean_param : ndarray\n The current parameter values.\n mode : str\n One of ['func', 'grad', 'both'].\n check_feasibility : bool\n If True, return `np.inf` when\n point is not feasible, i.e. when `mean_param` is not\n in the domain.\n Returns\n -------\n If `mode` is 'func' returns just the objective value\n at `mean_param`, else if `mode` is 'grad' returns the gradient\n else returns both.\n \"\"\"\n\n param = self.apply_offset(param)\n\n if mode == 'func':\n return self.scale(self._func(param))\n elif mode == 'grad':\n return self.scale(self._grad(param))\n elif mode == 'both':\n return self.scale(self._func(param)), self.scale(self._grad(param))\n else:\n raise ValueError(\"mode incorrectly specified\")\n\n\nclass cumulant(from_grad_func):\n \"\"\"\n Class for CGF.\n \"\"\"\n pass\n\nclass cumulant_conjugate(from_grad_func):\n \"\"\"\n Class for conjugate of a CGF.\n \"\"\"\n pass\n\n" ]
[ [ "scipy.stats.laplace", "numpy.product", "numpy.sqrt", "numpy.exp", "numpy.random.logistic", "numpy.linalg.svd", "numpy.linalg.det", "scipy.stats.laplace.pdf", "scipy.stats.laplace.cdf", "scipy.stats.logistic.pdf", "numpy.zeros", "numpy.log", "numpy.linalg.inv", "numpy.atleast_2d", "numpy.linalg.cholesky", "numpy.abs", "scipy.stats.logistic.cdf", "numpy.random.standard_normal", "numpy.random.shuffle", "numpy.sign", "scipy.stats.norm" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
kai-xl8/fairseq
[ "fd080b308e1e3361d6c498b235496080fa6599e5", "fd080b308e1e3361d6c498b235496080fa6599e5", "fd080b308e1e3361d6c498b235496080fa6599e5" ]
[ "fairseq_cli/train.py", "fairseq/benchmark/dummy_mt.py", "fairseq/data/resampling_dataset.py" ]
[ "#!/usr/bin/env python3 -u\n# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\"\"\"\nTrain a new model on one or across multiple GPUs.\n\"\"\"\n\nimport argparse\nimport logging\nimport math\nimport os\nimport random\nimport sys\n\nimport numpy as np\nimport torch\nfrom fairseq import (\n checkpoint_utils,\n distributed_utils,\n options,\n quantization_utils,\n tasks,\n utils,\n)\nfrom fairseq.data import iterators\nfrom fairseq.logging import meters, metrics, progress_bar\nfrom fairseq.model_parallel.megatron_trainer import MegatronTrainer\nfrom fairseq.trainer import Trainer\n\n\nlogging.basicConfig(\n format=\"%(asctime)s | %(levelname)s | %(name)s | %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n level=os.environ.get(\"LOGLEVEL\", \"INFO\").upper(),\n stream=sys.stdout,\n)\nlogger = logging.getLogger(\"fairseq_cli.train\")\n\n\ndef main(args):\n utils.import_user_module(args)\n\n assert (\n args.max_tokens is not None or args.max_sentences is not None\n ), \"Must specify batch size either with --max-tokens or --max-sentences\"\n\n metrics.reset()\n\n np.random.seed(args.seed)\n utils.set_torch_seed(args.seed)\n\n if distributed_utils.is_master(args):\n checkpoint_utils.verify_checkpoint_directory(args.save_dir)\n\n # Print args\n logger.info(args)\n\n # Setup task, e.g., translation, language modeling, etc.\n task = tasks.setup_task(args)\n\n # Load valid dataset (we load training data below, based on the latest checkpoint)\n for valid_sub_split in args.valid_subset.split(\",\"):\n task.load_dataset(valid_sub_split, combine=False, epoch=1)\n\n # Build model and criterion\n model = task.build_model(args)\n criterion = task.build_criterion(args)\n logger.info(model)\n logger.info(\"task: {} ({})\".format(args.task, task.__class__.__name__))\n logger.info(\"model: {} ({})\".format(args.arch, model.__class__.__name__))\n logger.info(\n \"criterion: {} ({})\".format(args.criterion, criterion.__class__.__name__)\n )\n logger.info(\n \"num. model params: {} (num. 
trained: {})\".format(\n sum(p.numel() for p in model.parameters()),\n sum(p.numel() for p in model.parameters() if p.requires_grad),\n )\n )\n\n # (optionally) Configure quantization\n if args.quantization_config_path is not None:\n quantizer = quantization_utils.Quantizer(\n config_path=args.quantization_config_path,\n max_epoch=args.max_epoch,\n max_update=args.max_update,\n )\n else:\n quantizer = None\n\n # Build trainer\n if args.model_parallel_size == 1:\n trainer = Trainer(args, task, model, criterion, quantizer)\n else:\n trainer = MegatronTrainer(args, task, model, criterion)\n\n logger.info(\n \"training on {} devices (GPUs/TPUs)\".format(args.distributed_world_size)\n )\n logger.info(\n \"max tokens per GPU = {} and max sentences per GPU = {}\".format(\n args.max_tokens, args.max_sentences\n )\n )\n\n # Load the latest checkpoint if one is available and restore the\n # corresponding train iterator\n extra_state, epoch_itr = checkpoint_utils.load_checkpoint(\n args,\n trainer,\n # don't cache epoch iterators for sharded datasets\n disable_iterator_cache=task.has_sharded_data(\"train\"),\n )\n\n # Train until the learning rate gets too small\n max_epoch = args.max_epoch or math.inf\n lr = trainer.get_lr()\n train_meter = meters.StopwatchMeter()\n train_meter.start()\n\n while lr > args.min_lr and epoch_itr.next_epoch_idx <= max_epoch:\n # train for one epoch\n valid_losses, should_stop = train(args, trainer, task, epoch_itr)\n if should_stop:\n break\n\n # only use first validation loss to update the learning rate\n lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])\n\n epoch_itr = trainer.get_train_iterator(\n epoch_itr.next_epoch_idx,\n # sharded data: get train iterator for next epoch\n load_dataset=task.has_sharded_data(\"train\"),\n # don't cache epoch iterators for sharded datasets\n disable_iterator_cache=task.has_sharded_data(\"train\"),\n )\n train_meter.stop()\n logger.info(\"done training in {:.1f} seconds\".format(train_meter.sum))\n\n\ndef should_stop_early(args, valid_loss):\n # skip check if no validation was done in the current epoch\n if valid_loss is None:\n return False\n if args.patience <= 0:\n return False\n\n def is_better(a, b):\n return a > b if args.maximize_best_checkpoint_metric else a < b\n\n prev_best = getattr(should_stop_early, \"best\", None)\n if prev_best is None or is_better(valid_loss, prev_best):\n should_stop_early.best = valid_loss\n should_stop_early.num_runs = 0\n return False\n else:\n should_stop_early.num_runs += 1\n if should_stop_early.num_runs >= args.patience:\n logger.info(\n \"early stop since valid performance hasn't improved for last {} runs\".format(\n args.patience\n )\n )\n return True\n else:\n return False\n\n\[email protected](\"train\")\ndef train(args, trainer, task, epoch_itr):\n \"\"\"Train the model for one epoch and return validation losses.\"\"\"\n # Initialize data iterator\n itr = epoch_itr.next_epoch_itr(\n fix_batches_to_gpus=args.fix_batches_to_gpus,\n shuffle=(epoch_itr.next_epoch_idx > args.curriculum),\n )\n update_freq = (\n args.update_freq[epoch_itr.epoch - 1]\n if epoch_itr.epoch <= len(args.update_freq)\n else args.update_freq[-1]\n )\n itr = iterators.GroupedIterator(itr, update_freq)\n if getattr(args, \"tpu\", False):\n itr = utils.tpu_data_loader(itr)\n progress = progress_bar.progress_bar(\n itr,\n log_format=args.log_format,\n log_interval=args.log_interval,\n epoch=epoch_itr.epoch,\n tensorboard_logdir=(\n args.tensorboard_logdir if distributed_utils.is_master(args) else None\n ),\n 
default_log_format=(\"tqdm\" if not args.no_progress_bar else \"simple\"),\n )\n\n trainer.begin_epoch(epoch_itr.epoch)\n\n valid_subsets = args.valid_subset.split(\",\")\n should_stop = False\n num_updates = trainer.get_num_updates()\n for i, samples in enumerate(progress):\n with metrics.aggregate(\"train_inner\"), torch.autograd.profiler.record_function(\n \"train_step-%d\" % i\n ):\n log_output = trainer.train_step(samples)\n\n if log_output is not None: # not OOM, overflow, ...\n # log mid-epoch stats\n num_updates = trainer.get_num_updates()\n if num_updates % args.log_interval == 0:\n stats = get_training_stats(metrics.get_smoothed_values(\"train_inner\"))\n progress.log(stats, tag=\"train_inner\", step=num_updates)\n\n # reset mid-epoch stats after each log interval\n # the end-of-epoch stats will still be preserved\n metrics.reset_meters(\"train_inner\")\n\n end_of_epoch = not itr.has_next()\n valid_losses, should_stop = validate_and_save(\n args, trainer, task, epoch_itr, valid_subsets, end_of_epoch\n )\n\n if should_stop:\n break\n\n # log end-of-epoch stats\n logger.info(\"end of epoch {} (average epoch stats below)\".format(epoch_itr.epoch))\n stats = get_training_stats(metrics.get_smoothed_values(\"train\"))\n progress.print(stats, tag=\"train\", step=num_updates)\n\n # reset epoch-level meters\n metrics.reset_meters(\"train\")\n return valid_losses, should_stop\n\n\ndef validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch):\n num_updates = trainer.get_num_updates()\n max_update = args.max_update or math.inf\n do_save = (\n (end_of_epoch and epoch_itr.epoch % args.save_interval == 0)\n or num_updates >= max_update\n or (\n args.save_interval_updates > 0\n and num_updates > 0\n and num_updates % args.save_interval_updates == 0\n and num_updates >= args.validate_after_updates\n )\n )\n do_validate = (\n (not end_of_epoch and do_save) # validate during mid-epoch saves\n or (end_of_epoch and epoch_itr.epoch % args.validate_interval == 0)\n or num_updates >= max_update\n or (\n args.validate_interval_updates > 0\n and num_updates > 0\n and num_updates % args.validate_interval_updates == 0\n )\n ) and not args.disable_validation\n\n # Validate\n valid_losses = [None]\n if do_validate:\n valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)\n\n # Stopping conditions\n should_stop = (\n should_stop_early(args, valid_losses[0])\n or num_updates >= max_update\n or (\n args.stop_time_hours > 0\n and trainer.cumulative_training_time() / (60 * 60) > args.stop_time_hours\n )\n )\n\n # Save checkpoint\n if do_save or should_stop:\n logger.info(\"begin save checkpoint\")\n checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])\n\n return valid_losses, should_stop\n\n\ndef get_training_stats(stats):\n stats[\"wall\"] = round(metrics.get_meter(\"default\", \"wall\").elapsed_time, 0)\n return stats\n\n\ndef validate(args, trainer, task, epoch_itr, subsets):\n \"\"\"Evaluate the model on the validation set(s) and return the losses.\"\"\"\n\n if args.fixed_validation_seed is not None:\n # set fixed seed for every validation\n utils.set_torch_seed(args.fixed_validation_seed)\n\n valid_losses = []\n for subset in subsets:\n logger.info('begin validation on \"{}\" subset'.format(subset))\n\n # Initialize data iterator\n itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)\n if getattr(args, \"tpu\", False):\n itr = utils.tpu_data_loader(itr)\n progress = progress_bar.progress_bar(\n itr,\n log_format=args.log_format,\n 
log_interval=args.log_interval,\n epoch=epoch_itr.epoch,\n prefix=f\"valid on '{subset}' subset\",\n tensorboard_logdir=(\n args.tensorboard_logdir if distributed_utils.is_master(args) else None\n ),\n default_log_format=(\"tqdm\" if not args.no_progress_bar else \"simple\"),\n )\n\n # create a new root metrics aggregator so validation metrics\n # don't pollute other aggregators (e.g., train meters)\n with metrics.aggregate(new_root=True) as agg:\n for sample in progress:\n trainer.valid_step(sample)\n\n # log validation stats\n stats = get_valid_stats(args, trainer, agg.get_smoothed_values())\n progress.print(stats, tag=subset, step=trainer.get_num_updates())\n\n valid_losses.append(stats[args.best_checkpoint_metric])\n return valid_losses\n\n\ndef get_valid_stats(args, trainer, stats):\n stats[\"num_updates\"] = trainer.get_num_updates()\n if hasattr(checkpoint_utils.save_checkpoint, \"best\"):\n key = \"best_{0}\".format(args.best_checkpoint_metric)\n best_function = max if args.maximize_best_checkpoint_metric else min\n stats[key] = best_function(\n checkpoint_utils.save_checkpoint.best, stats[args.best_checkpoint_metric]\n )\n return stats\n\n\ndef cli_main(modify_parser=None):\n parser = options.get_training_parser()\n args = options.parse_args_and_arch(parser, modify_parser=modify_parser)\n if args.profile:\n with torch.cuda.profiler.profile():\n with torch.autograd.profiler.emit_nvtx():\n distributed_utils.call_main(args, main)\n else:\n distributed_utils.call_main(args, main)\n\n\nif __name__ == \"__main__\":\n cli_main()\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\n\nimport numpy as np\nimport torch\n\nfrom fairseq.data import Dictionary, FairseqDataset\nfrom fairseq.tasks import register_task, LegacyFairseqTask\n\n\nlogger = logging.getLogger(__name__)\n\n\n@register_task('dummy_mt')\nclass DummyMTTask(LegacyFairseqTask):\n\n @staticmethod\n def add_args(parser):\n \"\"\"Add task-specific arguments to the parser.\"\"\"\n parser.add_argument('--dict-size', default=49996, type=int)\n parser.add_argument('--dataset-size', default=100000, type=int)\n parser.add_argument('--src-len', default=30, type=int)\n parser.add_argument('--tgt-len', default=30, type=int)\n\n def __init__(self, args, dictionary):\n super().__init__(args)\n self.dictionary = dictionary\n self.seed = args.seed\n\n dictionary.pad_to_multiple_(8) # often faster if divisible by 8\n\n self.dummy_src = torch.arange(args.src_len + 1) + dictionary.pad() + 1\n self.dummy_tgt = torch.arange(args.tgt_len + 1) + dictionary.pad() + 1\n\n @classmethod\n def setup_task(cls, args, **kwargs):\n \"\"\"Setup the task. 
\"\"\"\n dictionary = Dictionary()\n for i in range(args.dict_size):\n dictionary.add_symbol('word{}'.format(i))\n logger.info('dictionary: {} types'.format(len(dictionary)))\n\n args.max_source_positions = args.src_len + dictionary.pad() + 2\n args.max_target_positions = args.tgt_len + dictionary.pad() + 2\n\n return cls(args, dictionary)\n\n def load_dataset(self, split, epoch=1, combine=False, **kwargs):\n \"\"\"Load a given dataset split.\n Args:\n split (str): name of the split (e.g., train, valid, test)\n \"\"\"\n item_size = max(self.args.src_len, self.args.tgt_len)\n if self.args.max_sentences is not None:\n bsz = self.args.max_sentences\n else:\n bsz = max(1, self.args.max_tokens // item_size)\n tgt = torch.stack([self.dummy_tgt for _ in range(bsz)])\n self.datasets[split] = DummyDataset(\n {\n 'id': 1,\n 'net_input': {\n 'src_tokens': torch.stack([self.dummy_src for _ in range(bsz)]),\n 'src_lengths': torch.full(\n (bsz, ), self.args.src_len, dtype=torch.long\n ),\n 'prev_output_tokens': tgt.clone(),\n },\n 'target': tgt,\n 'nsentences': bsz,\n 'ntokens': bsz * self.args.tgt_len,\n },\n num_items=self.args.dataset_size,\n item_size=item_size,\n )\n\n @property\n def source_dictionary(self):\n return self.dictionary\n\n @property\n def target_dictionary(self):\n return self.dictionary\n\n\nclass DummyDataset(FairseqDataset):\n\n def __init__(self, batch, num_items, item_size):\n super().__init__()\n self.batch = batch\n self.num_items = num_items\n self.item_size = item_size\n\n def __getitem__(self, index):\n return index\n\n def __len__(self):\n return self.num_items\n\n def collater(self, samples):\n return self.batch\n\n @property\n def sizes(self):\n return np.array([self.item_size] * self.num_items)\n\n def num_tokens(self, index):\n return self.item_size\n\n def size(self, index):\n return self.item_size\n\n def ordered_indices(self):\n return np.arange(self.num_items)\n\n @property\n def supports_prefetch(self):\n return False\n", "# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport logging\n\nimport numpy as np\n\nfrom fairseq.data import BaseWrapperDataset, plasma_utils\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ResamplingDataset(BaseWrapperDataset):\n \"\"\"Randomly samples from a given dataset at each epoch.\n\n Sampling is done with or without replacement, depending on the \"replace\"\n parameter.\n\n Optionally, the epoch size can be rescaled. This is potentially desirable\n to increase per-epoch coverage of the base dataset (since sampling with\n replacement means that many items in the dataset will be left out). 
In the\n case of sampling without replacement, size_ratio should be strictly less\n than 1.\n\n Args:\n dataset (~torch.utils.data.Dataset): dataset on which to sample.\n weights (List[float]): list of probability weights\n (default: None, which corresponds to uniform sampling).\n replace (bool): sampling mode; True for \"with replacement\", or False\n for \"without replacement\" (default: True)\n size_ratio (float): the ratio to subsample to; must be positive\n (default: 1.0).\n batch_by_size (bool): whether or not to batch by sequence length\n (default: True).\n seed (int): RNG seed to use (default: 0).\n epoch (int): starting epoch number (default: 1).\n \"\"\"\n\n def __init__(\n self,\n dataset,\n weights=None,\n replace=True,\n size_ratio=1.0,\n batch_by_size=True,\n seed=0,\n epoch=1,\n ):\n super().__init__(dataset)\n\n if weights is None:\n self.weights = None\n\n else:\n assert len(weights) == len(dataset)\n weights_arr = np.array(weights, dtype=np.float64)\n weights_arr /= weights_arr.sum()\n self.weights = plasma_utils.PlasmaArray(weights_arr)\n\n self.replace = replace\n\n assert size_ratio > 0.0\n if not self.replace:\n assert size_ratio < 1.0\n self.size_ratio = float(size_ratio)\n self.actual_size = np.ceil(len(dataset) * self.size_ratio).astype(int)\n\n self.batch_by_size = batch_by_size\n self.seed = seed\n\n self._cur_epoch = None\n self._cur_indices = None\n\n self.set_epoch(epoch)\n\n def __getitem__(self, index):\n return self.dataset[self._cur_indices.array[index]]\n\n def __len__(self):\n return self.actual_size\n\n @property\n def sizes(self):\n if isinstance(self.dataset.sizes, list):\n return [s[self._cur_indices.array] for s in self.dataset.sizes]\n return self.dataset.sizes[self._cur_indices.array]\n\n def num_tokens(self, index):\n return self.dataset.num_tokens(self._cur_indices.array[index])\n\n def size(self, index):\n return self.dataset.size(self._cur_indices.array[index])\n\n def ordered_indices(self):\n if self.batch_by_size:\n order = [\n np.arange(len(self)),\n self.sizes,\n ] # No need to handle `self.shuffle == True`\n return np.lexsort(order)\n else:\n return np.arange(len(self))\n\n def prefetch(self, indices):\n self.dataset.prefetch(self._cur_indices.array[indices])\n\n @property\n def can_reuse_epoch_itr_across_epochs(self):\n return False\n\n def set_epoch(self, epoch):\n logger.debug('ResamplingDataset.set_epoch: {}'.format(epoch))\n super().set_epoch(epoch)\n\n if epoch == self._cur_epoch:\n return\n\n self._cur_epoch = epoch\n\n # Generate a weighted sample of indices as a function of the\n # random seed and the current epoch.\n\n rng = np.random.RandomState(\n [\n 42, # magic number\n self.seed % (2 ** 32), # global seed\n self._cur_epoch, # epoch index\n ]\n )\n self._cur_indices = plasma_utils.PlasmaArray(\n rng.choice(\n len(self.dataset),\n self.actual_size,\n replace=self.replace,\n p=(None if self.weights is None else self.weights.array),\n )\n )\n" ]
[ [ "torch.autograd.profiler.record_function", "torch.autograd.profiler.emit_nvtx", "torch.cuda.profiler.profile", "numpy.random.seed" ], [ "numpy.arange", "numpy.array", "torch.full", "torch.arange" ], [ "numpy.array", "numpy.random.RandomState", "numpy.lexsort" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
jwpleow/aanet
[ "b83e7b11dfee117114ae7b35645b85e886d3d436" ]
[ "nets/deform_conv/test/testcuda.py" ]
[ "#!/usr/bin/env python\r\nfrom __future__ import absolute_import, division, print_function\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import gradcheck\r\n\r\nfrom dcn_v2 import DCN, DCNPooling, DCNv2, DCNv2Pooling, dcn_v2_conv, dcn_v2_pooling\r\n\r\ndeformable_groups = 1\r\nN, inC, inH, inW = 2, 2, 4, 4\r\noutC = 2\r\nkH, kW = 3, 3\r\n\r\n\r\ndef conv_identify(weight, bias):\r\n weight.data.zero_()\r\n bias.data.zero_()\r\n o, i, h, w = weight.shape\r\n y = h // 2\r\n x = w // 2\r\n for p in range(i):\r\n for q in range(o):\r\n if p == q:\r\n weight.data[q, p, y, x] = 1.0\r\n\r\n\r\ndef check_zero_offset():\r\n conv_offset = nn.Conv2d(\r\n inC,\r\n deformable_groups * 2 * kH * kW,\r\n kernel_size=(kH, kW),\r\n stride=(1, 1),\r\n padding=(1, 1),\r\n bias=True,\r\n ).cuda()\r\n\r\n conv_mask = nn.Conv2d(\r\n inC,\r\n deformable_groups * 1 * kH * kW,\r\n kernel_size=(kH, kW),\r\n stride=(1, 1),\r\n padding=(1, 1),\r\n bias=True,\r\n ).cuda()\r\n\r\n dcn_v2 = DCNv2(inC, outC, (kH, kW), stride=1, padding=1, dilation=1, deformable_groups=deformable_groups).cuda()\r\n\r\n conv_offset.weight.data.zero_()\r\n conv_offset.bias.data.zero_()\r\n conv_mask.weight.data.zero_()\r\n conv_mask.bias.data.zero_()\r\n conv_identify(dcn_v2.weight, dcn_v2.bias)\r\n\r\n input = torch.randn(N, inC, inH, inW).cuda()\r\n offset = conv_offset(input)\r\n mask = conv_mask(input)\r\n mask = torch.sigmoid(mask)\r\n output = dcn_v2(input, offset, mask)\r\n output *= 2\r\n d = (input - output).abs().max()\r\n if d < 1e-10:\r\n print(\"Zero offset passed\")\r\n else:\r\n print(\"Zero offset failed\")\r\n print(input)\r\n print(output)\r\n\r\n\r\ndef check_gradient_dconv():\r\n\r\n input = torch.rand(N, inC, inH, inW).cuda() * 0.01\r\n input.requires_grad = True\r\n\r\n offset = torch.randn(N, deformable_groups * 2 * kW * kH, inH, inW).cuda() * 2\r\n # offset.data.zero_()\r\n # offset.data -= 0.5\r\n offset.requires_grad = True\r\n\r\n mask = torch.rand(N, deformable_groups * 1 * kW * kH, inH, inW).cuda()\r\n # mask.data.zero_()\r\n mask.requires_grad = True\r\n mask = torch.sigmoid(mask)\r\n\r\n weight = torch.randn(outC, inC, kH, kW).cuda()\r\n weight.requires_grad = True\r\n\r\n bias = torch.rand(outC).cuda()\r\n bias.requires_grad = True\r\n\r\n stride = 1\r\n padding = 1\r\n dilation = 1\r\n\r\n print(\r\n \"check_gradient_dconv: \",\r\n gradcheck(\r\n dcn_v2_conv,\r\n (input, offset, mask, weight, bias, stride, padding, dilation, deformable_groups),\r\n eps=1e-3,\r\n atol=1e-4,\r\n rtol=1e-2,\r\n ),\r\n )\r\n\r\n\r\ndef check_pooling_zero_offset():\r\n\r\n input = torch.randn(2, 16, 64, 64).cuda().zero_()\r\n input[0, :, 16:26, 16:26] = 1.0\r\n input[1, :, 10:20, 20:30] = 2.0\r\n rois = (\r\n torch.tensor(\r\n [\r\n [0, 65, 65, 103, 103],\r\n [1, 81, 41, 119, 79],\r\n ]\r\n )\r\n .cuda()\r\n .float()\r\n )\r\n pooling = DCNv2Pooling(\r\n spatial_scale=1.0 / 4,\r\n pooled_size=7,\r\n output_dim=16,\r\n no_trans=True,\r\n group_size=1,\r\n trans_std=0.0,\r\n ).cuda()\r\n\r\n out = pooling(input, rois, input.new())\r\n s = \", \".join([\"%f\" % out[i, :, :, :].mean().item() for i in range(rois.shape[0])])\r\n print(s)\r\n\r\n dpooling = DCNv2Pooling(\r\n spatial_scale=1.0 / 4,\r\n pooled_size=7,\r\n output_dim=16,\r\n no_trans=False,\r\n group_size=1,\r\n trans_std=0.0,\r\n ).cuda()\r\n offset = torch.randn(20, 2, 7, 7).cuda().zero_()\r\n dout = dpooling(input, rois, offset)\r\n s = \", \".join([\"%f\" % dout[i, :, :, :].mean().item() for i in range(rois.shape[0])])\r\n print(s)\r\n\r\n\r\ndef 
check_gradient_dpooling():\r\n input = torch.randn(2, 3, 5, 5).cuda().float() * 0.01\r\n N = 4\r\n batch_inds = torch.randint(2, (N, 1)).cuda().float()\r\n x = torch.rand((N, 1)).cuda().float() * 15\r\n y = torch.rand((N, 1)).cuda().float() * 15\r\n w = torch.rand((N, 1)).cuda().float() * 10\r\n h = torch.rand((N, 1)).cuda().float() * 10\r\n rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)\r\n offset = torch.randn(N, 2, 3, 3).cuda()\r\n input.requires_grad = True\r\n offset.requires_grad = True\r\n\r\n spatial_scale = 1.0 / 4\r\n pooled_size = 3\r\n output_dim = 3\r\n no_trans = 0\r\n group_size = 1\r\n trans_std = 0.0\r\n sample_per_part = 4\r\n part_size = pooled_size\r\n\r\n print(\r\n \"check_gradient_dpooling:\",\r\n gradcheck(\r\n dcn_v2_pooling,\r\n (\r\n input,\r\n rois,\r\n offset,\r\n spatial_scale,\r\n pooled_size,\r\n output_dim,\r\n no_trans,\r\n group_size,\r\n part_size,\r\n sample_per_part,\r\n trans_std,\r\n ),\r\n eps=1e-4,\r\n ),\r\n )\r\n\r\n\r\ndef example_dconv():\r\n input = torch.randn(2, 64, 128, 128).cuda()\r\n # wrap all things (offset and mask) in DCN\r\n dcn = DCN(64, 64, kernel_size=(3, 3), stride=1, padding=1, deformable_groups=2).cuda()\r\n # print(dcn.weight.shape, input.shape)\r\n output = dcn(input)\r\n targert = output.new(*output.size())\r\n targert.data.uniform_(-0.01, 0.01)\r\n error = (targert - output).mean()\r\n error.backward()\r\n print(output.shape)\r\n\r\n\r\ndef example_dpooling():\r\n input = torch.randn(2, 32, 64, 64).cuda()\r\n batch_inds = torch.randint(2, (20, 1)).cuda().float()\r\n x = torch.randint(256, (20, 1)).cuda().float()\r\n y = torch.randint(256, (20, 1)).cuda().float()\r\n w = torch.randint(64, (20, 1)).cuda().float()\r\n h = torch.randint(64, (20, 1)).cuda().float()\r\n rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)\r\n offset = torch.randn(20, 2, 7, 7).cuda()\r\n input.requires_grad = True\r\n offset.requires_grad = True\r\n\r\n # normal roi_align\r\n pooling = DCNv2Pooling(\r\n spatial_scale=1.0 / 4,\r\n pooled_size=7,\r\n output_dim=32,\r\n no_trans=True,\r\n group_size=1,\r\n trans_std=0.1,\r\n ).cuda()\r\n\r\n # deformable pooling\r\n dpooling = DCNv2Pooling(\r\n spatial_scale=1.0 / 4,\r\n pooled_size=7,\r\n output_dim=32,\r\n no_trans=False,\r\n group_size=1,\r\n trans_std=0.1,\r\n ).cuda()\r\n\r\n out = pooling(input, rois, offset)\r\n dout = dpooling(input, rois, offset)\r\n print(out.shape)\r\n print(dout.shape)\r\n\r\n target_out = out.new(*out.size())\r\n target_out.data.uniform_(-0.01, 0.01)\r\n target_dout = dout.new(*dout.size())\r\n target_dout.data.uniform_(-0.01, 0.01)\r\n e = (target_out - out).mean()\r\n e.backward()\r\n e = (target_dout - dout).mean()\r\n e.backward()\r\n\r\n\r\ndef example_mdpooling():\r\n input = torch.randn(2, 32, 64, 64).cuda()\r\n input.requires_grad = True\r\n batch_inds = torch.randint(2, (20, 1)).cuda().float()\r\n x = torch.randint(256, (20, 1)).cuda().float()\r\n y = torch.randint(256, (20, 1)).cuda().float()\r\n w = torch.randint(64, (20, 1)).cuda().float()\r\n h = torch.randint(64, (20, 1)).cuda().float()\r\n rois = torch.cat((batch_inds, x, y, x + w, y + h), dim=1)\r\n\r\n # mdformable pooling (V2)\r\n dpooling = DCNPooling(\r\n spatial_scale=1.0 / 4,\r\n pooled_size=7,\r\n output_dim=32,\r\n no_trans=False,\r\n group_size=1,\r\n trans_std=0.1,\r\n deform_fc_dim=1024,\r\n ).cuda()\r\n\r\n dout = dpooling(input, rois)\r\n target = dout.new(*dout.size())\r\n target.data.uniform_(-0.1, 0.1)\r\n error = (target - dout).mean()\r\n error.backward()\r\n 
print(dout.shape)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n example_dconv()\r\n example_dpooling()\r\n example_mdpooling()\r\n\r\n check_pooling_zero_offset()\r\n # zero offset check\r\n if inC == outC:\r\n check_zero_offset()\r\n\r\n check_gradient_dpooling()\r\n check_gradient_dconv()\r\n # \"\"\"\r\n # ****** Note: backward is not reentrant error may not be a serious problem,\r\n # ****** since the max error is less than 1e-7,\r\n # ****** Still looking for what trigger this problem\r\n # \"\"\"\r\n" ]
[ [ "torch.sigmoid", "torch.randint", "torch.cat", "torch.randn", "torch.nn.Conv2d", "torch.tensor", "torch.rand", "torch.autograd.gradcheck" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
flatironinstitute/athena_3dviz
[ "7c933264ca36f953043b92e522ca42eb36cfb9c7" ]
[ "test/test_utils.py" ]
[ "\nimport unittest\nfrom athena_3dviz import utils\nimport numpy as np\n\nclass TestUtils(unittest.TestCase):\n\n def test_switch_phi_r(self):\n v_in = np.arange(2*3*4*5).reshape((2,3,4,5))\n v_in[0,1,2,3] = 9\n v_out = utils.switch_phi_r(v_in)\n for ib in range(2):\n for iphi in range(3):\n for itheta in range(4):\n for ir in range(5):\n self.assertEqual(v_out[ib, ir, itheta, iphi], v_in[ib, iphi, itheta, ir])\n self.assertEqual(v_out[0,3,2,1], 9)\n" ]
[ [ "numpy.arange" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
APJansen/TDUr
[ "4f0cc09141ac7b28dd16704ed63e901aaf9ebdec" ]
[ "td_ur/game.py" ]
[ "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import colors\nfrom jax import jit, vmap\nimport jax.numpy as jnp\nfrom jax.ops import index, index_update\nfrom functools import partial\n\n\nclass Ur:\n \"\"\"Class representing the Royal game of Ur.\n\n The Ur board looks like this: this is the displayed board\n ------------------------------------\n | 4 | 3 | 2 | 1 | s | f | 14 | 13 |\n ------------------------------------\n | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 |\n ------------------------------------\n | 4 | 3 | 2 | 1 | s | f | 14 | 13 |\n ------------------------------------\n where s and f are not part of the board, but can be seen as the places where stones that still\n have to go through (s) or have already finished (f) are located.\n To fully specify a game state, this needs to be supplemented with the last die throw and whose turn it is.\n\n we unroll this and copy the middle row that's shared between the two players, to give the internal board:\n --------------------------------------------------------------------\n s | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | f |\n --------------------------------------------------------------------\n s | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | f |\n --------------------------------------------------------------------\n\n Attributes:\n board: The internal game board, a 2x16 integer valued numpy array.\n turn: Indicating whose turn it is, 0 or 1.\n rolled: The last die roll.\n winner: Indicating who won, 0 or 1 for either player or -1 for not finished yet.\n start: Width coordinate of the start square of the board.\n finish: Width coordinate of the finish square of the board.\n safe_square: Width coordinate of the middle rosette.\n rosettes: Tuple of width coordinates for the rosettes.\n rolls: The possible die rolls.\n probabilities: The probabilities of each die roll.\n move_count: The amount of moves that have been played.\n \"\"\"\n\n def __init__(self):\n \"\"\"Construct an Ur game.\"\"\"\n # board\n self.start = 0\n self.finish = 15\n self.rosettes = (4, 8, 14)\n self.safe_square = 8\n self._mid_start = 5\n self._mid_ended = 13\n self._board_width_internal = 16\n\n # piece\n self._n_pieces = 7\n\n # to pass to jitted functions\n self._board_params = (self.start, self.finish, self.rosettes, self.safe_square, self._mid_start,\n self._mid_ended, self._board_width_internal, self._n_pieces)\n\n # die\n self._n_die = 4\n self._die_faces = 2\n self.rolls = np.arange(5)\n self.probabilities = np.array([1, 4, 6, 4, 1]) / 16\n\n # display\n self._display_width = 8\n\n # jitted function\n self._get_new_boards = jit(vmap(self._get_new_board, in_axes=(None, None, 0, None, None)), static_argnums=0)\n\n # state\n self.rolled = self.turn = self.winner = self.board = self.move_count = self._backup_state = None\n self.reset()\n\n def reset(self):\n \"\"\"Reset the game to the initial state.\"\"\"\n self.turn = 0\n self.winner = -1\n self.board = np.zeros(shape=(2, self.finish + 1), dtype=np.int8)\n self.board[0, self.start] = self._n_pieces\n self.board[1, self.start] = self._n_pieces\n\n self.move_count = 0\n self._roll()\n\n def _roll(self):\n \"\"\"Roll the dice, store result in attribute `rolled`.\"\"\"\n self.rolled = np.sum(np.random.randint(self._die_faces, size=self._n_die))\n\n def legal_moves(self):\n \"\"\"Return a list of legal moves.\n\n Relies on jitted function `_legal_moves_array`.\n\n Returns:\n List of integers representing legal squares to move from, counted along the 
route.\n \"\"\"\n if self.rolled == 0:\n moves = []\n else:\n moves_array = self._legal_moves_array(self._board_params, self.board, self.turn, self.rolled)\n moves = np.where(moves_array)[0].tolist()\n return moves if moves else ['pass']\n\n def play_move(self, move):\n \"\"\"Play the given legal move.\n\n Plays the move on the board, changes turn (if appropriate) and rolls the dice again.\n Also increments `move_count` and sets `winner` to the player who moved if the game is won.\n Relies on the jitted function `_get_new_board`.\n\n Args:\n move: Integer representing the square to move from, as counted along the route.\n\n \"\"\"\n self.move_count += 1\n\n if move == 'pass':\n self._change_turn()\n self._roll()\n else:\n new_board, new_turn, new_winner = self._get_new_board(self._board_params,\n self.board, move, self.rolled, self.turn)\n # need to convert from DeviceArray\n self.board = np.array(new_board)\n self.winner = int(new_winner)\n self.turn = int(new_turn)\n\n if not self.has_finished():\n self._roll()\n\n def _change_turn(self):\n \"\"\"Change turn, stored in attribute `turn`.\"\"\"\n self.turn = self.other()\n\n def other(self):\n \"\"\"Return the number of the player whose turn it is not.\"\"\"\n return (self.turn + 1) % 2\n\n def reward(self):\n \"\"\"Return 1 if game was won by player 0, or 0 otherwise. So always seen from player 0's perspective!\"\"\"\n if not self.has_finished():\n return 0\n elif self.winner == 0:\n # note if there's a winner the turn no longer changes\n return 1\n else:\n return 0\n\n def has_finished(self):\n \"\"\"Return True if the game has finished, False if not.\"\"\"\n return self.winner != -1\n\n def get_state(self):\n \"\"\"Return the current state of the game.\n\n Returns:\n A game state of the form `(board, turn, rolled, winner, move_count)`.\n \"\"\"\n return self.board.copy(), self.turn, self.rolled, self.winner, self.move_count\n\n def set_state(self, state):\n \"\"\"Set the game to the input state.\n\n Args:\n state: A game state of the form `(board, turn, rolled, winner, move_count)`.\n \"\"\"\n self.board, self.turn, self.rolled, self.winner, self.move_count = state\n\n def backup(self):\n \"\"\"Store the current state of the game in the attribute `backup_state`.\"\"\"\n self._backup_state = self.get_state()\n\n def restore_backup(self):\n \"\"\"Restore the current state of the game from the attribute `backup_state`.\"\"\"\n self.board, self.turn, self.rolled, self.winner, self.move_count = self._backup_state\n\n def simulate_moves(self, moves):\n \"\"\"Give afterstates resulting from moves.\n\n Relies on function `_get_new_boards`, a `vmap` of `get_new_board`\n\n Args:\n moves: A list of legal moves.\n\n Returns:\n A list of tuples (board, turn, winner), one for each move.\n \"\"\"\n return self._get_new_boards(self._board_params, self.board, jnp.array(moves), self.rolled, self.turn)\n\n @staticmethod\n @partial(jit, static_argnums=(0, 2, 3))\n def _legal_moves_array(board_params, board, turn, rolled):\n \"\"\"Return a boolean array indicating which moves are legal.\n\n Jitted function.\n\n Args:\n board: The current board.\n turn: Whose turn it is.\n rolled: The die roll\n\n Returns:\n A jnp boolean vector with Trues for the legal moves.\n \"\"\"\n (start, finish, _, safe_square, _, _, _, _) = board_params\n\n # moves that don't move a stone beyond the finish, based only on the die roll\n start_squares = board[turn, 0:finish + 1 - rolled]\n # the corresponding end squares\n end_squares = board[turn, rolled: finish + 1]\n\n # start 
square contains a stone to move\n moves_with_legal_start = start_squares > 0\n\n # end square does not contain player stone (or is finish)\n moves_with_legal_end = end_squares == 0\n moves_with_legal_end = index_update(moves_with_legal_end, index[-1], True)\n\n # it's not a capture on the safe space\n safe_space = jnp.zeros(finish + 1 - rolled, dtype='bool')\n safe_space = index_update(safe_space, index[safe_square - rolled], True)\n opponent_present = board[(turn + 1) % 2, rolled: finish + 1] > 0\n no_illegal_capture = ~(opponent_present & safe_space)\n\n legal_moves = moves_with_legal_start & moves_with_legal_end & no_illegal_capture\n\n return legal_moves\n\n @staticmethod\n @partial(jit, static_argnums=0)\n def _get_new_board(board_params, board, move, rolled, turn):\n \"\"\"Return board after given move is played.\n\n Jitted function.\n\n Args:\n board: The board before the move is played.\n move: The move (integer indicating the square to move from as counted along the route).\n rolled: The die roll.\n turn: Whose turn it is (0 or 1).\n\n Returns:\n A tuple of the form `(board, turn, winner)` giving the resulting board and turn, and `winner` indicating\n whether the game has been won and by who.\n \"\"\"\n (start, finish, rosettes, safe_square, mid_start, mid_ended, board_width_internal, n_pieces) = board_params\n\n end = move + rolled\n # move player's stone forward\n indices_x, indices_y, values = [turn, turn], [move, end], [board[turn, move] - 1, board[turn, end] + 1]\n\n # construct auxiliary boards to help with logic\n rosette_board = jnp.zeros(shape=board_width_internal, dtype='int8')\n for i in rosettes:\n rosette_board = index_update(rosette_board, i, 1)\n capture_board = jnp.zeros(shape=board_width_internal, dtype='int8')\n capture_board = index_update(capture_board, (index[mid_start:mid_ended]), 1)\n\n # capture, if opponent present and in capturable area\n other = (turn + 1) % 2\n indices_x, indices_y = indices_x + [other, other], indices_y + [end, start]\n values = values + [(1 - capture_board[end]) * board[other, end],\n board[other, start] + capture_board[end] * board[other, end]]\n\n new_board = index_update(board, (tuple(indices_x), tuple(indices_y)), tuple(values))\n\n has_finished = 1 + jnp.sign(new_board[turn, finish] - n_pieces)\n # if the played move won the game, the winner must be the player who played it\n new_winner = has_finished * turn + (1 - has_finished) * -1\n\n # change turn, unless ending on a rosette or game finished\n new_turn = ((turn + 1) + rosette_board[end] + has_finished) % 2\n\n return new_board, new_turn, new_winner\n\n def check_valid_board(self):\n \"\"\"Return True if current board is valid, otherwise returns a string describing the first found violation.\"\"\"\n board = self.board\n\n if board.dtype != 'int8':\n return 'not ints'\n\n on_board = board[:, self.start + 1:self.finish]\n if jnp.max(on_board) > 1:\n return 'more than one stone on square'\n if jnp.min(on_board) < 0:\n return 'less than 0 stones on square'\n\n for player in [0, 1]:\n if jnp.sum(board[player, self.start: self.finish + 1]) != self._n_pieces:\n return f'number of pieces not conserved (player {player})'\n if not (0 <= board[player, self.start] <= self._n_pieces):\n return f'illegal start pieces (player {player})'\n if not (0 <= board[player, self.finish] <= self._n_pieces):\n return f'illegal finish pieces (player {player})'\n\n overlap_board = board[:, self._mid_start:self._mid_ended]\n if not (jnp.sum(overlap_board, axis=0) <= jnp.ones(self._mid_ended - 
self._mid_start, dtype='int8')).all():\n return 'overlapping stones'\n\n if self.winner != -1:\n if board[self.winner, self.finish] != self._n_pieces:\n return \"winner hasn't finished yet\"\n if board[(self.winner + 1) % 2, self.finish] == self._n_pieces:\n return \"loser has won before winner\"\n return True\n\n def in_middle(self, w):\n \"\"\"Return true if given internal width coordinate is on the middle row in the display board.\"\"\"\n return self._mid_start <= w < self._mid_ended\n\n def transform_to_display(self, h, w):\n \"\"\"Return display coordinates corresponding to given internal coordinates.\n\n Internally the board is a 2x16 grid, the display board is 3x8.\n\n Args:\n h: The internal height coordinate.\n w: The internal width coordinate.\n\n Returns:\n Tuple (h_display, w_display).\n \"\"\"\n if w < self._mid_start:\n w_display = self._mid_start - 1 - w\n h_display = 2 * h\n elif w >= self._mid_ended:\n w_display = (self._display_width - 1) - (w - self._mid_ended)\n h_display = 2 * h\n else:\n w_display = w - self._mid_start\n h_display = 1\n\n return h_display, w_display\n\n def transform_to_internal(self, h_display, w_display):\n \"\"\"Return internal coordinates corresponding to given display coordinates.\n\n Internally the board is a 2x16 grid, the display board is 3x8.\n\n Args:\n h_display: The display height coordinate.\n w_display: The display width coordinate.\n\n Returns:\n Tuple (h, w).\n \"\"\"\n if h_display == 1: # middle row\n h = self.turn\n w = w_display + self._mid_start\n else:\n h = h_display // 2\n if w_display < self._mid_start:\n w = self._mid_start - 1 - w_display\n else:\n w = self._mid_ended - (w_display - (self._display_width - 1))\n\n return h, w\n\n # Last 3 functions only for display purposes\n def display(self):\n \"\"\"Display the game board in the current state, in the conventional shape.\"\"\"\n board_display = self._reshape_board()\n\n cmap = colors.ListedColormap(['b', 'w', 'r', 'y'])\n\n plt.imshow(board_display, cmap=cmap, extent=(0, self._display_width, 3, 0), vmin=-1, vmax=3)\n self._annotate_board()\n\n def _reshape_board(self):\n \"\"\"Turn the internal 2x16 board into the conventional 3x8 shape.\"\"\"\n reshaped_board = np.zeros(shape=(3, self._display_width), dtype=np.int8) + 3\n reshaped_board[1] = (self.board[0, self._mid_start:self._mid_ended] -\n self.board[1, self._mid_start:self._mid_ended])\n for player in [0, 1]:\n sign = (1 - 2 * player)\n reshaped_board[2 * player, :4] = sign * np.flip(self.board[player, 1:self._mid_start])\n reshaped_board[2 * player, -(self.finish - self._mid_ended):] = sign * np.flip(\n self.board[player, self._mid_ended:-1])\n return reshaped_board\n\n def _annotate_board(self):\n \"\"\"Add labels and decorations.\"\"\"\n t_x, t_y = 4.2, 0.7\n # stones at start and finish\n stats = [self.board[ij] for ij in [(0, self.start), (0, self.finish), (1, self.start), (1, self.finish)]]\n player_colors = ['r', 'r', 'b', 'b']\n x_start = 4\n x_finish = self._display_width - (self.finish + 1 - self._mid_ended)\n positions = [(x_start, 0), (x_finish, 0), (x_start, 2), (x_finish, 2)]\n for s, c, (x, y) in zip(stats, player_colors, positions):\n plt.text(x + .3, y + .7, f'{s}', fontsize=24, color=c)\n\n # turn and throw\n plt.text(-0.8, 0.7 + 2 * self.turn, f'{self.rolled}', fontsize=24, color=('r' if self.turn == 0 else 'b'))\n\n # rosettes\n for (y, x) in [(0, 0), (2, 0), (1, 3), (0, 6), (2, 6)]:\n plt.text(t_x + x - 4 - .12, t_y + y + .28, 'X', fontsize=54, color='black')\n\n # make it pretty\n ax = plt.gca()\n 
ax.set_xticks(np.arange(0, self._display_width + 1, 1))\n ax.set_yticks(np.arange(0, 3 + 1, 1))\n ax.grid(color='black', linewidth=5, fillstyle='full')\n ax.tick_params(labelbottom=False, labelleft=False, color='w')\n" ]
[ [ "matplotlib.pyplot.gca", "matplotlib.pyplot.imshow", "numpy.arange", "matplotlib.colors.ListedColormap", "numpy.flip", "matplotlib.pyplot.text", "numpy.array", "numpy.zeros", "numpy.where", "numpy.random.randint" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
PanosAntoniadis/pps-ntua
[ "cb428232ff0986519b364434daffcbf520dd8381" ]
[ "Lab1/ex1/plots/plot_cores.py" ]
[ "# A simple script that plots the time and the speedup\n# of the parallel OpenMP program as the number of available\n# cores increases.\n\nimport matplotlib.pyplot as plt\nimport sys\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\n\nt_64 = []\nt_1024 = []\nt_4096 = []\n\ns_64 = []\ns_1024 = []\ns_4096 = []\n\nfp = open(sys.argv[1])\nline = fp.readline()\nwhile line:\n tokens = line.split()\n if tokens[2] == '64':\n t_64.append(float(tokens[6]) * 100)\n if tokens[2] == '1024':\n t_1024.append(float(tokens[6]))\n if tokens[2] == '4096':\n t_4096.append(float(tokens[6]))\n line = fp.readline()\n\nfp.close()\n\nprint(t_64)\nprint(t_1024)\nprint(t_4096)\n\nfor i in range(0, len(t_64)):\n s_64.append(t_64[0] / t_64[i])\n s_1024.append(t_1024[0] / t_1024[i])\n s_4096.append(t_4096[0] / t_4096[i])\n\nprint(s_64)\nprint(s_1024)\nprint(s_4096)\n\nfig, ax = plt.subplots()\nax.grid(True)\nax.set_xlabel(\"number of cores\")\nax.xaxis.set_ticks(np.arange(0, 5, 1))\nax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)\nax.set_xlim(-0.5, 4.5)\nax.set_ylabel(\"time (ms)\")\nplt.plot(t_64, label=\"Time\", color=\"blue\", marker='x')\nplt.title(\"Game of Life in 64×64 table\")\nplt.savefig(\"time_64.png\", bbox_inches=\"tight\")\n\nfig, ax = plt.subplots()\nax.grid(True)\nax.set_xlabel(\"number of cores\")\nax.xaxis.set_ticks(np.arange(0, 5, 1))\nax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)\nax.set_xlim(-0.5, 4.5)\nax.set_ylabel(\"time (s)\")\nplt.plot(t_1024, label=\"Time\", color=\"blue\", marker='x')\nplt.title(\"Game of Life 1024×1024 table\")\nplt.savefig(\"time_1024.png\", bbox_inches=\"tight\")\n\nfig, ax = plt.subplots()\nax.grid(True)\nax.set_xlabel(\"number of cores\")\nax.xaxis.set_ticks(np.arange(0, 5, 1))\nax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)\nax.set_xlim(-0.5, 4.5)\nax.set_ylabel(\"time (s)\")\nplt.plot(t_4096, label=\"Time\", color=\"blue\", marker='x')\nplt.title(\"Game of Life 4096×4096 table\")\nplt.savefig(\"time_4096.png\", bbox_inches=\"tight\")\n\nfig, ax = plt.subplots()\nax.grid(True)\nax.set_xlabel(\"number of cores\")\nax.xaxis.set_ticks(np.arange(0, 5, 1))\nax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)\nax.set_xlim(-0.5, 4.5)\nax.set_ylabel(\"speedup\")\nplt.plot(s_64, label=\"Speedup\", color=\"blue\", marker='x')\nplt.title(\"Game of Life in 64×64 table\")\nplt.savefig(\"speedup_64.png\", bbox_inches=\"tight\")\n\nfig, ax = plt.subplots()\nax.grid(True)\nax.set_xlabel(\"number of cores\")\nax.xaxis.set_ticks(np.arange(0, 5, 1))\nax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)\nax.set_xlim(-0.5, 4.5)\nax.set_ylabel(\"speedup\")\nplt.plot(s_1024, label=\"Speedup\", color=\"blue\", marker='x')\nplt.title(\"Game of Life 1024×1024 table\")\nplt.savefig(\"speedup_1024.png\", bbox_inches=\"tight\")\n\nfig, ax = plt.subplots()\nax.grid(True)\nax.set_xlabel(\"number of cores\")\nax.xaxis.set_ticks(np.arange(0, 5, 1))\nax.set_xticklabels(['1', '2', '4', '6', '8'], rotation=45)\nax.set_xlim(-0.5, 4.5)\nax.set_ylabel(\"speedup\")\nplt.plot(s_4096, label=\"Speedup\", color=\"blue\", marker='x')\nplt.title(\"Game of Life 4096×4096 table\")\nplt.savefig(\"speedup_4096.png\", bbox_inches=\"tight\")\n" ]
[ [ "matplotlib.pyplot.title", "matplotlib.use", "numpy.arange", "matplotlib.pyplot.subplots", "matplotlib.pyplot.savefig", "matplotlib.pyplot.plot" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fetus-hina/IkaLog
[ "bd476da541fcc296f792d4db76a6b9174c4777ad", "bd476da541fcc296f792d4db76a6b9174c4777ad", "bd476da541fcc296f792d4db76a6b9174c4777ad", "bd476da541fcc296f792d4db76a6b9174c4777ad" ]
[ "ikalog/inputs/osx/avfoundation_capture.py", "ikalog/utils/ikamatcher2/decode_1bit.py", "ikalog/utils/character_recoginizer/deadly_weapon.py", "ikalog/scenes/game/special_gauge.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# IkaLog\n# ======\n# Copyright (C) 2015 Takeshi HASEGAWA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport copy\nimport ctypes\nimport os\nimport threading\nimport time\n\nimport cv2\nimport numpy as np\nfrom numpy.ctypeslib import ndpointer\n\nfrom ikalog.utils import *\nfrom ikalog.inputs import VideoInput\n\n\nclass AVFoundationCaptureDevice(object):\n\n def get_source_list(self):\n num_devices = self.dll.get_source_count()\n sources = []\n for i in range(num_devices):\n source_name = self.dll.get_source_name(i).decode('utf-8')\n sources.append(source_name)\n return sources\n\n def read(self):\n self.dll.read_frame(self.dest_buffer)\n # ToDo: Error check.\n\n frame = self.dest_buffer[:, :, 0:3]\n assert(frame.shape[2] == 3)\n\n return True, copy.deepcopy(frame)\n\n def select_device(self, device_num):\n try:\n n = int(device_num)\n self.dll.select_capture_source(n)\n except:\n pass\n\n def __del__hoge__(self):\n if hasattr(self, 'dll'):\n self.dll.deinitalize()\n\n def _init_library(self):\n self.dest_buffer = np.zeros((720, 1280, 4), np.uint8)\n\n libavf_dll = os.path.join('lib', 'libavf_ctypes.so')\n ctypes.cdll.LoadLibrary(libavf_dll)\n\n self.dll = ctypes.CDLL(libavf_dll)\n self.dll.initialize.argtypes = None\n self.dll.initialize.restype = None\n self.dll.deinitialize.argtypes = None\n self.dll.deinitialize.restype = None\n self.dll.read_frame.argtypes = [\n ndpointer(ctypes.c_uint8, flags=\"C_CONTIGUOUS\")]\n self.dll.read_frame.restype = None\n self.dll.select_capture_source.argtypes = [ctypes.c_int]\n self.dll.select_capture_source.restype = None\n self.dll.get_source_count.argtypes = None\n self.dll.get_source_count.restype = ctypes.c_int\n self.dll.get_source_name.argtypes = [ctypes.c_int]\n self.dll.get_source_name.restype = ctypes.c_char_p\n\n def __init__(self):\n self.dll = None\n self._init_library()\n self.dll.initialize()\n\n\nclass AVFoundationCapture(VideoInput):\n\n # override\n def _enumerate_sources_func(self):\n return self.cap.get_source_list()\n\n # override\n def _read_frame_func(self):\n\n ret, frame = self.cap.read()\n if not ret:\n return None\n\n return frame\n\n # override\n def _cleanup_driver_func(self):\n self.lock.acquire()\n if not self.cap is None:\n self.cap = None\n sleep(0.1)\n self.lock.release()\n\n # override\n def _initialize_driver_func(self):\n IkaUtils.dprint('%s: initializing class' % self)\n self.lock.acquire()\n if not self.cap is None:\n self.cap = None\n sleep(0.1)\n\n self.cap = AVFoundationCaptureDevice()\n\n self.lock.release()\n\n # override\n def _is_active_func(self):\n return True\n\n # override\n def _select_device_by_index_func(self, source):\n IkaUtils.dprint('%s: initializing capture device %s' % (self, source))\n\n # initialize target capture device\n frame = self.read_frame()\n cv2.imshow(self.__class__.__name__, np.zeros((240, 320), dtype=np.uint8))\n cv2.waitKey(3000)\n\n self.cap.select_device(source)\n self.last_tick = self.get_tick()\n\n # override\n 
def _select_device_by_name_func(self, source):\n IkaUtils.dprint(\n '%s: Doesn''t support _select_device_by_name_func()' % self)\n\n def __init__(self):\n self.cap = None\n super(AVFoundationCapture, self).__init__()\n frame = self.read_frame()\n cv2.imshow(self.__class__.__name__, np.zeros((240, 320), dtype=np.uint8))\n cv2.waitKey(3000)\n\nif __name__ == \"__main__\":\n\n obj = AVFoundationCapture()\n list = obj.enumerate_sources()\n for n in range(len(list)):\n IkaUtils.dprint(\"%d: %s\" % (n, list[n]))\n dev = input(\"Please input number of capture device: \")\n obj.select_source(dev)\n\n k = 0\n while k != 27:\n frame = obj.read_frame()\n image = cv2.resize(frame, (1280, 720))\n cv2.imshow(AVFoundationCapture.__name__, image)\n k = cv2.waitKey(1)\n\n if k == ord('s'):\n import time\n cv2.imwrite('screenshot_%d.png' % int(time.time()), frame)\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# IkaLog\n# ======\n# Copyright (C) 2015-2016 Takeshi HASEGAWA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\n\n\ndef decode_1bit(self, img):\n \"\"\"\n Decode the image from popcnt internal image format.\n \"\"\"\n assert len(img.shape) == 1\n assert img.shape[0] >= (self._h * self._w / 8)\n assert img.shape[0] % self._align == 0\n\n bitrev8 = lambda x: sum(1 << (8 - 1 - i) for i in range(8) if x >> i & 1)\n img_reverse = np.array(\n list(map(lambda x: bitrev8(x), img)), dtype=np.uint8)\n\n img_8b_1d = np.unpackbits(img_reverse) * 255 # to 8bit gray scale.\n img_8b_1d_trimmed = img_8b_1d[0: (self._h * self._w)]\n img_8b_2d = np.reshape(img_8b_1d_trimmed, (self._h, self._w))\n\n return img_8b_2d\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# IkaLog\n# ======\n# Copyright (C) 2015 Takeshi HASEGAWA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport cv2\nimport os\nimport numpy as np\n\nfrom ikalog.utils.character_recoginizer import *\nfrom ikalog.utils import *\n\n\nclass DeadlyWeaponRecoginizer(CharacterRecoginizer):\n\n def name2id(self, name):\n try:\n return self.name2id_table.index(name)\n except:\n self.name2id_table.append(name)\n return self.name2id_table.index(name)\n\n def id2name(self, id):\n return self.name2id_table[id]\n\n def _normalize(self, img):\n img_weapon_b = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # cv2.imshow('hoge', img)\n array0to1280 = np.array(range(1280), dtype=np.int32)\n img_chars = np.sum(img_weapon_b[:, :], axis=1) # 行毎の検出dot数\n img_char_extract_y = np.extract(\n img_chars > 0, array0to1280[0:len(img_chars)])\n\n 
if len(img_char_extract_y) < 1:\n return None\n\n y1 = np.amin(img_char_extract_y)\n y2 = np.amax(img_char_extract_y) + 1\n\n if (y2 - y1) < 2:\n return None\n\n img_weapon_b = img_weapon_b[y1:y2, :]\n\n new_height = self.sample_height\n new_width = int(img_weapon_b.shape[\n 1] * (new_height / img_weapon_b.shape[0]))\n img_weapon_b32 = cv2.resize(img_weapon_b, (new_width, new_height))\n\n # 横方向を crop\n\n array0to1280 = np.array(range(1280), dtype=np.int32)\n img_chars = np.sum(img_weapon_b32[:, :], axis=0)\n img_char_extract_x = np.extract(\n img_chars > 0, array0to1280[0:len(img_chars)])\n\n if len(img_char_extract_x) < 1:\n return None\n\n x1 = np.amin(img_char_extract_x)\n x2 = np.amax(img_char_extract_x) + 1\n\n if (x2 - x1 > 160):\n x2 = x1 + 160\n img_weapon_final = np.zeros((new_height, new_height * 10), np.uint8)\n img_weapon_final[:, 0: x2 - x1] = img_weapon_b32[:, x1:x2]\n if False:\n cv2.imshow('deadly weapon', img_weapon_b32)\n cv2.imshow('deadly weapon_final', img_weapon_final)\n\n return img_weapon_final\n\n def match(self, img):\n try:\n img_normalized = self._normalize(img)\n r = super(DeadlyWeaponRecoginizer, self).match1(img_normalized)\n except:\n print(img.shape)\n return 'unknown'\n\n index = r - ord('0')\n try:\n return self.id2name(index)\n\n except IndexError:\n IkaUtils.dprint('%s: FIXME: match1() returned invalid index (%s)' % (self, index))\n IkaUtils.dprint('%s: id2name_table (len %d) == %s' % (self, len(self.name2id_table), self.name2id_table))\n return 'unknown'\n\n def _find_png_files(self, dir):\n list = []\n for root, dirs, files in os.walk(dir):\n for file in sorted(files):\n if file.endswith(\".png\"):\n f = os.path.join(root, file)\n list.append(f)\n return list\n\n # 保存項目追加のために save/load をオーバーライド\n def save_model_to_file(self, file):\n f = open(file, 'wb')\n pickle.dump([self.samples, self.responses, self.name2id_table], f)\n f.close()\n\n def load_model_from_file(self, file):\n f = open(file, 'rb')\n l = pickle.load(f)\n f.close()\n self.samples = l[0]\n self.responses = l[1]\n self.name2id_table = l[2]\n\n def __new__(cls, *args, **kwargs):\n\n if not hasattr(cls, '__instance__'):\n cls.__instance__ = super(\n DeadlyWeaponRecoginizer, cls).__new__(cls, *args, **kwargs)\n\n return cls.__instance__\n\n def __init__(self):\n if hasattr(self, 'trained') and self.trained:\n return\n\n super(DeadlyWeaponRecoginizer, self).__init__()\n\n self.name2id_table = []\n\n self.x_cutter = self # 変則的だがカッターとして自分を使う\n self.sample_height = 16\n\n lang = Localization.get_game_languages()[0]\n for lang_ in Localization.get_game_languages():\n model_name = 'data/deadly_weapons.%s.model' % lang_\n if os.path.isfile(model_name):\n lang = lang_\n break\n\n model_name = 'data/deadly_weapons.%s.model' % lang\n\n if os.path.isfile(model_name):\n self.load_model_from_file(model_name)\n self.train()\n return\n\n samples_path = 'training/deadly_weapons/%s' % lang\n IkaUtils.dprint('Building %s from %s' % (model_name, samples_path))\n data = []\n\n for file in self._find_png_files(samples_path):\n s = os.path.basename(file).split('.')\n if len(s) != 3:\n continue\n if s[2] != 'png':\n continue\n weapon_name = s[0]\n num = s[1]\n\n print(file)\n img = cv2.imread(file)\n img_normalized = self._normalize(img)\n\n if img_normalized is None:\n continue\n\n # サンプル数が足りないので3回学習\n img_normalized = cv2.cvtColor(img_normalized, cv2.COLOR_GRAY2BGR)\n self.add_sample(self.name2id(weapon_name), img_normalized)\n\n IkaUtils.dprint('Writing %s' % model_name)\n 
self.save_model_to_file(model_name)\n\n self.train()\n\nif __name__ == \"__main__\":\n import sys\n obj = DeadlyWeaponRecoginizer()\n\n # 引数で PNG ファイルを渡されている場合は、それに対して\n # 認識処理を行う\n\n list = []\n for file in sys.argv[1:]:\n img = cv2.imread(file)\n if img is None:\n continue\n cv2.imshow('input', img)\n\n r = obj.match(img)\n\n t = (r, file)\n list.append(t)\n\n for t in sorted(list):\n print('<!-- %s --><img src=%s>' % (t[0], t[1]))\n", "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# IkaLog\n# ======\n# Copyright (C) 2015 Takeshi HASEGAWA\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport sys\n\nimport cv2\nimport numpy as np\n\nfrom ikalog.utils import *\nfrom ikalog.scenes.scene import Scene\n\n\nclass GameSpecialGauge(Scene):\n\n def reset(self):\n super(GameSpecialGauge, self).reset()\n\n self._last_event_msec = - 100 * 1000\n\n def match_no_cache(self, context):\n if self.is_another_scene_matched(context, 'GameTimerIcon') == False:\n return False\n\n frame = context['engine']['frame']\n\n img = frame[34:34+102, 1117:1117+102]\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n img_filtered = img_hsv[:, :, 1]\n img_filtered[img_hsv[:, :, 1] > 64] = 255\n img_filtered[img_hsv[:, :, 2] > 64] = 255\n img_filtered[img_filtered <= 64] = 0\n img_masked = img_filtered & self._mask_gauge[:, :, 0]\n # cv2.imshow('gauge', img_masked)\n\n pixels = np.sum(img_masked) / 255\n value = int(pixels / self._gauge_pixels * 100)\n last_value = context['game'].get('special_gauge', None)\n last_charged = context['game'].get('special_gauge_charged', False)\n\n charged = False\n if value > 95:\n img_white = matcher.MM_WHITE()(frame[34:34+102, 1117:1117+102, :])\n img_white_masked = img_white & self._mask_gauge[:, :, 0]\n white_score = np.sum(img_white_masked / 255)\n charged = (white_score > 0)\n\n if value != last_value:\n context['game']['special_gauge'] = value\n self._call_plugins('on_game_special_gauge_update')\n\n if (not last_charged) and (charged):\n self._call_plugins('on_game_special_gauge_charged')\n context['game']['special_gauge_charged'] = charged\n\n return False\n\n def _analyze(self, context):\n pass\n\n def _init_scene(self, debug=False):\n self._mask_gauge = np.zeros((102, 102, 3), dtype=np.uint8)\n cv2.circle(self._mask_gauge, (51, 51), 48, (255, 255, 255), 3)\n self._mask_gauge[0:55, 0:55] = np.zeros((55,55, 3), dtype=np.uint8)\n\n self._mm_dark = matcher.MM_DARK()\n \n self._gauge_pixels = np.sum(self._mask_gauge[:, :, 0]) / 255\n\nif __name__ == \"__main__\":\n GameSpecialGauge.main_func()\n" ]
[ [ "numpy.ctypeslib.ndpointer", "numpy.zeros" ], [ "numpy.reshape", "numpy.unpackbits" ], [ "numpy.amin", "numpy.amax", "numpy.zeros", "numpy.sum" ], [ "numpy.zeros", "numpy.sum" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
LiahNikol/line-segment-intersections
[ "0a2eb14b54619568d2b79839319c079b27a53d77" ]
[ "linesegmentintersections/helper.py" ]
[ "# Class contains auxiliary methods\nfrom numpy import array\nfrom numpy.linalg import det\nfrom .Intersection import Intersection\n\n\ndef isValidPos(oPos, sl):\n if oPos < 0 or oPos >= len(sl):\n return False\n return True\n\n# credit to Dr. Sheehy for provinding orientation class code\ndef orientation(*points):\n d = array(det(points))\n if d > 0:\n return 1 # ccw\n elif d < 0:\n return -1 # cw\n else:\n return 0 # colinear\n \ndef checkIntersect(oPos, seg, sl):\n # check for intersection\n oSeg = sl.get(oPos)\n coords = intersects(seg, oSeg)\n if len(coords) > 0:\n return coords # return a tuple of the intersection coordinates\n return ()\n\ndef intersects(seg1, seg2):\n l1, r1 = seg1.endpoints() # extract endpoints from each segment\n l2, r2 = seg2.endpoints()\n \n ret1 = orientation(l1, r1, l2)\n ret2 = orientation(l1, r1, r2)\n \n ret3 = orientation(l2, r2, l1)\n ret4 = orientation(l2, r2, r1)\n \n if ret1 * ret2 < 0 and ret3 * ret4 < 0: # determinants have same sign so these 2 segments cannot intersect\n # calculate coords \n xNum = seg2.getYIntercept() - seg1.getYIntercept()\n xDenom = seg1.getSlope() - seg2.getSlope()\n x = xNum / xDenom\n y = seg1.getSlope() * x + seg1.getYIntercept()\n \n # determine which segment is \"above\" and which \"below\"\n l3 = seg1.getLeftEndpoint().coords()\n l4 = seg2.getLeftEndpoint().coords()\n intersection = (x, y)\n if orientation(l3, intersection, l4) > 0: # ccw\n return Intersection(x, y, seg2, seg1)\n elif orientation(l3, intersection, l4) < 0: # cw\n return Intersection(x, y, seg1, seg2)\n return ()\n \n " ]
[ [ "numpy.linalg.det" ] ]
[ { "matplotlib": [], "numpy": [ "1.10", "1.12", "1.11", "1.19", "1.24", "1.13", "1.16", "1.9", "1.18", "1.23", "1.21", "1.22", "1.20", "1.7", "1.15", "1.14", "1.17", "1.8" ], "pandas": [], "scipy": [], "tensorflow": [] } ]
jgonzal3/devml
[ "77902de0af041e1e272ed1356068fc101498b144" ]
[ "devml/stats.py" ]
[ "\"\"\"Generate statistics about repos\n\n#TO DO LIST:\n\nhttp://stackoverflow.com/questions/26489134/whats-the-inverse-of-the-quantile-function-on-a-pandas-series\n* Look into T-Score as ranking or Z-Score\n* Create large columnar structure something like:\n ** active days, inactivate days, commits total, inserts total, inserts per active day etc.\n\n\n\"\"\"\n\nfrom pandas import DataFrame\nimport scipy\n\nfrom .ts import (unique_days)\n\n\ndef commits_per_day_authors(df):\n \"\"\"TO..in progress\"\"\"\n\n #group by author_name\n authors = df.groupby(\"author_name\")\n #sum up commits each day\n commits_per_day = authors.resample('D').count()\n return commits_per_day\n\ndef author_commit_count(df):\n\n \"\"\"\n Returns a Pandas Series sorted by counts of commits\n\n In [55]: out\n Out[55]: \n author_name\n Armin Ronacher 5061\n Markus Unterwaditzer 1054\n \n \n\n \"\"\"\n author_count = DataFrame({'commits' : df.groupby(\"author_name\"\n ).size().sort_values(ascending=False)}).reset_index()\n return author_count\n\ndef author_descriptive_stats_commits(author):\n \"\"\"Takes a data frame with commit counts\"\"\"\n\n describe = author.commits.describe().to_dict()\n author_commit_median = author.commits.median()\n describe[\"median\"] = author_commit_median\n return describe\n\ndef author_percentile_commits(df):\n \"\"\"Finds the Quantiles For Each Author\n\n \"\"\"\n percent = []\n for row in df[\"commits\"]:\n percent.append(\n scipy.stats.percentileofscore(df['commits'],row))\n df['percentile'] = percent\n return df\n\ndef author_percentile_of_total(df):\n \"\"\"Generates % of total commits by author name\n\n \"\"\"\n\n total = df['commits']/df['commits'].sum()\n df['percentage_total'] = total.values.round(2)\n return df\n\n\ndef author_active_days(df):\n \"\"\"Active Days (Days in which there was a commit)\n\n This dataframe is return as index such that each column is a date\n in which a commit was created.\n \n Example of how to query this:\n ad = author_active_days(df)\n active_days = ad.loc[\"Armin Ronacher\"].count()\n Out[98]: 960\n\n \"\"\"\n\n active_days = {}\n for name, group in df.groupby(\"author_name\"):\n uday = unique_days(group)\n active_days[name] = uday\n df = DataFrame.from_dict(active_days, orient='index')\n df.index.name = \"author_name\"\n return df\n\ndef author_unique_active_days(df, sort_by=\"active_days\"):\n \"\"\"DataFrame of Unique Active Days by Author With Descending Order\n \n author_name\tunique_days\n 46\tArmin Ronacher\t271\n 260\tMarkus Unterwaditzer\t145\n \"\"\"\n\n author_list = []\n count_list = []\n duration_active_list = []\n ad = author_active_days(df)\n for author in ad.index:\n author_list.append(author) \n vals = ad.loc[author]\n vals.dropna(inplace=True)\n vals.drop_duplicates(inplace=True)\n vals.sort_values(axis=0,inplace=True)\n vals.reset_index(drop=True, inplace=True)\n count_list.append(vals.count())\n duration_active_list.append(vals[len(vals)-1]-vals[0])\n df_author_ud = DataFrame() \n df_author_ud[\"author_name\"] = author_list\n df_author_ud[\"active_days\"] = count_list\n df_author_ud[\"active_duration\"] = duration_active_list\n df_author_ud[\"active_ratio\"] = \\\n round(df_author_ud[\"active_days\"]/df_author_ud[\"active_duration\"].dt.days, 2)\n df_author_ud = df_author_ud.iloc[1:] #first row is =\n df_author_ud = df_author_ud.sort_values(by=sort_by, ascending=False)\n return df_author_ud\n\n\n\n\n\n" ]
[ [ "scipy.stats.percentileofscore", "pandas.DataFrame", "pandas.DataFrame.from_dict" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
topnotches/yolov3-tf2
[ "6e7d00c72ba06715985f4944721f8463f37be034" ]
[ "tools/export_tfserving.py" ]
[ "import time\nfrom absl import app, flags, logging\nfrom absl.flags import FLAGS\nimport cv2\nimport numpy as np\nimport tensorflow as tf\nfrom yolov3_tf2.models import (\n YoloV3, YoloV3Tiny\n)\nfrom yolov3_tf2.dataset import transform_images\n\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import tensor_spec\nfrom tensorflow.python.util import nest\n\nflags.DEFINE_string('weights', './checkpoints/yolov3.tf',\n 'path to weights file')\nflags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')\nflags.DEFINE_string('output', './serving/yolov3/1', 'path to saved_model')\nflags.DEFINE_string('classes', './data/coco.names', 'path to classes file')\nflags.DEFINE_string('image', './data/girl.png', 'path to input image')\nflags.DEFINE_integer('num_classes', 80, 'number of classes in the model')\n\n\ndef main(_argv):\n if FLAGS.tiny:\n yolo = YoloV3Tiny(classes=FLAGS.num_classes)\n else:\n yolo = YoloV3(classes=FLAGS.num_classes)\n\n yolo.load_weights(FLAGS.weights)\n logging.info('weights loaded')\n\n tf.saved_model.save(yolo, FLAGS.output)\n logging.info(\"model saved to: {}\".format(FLAGS.output))\n\n model = tf.saved_model.load(FLAGS.output)\n infer = model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]\n logging.info(infer.structured_outputs)\n\n class_names = [c.strip() for c in open(FLAGS.classes).readlines()]\n logging.info('classes loaded')\n\n img = tf.image.decode_image(open(FLAGS.image, 'rb').read(), channels=3)\n img = tf.expand_dims(img, 0)\n img = transform_images(img, 224)\n\n t1 = time.time()\n outputs = infer(img)\n boxes, scores, classes, nums = outputs[\"yolo_nms\"], outputs[\n \"yolo_nms_1\"], outputs[\"yolo_nms_2\"], outputs[\"yolo_nms_3\"]\n t2 = time.time()\n logging.info('time: {}'.format(t2 - t1))\n\n logging.info('detections:')\n for i in range(nums[0]):\n logging.info('\\t{}, {}, {}'.format(class_names[int(classes[0][i])],\n scores[0][i].numpy(),\n boxes[0][i].numpy()))\n\n\nif __name__ == '__main__':\n try:\n app.run(main)\n except SystemExit:\n pass\n" ]
[ [ "tensorflow.saved_model.save", "tensorflow.expand_dims", "tensorflow.saved_model.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
msharp1/docker-mozillatts
[ "e59fbf114d7999d8d1ce75eb9e86c0f56f755f89" ]
[ "tts_web/synthesize.py" ]
[ "#!/usr/bin/env python3\nimport io\nimport json\nimport logging\nimport os\nimport sys\nimport time\nimport typing\nimport unittest.mock\n\nimport torch\n\nfrom TTS.tts.utils.generic_utils import setup_model\nfrom TTS.tts.utils.synthesis import synthesis\nfrom TTS.tts.utils.text.symbols import make_symbols\nfrom TTS.utils.audio import AudioProcessor\nfrom TTS.utils.io import load_config\nfrom TTS.vocoder.utils.generic_utils import setup_generator\n\n_LOGGER = logging.getLogger(\"mozillatts\")\n\n# -----------------------------------------------------------------------------\n\n\ndef tts(\n model,\n vocoder_model,\n text,\n CONFIG,\n use_cuda,\n ap,\n use_gl,\n speaker_fileid,\n speaker_embedding=None,\n gst_style=None,\n ap_vocoder=None,\n scale_factors=None,\n):\n t_1 = time.time()\n waveform, _, _, mel_postnet_spec, _, _ = synthesis(\n model=model,\n text=text,\n CONFIG=CONFIG,\n use_cuda=use_cuda,\n ap=ap,\n speaker_id=speaker_fileid,\n style_wav=gst_style,\n truncated=False,\n enable_eos_bos_chars=CONFIG.enable_eos_bos_chars,\n use_griffin_lim=use_gl,\n speaker_embedding=speaker_embedding,\n backend=\"torch\",\n do_trim_silence=False,\n )\n\n if CONFIG.model == \"Tacotron\" and not use_gl:\n mel_postnet_spec = ap.out_linear_to_mel(mel_postnet_spec.T).T\n\n mel_postnet_spec = ap._denormalize(mel_postnet_spec.T).T\n\n if not use_gl:\n vocoder_input = ap_vocoder._normalize(mel_postnet_spec.T)\n if scale_factors and ap_vocoder:\n # TTS and vocoder sample rates differ\n _LOGGER.debug(\"Interpolating with scale factors %s\", scale_factors)\n vocoder_input = interpolate(vocoder_input, scale_factors)\n else:\n vocoder_input = torch.tensor(vocoder_input).unsqueeze(0)\n\n waveform = vocoder_model.inference(vocoder_input)\n\n if use_cuda and not use_gl:\n waveform = waveform.cpu()\n\n if not use_gl:\n waveform = waveform.numpy()\n\n waveform = waveform.squeeze()\n rtf = (time.time() - t_1) / (len(waveform) / ap.sample_rate)\n tps = (time.time() - t_1) / len(waveform)\n print(\" > Run-time: {}\".format(time.time() - t_1))\n print(\" > Real-time factor: {}\".format(rtf))\n print(\" > Time per step: {}\".format(tps))\n return waveform\n\n\ndef interpolate(mel, scale_factors):\n mel = torch.tensor(mel).unsqueeze(0).unsqueeze(0)\n mel = torch.nn.functional.interpolate(\n mel, scale_factor=scale_factors, mode=\"bilinear\"\n ).squeeze(0)\n\n return mel\n\n\n# -----------------------------------------------------------------------------\n\n\nclass Synthesizer:\n def __init__(\n self,\n config_path,\n model_path,\n use_cuda=False,\n vocoder_path=\"\",\n vocoder_config_path=\"\",\n batched_vocoder=True,\n speakers_json=\"\",\n speaker_fileid=None,\n gst_style=None,\n ):\n self.config_path = config_path\n self.model_path = model_path\n self.use_cuda = use_cuda\n self.vocoder_path = vocoder_path\n self.vocoder_config_path = vocoder_config_path\n self.batched_vocoder = batched_vocoder\n self.speakers_json = speakers_json\n self.speaker_fileid = speaker_fileid\n self.gst_style = gst_style\n\n self.model = None\n\n def load(self):\n # load the config\n C = load_config(self.config_path)\n self.config = C\n\n # Resolve scale_stats path\n stats_path = C.audio.get(\"stats_path\")\n if stats_path and not os.path.isfile(stats_path):\n # Look for stats next to config\n model_stats_path = os.path.join(\n os.path.dirname(self.config_path), \"scale_stats.npy\"\n )\n if os.path.isfile(model_stats_path):\n # Patch config\n C.audio[\"stats_path\"] = model_stats_path\n else:\n _LOGGER.warning(\"No scale stats found at 
%s\", C.audio[\"stats_path\"])\n C.audio[\"stats_path\"] = \"\"\n\n C.forward_attn_mask = True\n\n if \"gst\" not in C.keys():\n # Patch config\n gst = {\n \"gst_use_speaker_embedding\": False,\n \"gst_style_input\": None,\n \"gst_embedding_dim\": 512,\n \"gst_num_heads\": 4,\n \"gst_style_tokens\": 10,\n }\n\n C[\"gst\"] = gst\n setattr(C, \"gst\", gst)\n\n if \"use_external_speaker_embedding_file\" not in C.keys():\n C[\"use_external_speaker_embedding_file\"] = False\n setattr(C, \"use_external_speaker_embedding_file\", False)\n\n if \"gst_use_speaker_embedding\" not in C.gst:\n C.gst[\"gst_use_speaker_embedding\"] = False\n\n # load the audio processor\n ap = AudioProcessor(**C.audio)\n self.ap = ap\n\n # if the vocabulary was passed, replace the default\n if \"characters\" in C.keys():\n symbols, phonemes = make_symbols(**C.characters)\n else:\n from TTS.tts.utils.text.symbols import phonemes, symbols\n\n speaker_embedding = None\n speaker_embedding_dim = None\n num_speakers = 0\n\n # load speakers\n if self.speakers_json != \"\":\n speaker_mapping = json.load(open(self.speakers_json, \"r\"))\n num_speakers = len(speaker_mapping)\n if C.use_external_speaker_embedding_file:\n if self.speaker_fileid is not None:\n speaker_embedding = speaker_mapping[self.speaker_fileid][\n \"embedding\"\n ]\n else: # if speaker_fileid is not specificated use the first sample in speakers.json\n speaker_embedding = speaker_mapping[\n list(speaker_mapping.keys())[0]\n ][\"embedding\"]\n speaker_embedding_dim = len(speaker_embedding)\n\n self.speaker_embedding = speaker_embedding\n\n # load the model\n num_chars = len(phonemes) if C.use_phonemes else len(symbols)\n model = setup_model(num_chars, num_speakers, C, speaker_embedding_dim)\n cp = torch.load(self.model_path, map_location=torch.device(\"cpu\"))\n model.load_state_dict(cp[\"model\"])\n model.eval()\n if self.use_cuda:\n model.cuda()\n\n if hasattr(model.decoder, \"set_r\"):\n model.decoder.set_r(cp[\"r\"])\n\n self.model = model\n\n # load vocoder model\n if self.vocoder_path:\n VC = load_config(self.vocoder_config_path)\n\n # Resolve scale_stats path\n stats_path = VC.audio.get(\"stats_path\")\n if stats_path and not os.path.isfile(stats_path):\n # Look for stats next to config\n vocoder_stats_path = os.path.join(\n os.path.dirname(self.vocoder_config_path), \"scale_stats.npy\"\n )\n if os.path.isfile(vocoder_stats_path):\n # Patch config\n VC.audio[\"stats_path\"] = vocoder_stats_path\n else:\n # Try next to TTS config\n vocoder_stats_path = os.path.join(\n os.path.dirname(self.config_path), \"scale_stats.npy\"\n )\n if os.path.isfile(vocoder_stats_path):\n # Patch config\n VC.audio[\"stats_path\"] = vocoder_stats_path\n else:\n _LOGGER.warning(\n \"No vocoder scale stats found at %s\", VC.audio[\"stats_path\"]\n )\n VC.audio[\"stats_path\"] = \"\"\n\n self.ap_vocoder = AudioProcessor(**VC.audio)\n\n vocoder_model = setup_generator(VC)\n vocoder_model.load_state_dict(\n torch.load(self.vocoder_path, map_location=\"cpu\")[\"model\"]\n )\n vocoder_model.remove_weight_norm()\n vocoder_model.inference_padding = 0\n if self.use_cuda:\n vocoder_model.cuda()\n vocoder_model.eval()\n else:\n vocoder_model = None\n VC = None\n self.ap_vocoder = None\n\n self.vocoder_model = vocoder_model\n self.vocoder_config = VC\n\n # synthesize voice\n self.use_griffin_lim = self.vocoder_model is None\n\n if not C.use_external_speaker_embedding_file:\n if self.speaker_fileid and self.speaker_fileid.isdigit():\n self.speaker_fileid = int(self.speaker_fileid)\n else:\n 
self.speaker_fileid = None\n else:\n self.speaker_fileid = None\n\n if (self.gst_style is None) and (\"gst\" in C.keys()):\n gst_style = C.gst.get(\"gst_style_input\", None)\n else:\n # check if gst_style string is a dict, if is dict convert else use string\n try:\n gst_style = json.loads(self.gst_style)\n if max(map(int, gst_style.keys())) >= C.gst[\"gst_style_tokens\"]:\n raise RuntimeError(\n \"The highest value of the gst_style dictionary key must be less than the number of GST Tokens, \\n Highest dictionary key value: {} \\n Number of GST tokens: {}\".format(\n max(map(int, gst_style.keys())), C.gst[\"gst_style_tokens\"]\n )\n )\n except ValueError:\n gst_style = self.gst_style\n\n self.gst_style = gst_style\n\n # Compute scale factors in case TTS/vocoder sample rates differ\n self.scale_factors = self.compute_scale_factors()\n\n # -------------------------------------------------------------------------\n # See: https://github.com/mozilla/TTS/issues/520\n\n def compute_scale_factors(self) -> typing.Optional[typing.List[float]]:\n if not self.ap_vocoder or (self.ap.sample_rate == self.ap_vocoder.sample_rate):\n return None\n\n return [1, self.ap_vocoder.sample_rate / self.ap.sample_rate]\n\n @property\n def sample_rate(self) -> int:\n \"\"\"Get output sample rate\"\"\"\n if self.ap_vocoder:\n return self.ap_vocoder.sample_rate\n\n return self.ap.sample_rate\n\n # -------------------------------------------------------------------------\n\n def synthesize(self, text: str) -> bytes:\n \"\"\"Synthesize WAV bytes from text\"\"\"\n if not self.model:\n self.load()\n\n wav = tts(\n self.model,\n self.vocoder_model,\n text,\n self.config,\n self.use_cuda,\n self.ap,\n self.use_griffin_lim,\n self.speaker_fileid,\n speaker_embedding=self.speaker_embedding,\n gst_style=self.gst_style,\n ap_vocoder=self.ap_vocoder,\n scale_factors=self.scale_factors,\n )\n\n with io.BytesIO() as wav_io:\n if self.ap_vocoder:\n # Use vocoder sample rate\n self.ap_vocoder.save_wav(wav, wav_io)\n else:\n # Use original sample rate\n self.ap.save_wav(wav, wav_io)\n\n return wav_io.getvalue()\n" ]
[ [ "torch.device", "torch.load", "torch.nn.functional.interpolate", "torch.tensor" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
uwds3/uwds3
[ "3ec70111d63db0c8d97d9f1e0110b7fe9ad56179" ]
[ "src/pyuwds3/reasoning/detection/foreground_detector.py" ]
[ "import cv2\nimport math\nimport numpy as np\nimport rospy\nfrom pyuwds3.types.detection import Detection\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\n\n\nclass DetectorState(object):\n INIT = 0\n WAITING = 1\n READY = 2\n RUNNING = 3\n\n state = {0: \"INIT\", 1: \"WAITING\", 2: \"READY\", 3: \"RUNNING\"}\n\n\nclass ForegroundDetector(object):\n def __init__(self, interactive_mode=True):\n self.interactive_mode = interactive_mode\n self.roi_points = []\n self.state = DetectorState.INIT\n self.background_substraction = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=50, detectShadows=True)\n\n if self.interactive_mode is True:\n cv2.namedWindow(\"select_roi\")\n cv2.setMouseCallback(\"select_roi\", self.click_and_select)\n else:\n self.state = DetectorState.RUNNING\n\n self.bridge = CvBridge()\n self.pub = rospy.Publisher(\"test\", Image, queue_size=1)\n\n def detect(self, rgb_image, depth_image=None, roi_points=[], prior_detections=[]):\n filtered_bbox = []\n output_dets = []\n\n h, w, _ = rgb_image.shape\n foreground_mask = np.zeros((h, w), dtype=np.uint8)\n\n bgr_image = cv2.cvtColor(rgb_image, cv2.COLOR_RGB2BGR)\n foreground_mask[int(h/2.0):h, 0:int(w)] = self.background_substraction.apply(bgr_image[int(h/2.0):h, 0:int(w)], learningRate=10e-7)\n foreground_mask[foreground_mask != 255] = 0 # shadows suppression\n\n self.pub.publish(self.bridge.cv2_to_imgmsg(foreground_mask))\n\n for d in prior_detections:\n x = int(d.bbox.xmin)\n x = x - 5 if x > 5 else x\n y = int(d.bbox.ymin)\n y = y - 5 if y > 5 else y\n w = int(d.bbox.width())\n w = w + 5 if w + 5 < rgb_image.shape[1] else w\n h = int(d.bbox.height())\n h = h + 5 if h + 5 < rgb_image.shape[0] else h\n foreground_mask[y:y+h, x:x+w] = 0\n # remove the noise of the mask\n kernel_small = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n kernel_big = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (6, 6))\n closing = cv2.morphologyEx(foreground_mask, cv2.MORPH_CLOSE, kernel_small)\n opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel_big)\n\n if len(self.roi_points) == 2:\n roi_mask = np.full(foreground_mask.shape, 255, dtype=\"uint8\")\n roi_mask[self.roi_points[0][1]:self.roi_points[1][1], self.roi_points[0][0]:self.roi_points[1][0]] = 0\n opening -= roi_mask\n\n opening[opening != 255] = 0\n # find the contours\n contours, hierarchy = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n\n for c in contours:\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 10e-3 * peri, True)\n xmin, ymin, w, h = cv2.boundingRect(approx)\n if w > 10 and h > 10 and w < rgb_image.shape[1] and h < rgb_image.shape[0]:\n filtered_bbox.append(np.array([xmin, ymin, xmin+w, ymin+h]))\n\n if self.interactive_mode is True:\n debug_image = cv2.cvtColor(opening.copy(), cv2.COLOR_GRAY2BGR)\n if len(self.roi_points) == 1:\n opening = cv2.rectangle(debug_image, self.roi_points[0], self.roi_points[0], (0, 255, 0), 3)\n elif len(self.roi_points) == 2:\n opening = cv2.rectangle(debug_image, self.roi_points[0], self.roi_points[1], (0, 255, 0), 3)\n cv2.rectangle(debug_image, (0, 0), (300, 40), (200, 200, 200), -1)\n cv2.putText(debug_image, \"Detector state : {}\".format(DetectorState.state[self.state]), (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)\n cv2.putText(debug_image, \"Select ROI & press 'r' to start\", (15, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)\n cv2.imshow(\"select_roi\", debug_image)\n key = cv2.waitKey(1) & 0xFF\n if key == ord('r'):\n self.background_substraction 
= cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=50, detectShadows=True)\n self.state = DetectorState.RUNNING\n\n if self.state == DetectorState.RUNNING:\n filtered_bbox = self.non_max_suppression(np.array(filtered_bbox), 0.5)\n\n for bbox in filtered_bbox:\n xmin, ymin, xmax, ymax = bbox\n w = int(xmax - xmin)\n h = int(ymax - ymin)\n x = int(xmin + w/2.0)\n y = int(ymin + h/2.0)\n if depth_image is not None:\n x = depth_image.shape[1]-1 if x > depth_image.shape[1] else x\n y = depth_image.shape[0]-1 if y > depth_image.shape[0] else y\n depth = depth_image[int(y)][int(x)]/1000.0\n if math.isnan(depth) or depth == 0.0:\n depth = None\n else:\n depth = None\n mask = opening[int(ymin):int(ymax), int(xmin):int(xmax)]\n output_dets.append(Detection(int(xmin), int(ymin), int(xmax), int(ymax), \"thing\", 0.4, mask=mask, depth=depth))\n\n return output_dets\n else:\n return []\n\n def non_max_suppression(self, boxes, max_bbox_overlap):\n \"\"\" Perform non maximum suppression\n Original code from pyimagesearch modified to works with detection that have a confidence\n \"\"\"\n if len(boxes) == 0:\n return []\n\n boxes = boxes.astype(np.float)\n pick = []\n\n x1 = boxes[:, 0]\n y1 = boxes[:, 1]\n x2 = boxes[:, 2]\n y2 = boxes[:, 3]\n\n area = (x2 - x1 + 1) * (y2 - y1 + 1)\n\n idxs = np.argsort(area)\n\n while len(idxs) > 0:\n last = len(idxs) - 1\n i = idxs[last]\n pick.append(i)\n\n xx1 = np.maximum(x1[i], x1[idxs[:last]])\n yy1 = np.maximum(y1[i], y1[idxs[:last]])\n xx2 = np.minimum(x2[i], x2[idxs[:last]])\n yy2 = np.minimum(y2[i], y2[idxs[:last]])\n\n w = np.maximum(0, xx2 - xx1 + 1)\n h = np.maximum(0, yy2 - yy1 + 1)\n\n overlap = (w * h) / area[idxs[:last]]\n\n idxs = np.delete(\n idxs, np.concatenate(\n ([last], np.where(overlap > max_bbox_overlap)[0])))\n\n return boxes[pick]\n\n def click_and_select(self, event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n self.roi_points = [(x, y)]\n self.state = DetectorState.WAITING\n elif event == cv2.EVENT_LBUTTONUP:\n self.roi_points.append((x, y))\n self.state = DetectorState.READY\n\n\nif __name__ == '__main__':\n from uwds3_perception.tracking.multi_object_tracker import MultiObjectTracker, iou_cost, color_cost\n from uwds3_perception.estimation.color_features_estimator import ColorFeaturesEstimator\n capture = cv2.VideoCapture(0)\n detector = ForegroundDetector()\n color_extractor = ColorFeaturesEstimator()\n tracker = MultiObjectTracker(iou_cost, color_cost, 0.1, 0.2, 15, 2, 3, use_appearance_tracker=False)\n tracks = []\n while True:\n ok, frame = capture.read()\n if ok:\n rgb_image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n #confirmed_tracks = [t for t in tracks if t.is_confirmed()]\n detections = detector.detect(frame)#, prior_detections=confirmed_tracks)\n color_extractor.estimate(rgb_image, detections=detections)\n tracks = tracker.update(rgb_image, detections)\n for t in tracks:\n if t.is_confirmed():\n t.draw(frame, (36, 255, 12))\n cv2.imshow(\"result\", frame)\n cv2.waitKey(1)\n capture.release()\n" ]
[ [ "numpy.maximum", "numpy.minimum", "numpy.full", "numpy.argsort", "numpy.array", "numpy.zeros", "numpy.where" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
queirozfcom/recommendation_systems
[ "e96b34f71fdc3a490b39d448d47e48485ce4f7fa" ]
[ "code/python/src/lib/array.py" ]
[ "import numpy as np\n\ndef subtract_nonzero_elements(arr,minuend):\n newarr = arr\n np.putmask(newarr,newarr != 0, minuend-newarr)\n return(newarr)\n\ndef replace_zero_elements(arr,replacement):\n newarr = np.copy(arr)\n newarr[newarr == 0] = replacement\n\n return(newarr)\n\ndef num_nonzero_elements(arr):\n return(len(arr[ arr != 0 ]))" ]
[ [ "numpy.copy", "numpy.putmask" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
neduchal/ecs
[ "ea33416697ccc6ca19fdb22245896ecb5fc4e345" ]
[ "ecs/src/ecs/classification_classic_node.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport cv2\nimport rospy\nimport rospkg\nimport numpy as np\nfrom joblib import load\nfrom sklearn import svm\nfrom std_msgs.msg import Empty, String\nfrom sensor_msgs.msg import CompressedImage\nfrom cv_bridge import CvBridge\nfrom ecs.srv import Descriptor\n\n\nclass ImageBasedEnvironmentClassification:\n\n def __init__(self):\n # load parameters\n self.settings = None\n self.load_settings()\n rospack = rospkg.RosPack()\n self.classifier_path = os.path.join(rospack.get_path(\n self.settings[\"classifier_pkg\"]), self.settings[\"classifier_file\"])\n self.image_subscriber = rospy.Subscriber(\n self.settings[\"camera_topic\"], CompressedImage, callback=self.image_subscriber_callback, queue_size=1)\n self.trigger_subscriber = rospy.Subscriber(\n self.settings[\"trigger_topic\"], Empty, callback=self.trigger_callback, queue_size=1)\n self.decision_publisher = rospy.Publisher(\n self.settings[\"decision_topic\"], String, queue_size=10)\n self.print_info(f\"Loaded classifier: {self.classifier_path}\")\n self.print_info(f\"Camera topic: {self.settings['camera_topic']}\")\n self.print_info(f\"Trigger topic: {self.settings['trigger_topic']}\")\n self.print_info(f\"Decision topic: {self.settings['decision_topic']}\")\n self.img = None\n self.cv_bridge = CvBridge()\n self.classifier = load(self.classifier_path)\n\n def print_info(self, msg):\n rospy.loginfo(f\"[{rospy.get_name()}]: {msg}\")\n\n def load_settings(self):\n self.settings = rospy.get_param(\"ecs_ibec\")\n\n def image_subscriber_callback(self, msg):\n data_arr = np.frombuffer(msg.data, np.uint8)\n #data_arr = np.fromstring(msg.data, np.uint8)\n self.img = cv2.imdecode(data_arr, cv2.IMREAD_COLOR)\n #self.img = self.cv_bridge.imgmsg_to_cv2(\n # msg, desired_encoding=\"CV_8UC3\")\n\n def descriptor_service_client(self):\n rospy.wait_for_service(self.settings[\"descriptor_service\"])\n if self.img is None:\n return None\n try:\n descriptor_service = rospy.ServiceProxy(\n self.settings[\"descriptor_service\"], Descriptor)\n resp1 = descriptor_service(\n self.cv_bridge.cv2_to_imgmsg(self.img, encoding=\"bgr8\"))\n return resp1.data\n except rospy.ServiceException as e:\n print(\"Service call failed: %s\" % e)\n\n def trigger_callback(self, msg):\n self.process()\n\n def process(self):\n desc_vector = np.asarray(self.descriptor_service_client())\n if desc_vector is None:\n return\n prediction = self.classifier.predict(desc_vector.reshape(1, -1))[0]\n self.print_info(f\"prediction: {prediction}\")\n prediction_text = self.settings.get(\"class_mapping\").get(str(prediction))\n if prediction_text is None:\n self.print_info(f\"Unknown class prediction [class mapping is missing]\")\n return\n self.decision_publisher.publish(prediction_text)\n\n\nif __name__ == \"__main__\":\n\n rospy.init_node(\"ecs_classification_classic_node\")\n ibec = ImageBasedEnvironmentClassification()\n rospy.spin()\n" ]
[ [ "numpy.frombuffer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
lfabris-mhpc/emukit
[ "ccb07f6bed0e9ae41dbeefdb3ad2ab247d3991e2" ]
[ "emukit/test_functions/multi_fidelity/currin.py" ]
[ "from typing import Tuple\n\nimport numpy as np\n\nfrom ...core import ContinuousParameter, InformationSourceParameter, ParameterSpace\nfrom ...core.loop.user_function import MultiSourceFunctionWrapper\n\n\ndef multi_fidelity_currin_function() -> Tuple[MultiSourceFunctionWrapper, ParameterSpace]:\n r\"\"\"\n\n High fidelity function is given by:\n\n .. math::\n f_{high}(\\mathbf{x}) = \\left[ 1 - \\exp \\left(-\\frac{1}{2x_2}\\right) \\right]\n \\frac{2300x_1^3 + 1900x_1^2 + 2092x_1 + 60}{100x_1^3+500x_1^2 + 4x_1 + 20}\n\n Low fidelity function given by:\n\n .. math::\n f_{low}(\\mathbf{x}) = \\frac{1}{4} \\left[ f_{high}(x_1 + 0.05, x_2 + 0.05) + f_{high}(x_1 + 0.05, \\max (0, x_2 - 0.05)) \\\\\n + f_{high}(x_1 - 0.05, x_2 + 0.05) + f_{high}\\left(x_1 - 0.05, \\max \\left(0, x_2 - 0.05\\right)\\right) \\right]\n\n Input domain:\n\n .. math::\n \\mathbf{x}_i \\in [0, 1]\n\n Reference: https://www.sfu.ca/~ssurjano/curretal88exp.html\n \"\"\"\n\n def high(x):\n x1 = x[:, 0]\n x2 = x[:, 1]\n return (\n 1\n - np.exp(-0.5 / x2)\n * ((2300 * x1 ** 3 + 1900 * x1 ** 2 + 2092 * x1 + 60) / (100 * x1 ** 3 + 500 * x1 ** 2 + 4 * x1 + 20))\n )[:, None]\n\n def low(x):\n return (\n 0.25 * high(np.stack([x[:, 0] + 0.05, x[:, 1] + 0.05], axis=1))\n + 0.25 * high(np.stack([x[:, 0] + 0.05, np.maximum(0, x[:, 1] - 0.05)], axis=1))\n + 0.25 * high(np.stack([x[:, 0] - 0.05, x[:, 1] + 0.05], axis=1))\n + 0.25 * high(np.stack([x[:, 0] - 0.05, np.maximum(0, x[:, 1] - 0.05)], axis=1))\n )\n\n space = ParameterSpace(\n [ContinuousParameter(\"x1\", 0, 1), ContinuousParameter(\"x2\", 0, 1), InformationSourceParameter(2)]\n )\n return MultiSourceFunctionWrapper([low, high]), space\n" ]
[ [ "numpy.exp", "numpy.maximum", "numpy.stack" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
goujou/CompartmentalSystems
[ "4724555c33f11395ddc32738e8dfed7349ee155f", "4724555c33f11395ddc32738e8dfed7349ee155f" ]
[ "prototypes/newOdeInterface/SolverComparison.py", "src/CompartmentalSystems/bins/TsTpField.py" ]
[ "\n# coding: utf-8\n\n# # The directional field\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'notebook')\nimport matplotlib\nimport matplotlib.pylab as plt\nimport numpy as np\n\n\n\ndef directional_field(ts,xs,func):\n v_ts=np.zeros((len(xs),len(ts)))\n v_xs=np.zeros((len(xs),len(ts)))\n for i in range(len(ts)):\n for j in range(len(xs)):\n t=delta_t\n x=func(xs[j],ts[i])*delta_t\n n=np.sqrt(t**2+x**2)\n v_ts[j,i]=delta_t/n\n v_xs[j,i]=func(xs[j],ts[i])*delta_t/n\n T,X=np.meshgrid(ts,xs)\n return (T, X, v_ts, v_xs)\n\ndelta_t=np.float(1)\nt_max=4\nx_max=20\nts=np.arange(0,t_max,delta_t)\nxs=np.arange(0,x_max,1)\n\n\n# # Example function\n\n# In[2]:\n\n\nk=np.float(1.01)\ndef f(x,t):\n return np.array(k*x,dtype='float')\n\nField=directional_field(ts,xs,f)\n\n#ax=directional_field_plot(ts,xs,f,ax)\nplt.figure(figsize=(10,10))\nplt.quiver(*Field,pivot='tail',angles='xy',units='x')\n\n\n# # Euler forward\n\n# In[3]:\n\n\ndef euler_forward(func,x0,ts):\n x=np.zeros_like(ts)\n x[0]=x0\n for i in range(len(ts)-1):\n x[i+1]=x[i]+delta_t*func(x[i],ts[i])\n return x\n\n\n# In[4]:\n\n\nx0=np.float(0.5)\nsol_euler=euler_forward(f,x0,ts) \nm=2\nplt.plot(ts[0:m],sol_euler[0:m],'b')\n\n\n# In[5]:\n\n\nm=3\nplt.plot(ts[0:m],sol_euler[0:m],'b')\n\n\n# In[6]:\n\n\nm=4\nplt.plot(ts[0:m],sol_euler[0:m],'b')\n\n\n# In[ ]:\n\n\n#m=5\n#plt.plot(ts[0:m],sol_euler[0:m],'b')\n\n\n# # Analytical Solution\n\n# In[7]:\n\n\nts_fine=np.arange(0,t_max-delta_t,0.1*delta_t)\nanasol=x0*np.exp(k*ts_fine)\np=plt.plot(ts_fine,anasol,'y')\n\n\n# # The solver library\n\n# In[9]:\n\n\nfrom scipy.integrate import solve_ivp,OdeSolver,odeint\nx0_arr=np.array([x0])\nx0_arr.shape\nf(x0_arr,5).shape\n\n\n# ## first candidate \n\n# In[10]:\n\n\n\nss=solve_ivp(fun=f,t_span=[0,t_max],y0=x0_arr,max_step=delta_t)\nn=len(ss.t)\np=plt.plot((ss.t).reshape(1,n),ss.y.reshape(1,n),'ro')\n\n\n# ## second candiate\n\n# In[11]:\n\n\nss=solve_ivp(fun=f,t_span=[0,t_max],y0=x0_arr,max_step=delta_t,vectorized=False,method='LSODA')\nn=len(ss.t)\np=plt.plot((ss.t).reshape(1,n),ss.y.reshape(1,n),'go')\n\n\n# ## third candidate\n\n# In[12]:\n\n\nso=odeint(f,x0_arr,ts)\np=plt.plot(ts,so,'g')\n\n\n# # Speed, Stability, Flexibility \n", "# vim: set ff=unix expandtab ts=4 sw=4:\nimport numpy as np\nimport matplotlib\n\n# matplotlib.use(\"Agg\")\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as manimation\nfrom matplotlib.collections import PolyCollection\nfrom matplotlib.colors import colorConverter\n\nfrom .TimeField import TimeField\nfrom .TsTpBin import TsTpBin\n\n\nclass TsTpField(TimeField):\n # instances respresent a distribution field\n # with a tss (time step size) spacing\n def __init__(self, arr, tss):\n s = arr.shape\n if len(s) != 2:\n raise (Exception(\"arr has to be 2 dimensional\"))\n if s[0] < s[1]:\n raise (\n Exception(\n \"\"\"Pool age can not exceed System age by definition. 
\n Therefore at least arr.shape[0]>=arr.shape[1] should hold!\"\"\"\n )\n )\n super().__init__(arr, tss)\n\n @property\n def number_of_Tp_entries(self):\n return self.arr.shape[1]\n\n @property\n def max_Tp(self):\n return self.number_of_Tp_entries * self.tss\n\n def default_plot_args(self, max_shape=None, z_max=None):\n if not (max_shape):\n max_shape = self.shape\n if not (z_max):\n z_max = self.arr.max()\n return (max_shape, z_max)\n\n def plot_surface(self, ax, max_shape=None, z_max=None):\n max_shape, z_max = self.default_plot_args(max_shape, z_max)\n rect = self.arr\n tss = self.tss\n systemAges = np.arange(self.number_of_Ts_entries) * tss\n poolAges = np.arange(self.number_of_Tp_entries) * tss\n X, Y = np.meshgrid(systemAges, poolAges, indexing=\"ij\") # see help of meshgrid\n\n ax.plot_surface(X, Y, rect, rstride=1, cstride=1, linewidth=1)\n # ax.plot_wireframe(X, Y, rect, rstride=1, cstride=1, linewidth=1)\n # ax.plot_surface(X, Y, Z,linewidth=0)\n self.set_limits(ax, max_shape, z_max)\n self.set_ticks_and_labels(max_shape, ax)\n\n def set_ticks_and_labels(self, max_shape, ax):\n tss = self.tss\n systemAges = np.arange(max_shape[0]) * tss\n poolAges = np.arange(max_shape[1]) * tss\n ax.set_xticks(systemAges)\n ax.set_yticks(poolAges)\n\n ax.set_xlabel(\"system age\")\n ax.set_ylabel(\"pool age\")\n\n ax.invert_xaxis()\n\n def plot_bins(\n self, ax, max_shape=None, z_max=None, facecolors=None, offset_field=None\n ):\n if not (isinstance(offset_field, np.ndarray)):\n offset_field = np.zeros(self.shape)\n max_shape, z_max = self.default_plot_args(max_shape, z_max)\n arr = self.arr\n tss = self.tss\n\n ax.set_zlim3d((0, z_max))\n for Ts in range(self.number_of_Ts_entries):\n for Tp in range(self.number_of_Tp_entries):\n offset = offset_field[Ts, Tp]\n val = arr[Ts, Tp]\n if val != 0:\n b = TsTpBin(\n tss,\n Ts * tss,\n Tp * tss,\n arr[Ts, Tp],\n facecolors=facecolors,\n offset=offset,\n )\n b.plot(ax)\n self.set_limits(ax, max_shape, z_max)\n self.set_ticks_and_labels(max_shape, ax)\n\n def set_limits(self, ax, max_shape, z_max):\n nTs, nTp = max_shape\n max_system_age = nTs * self.tss\n max_pool_age = nTp * self.tss\n ax.set_xlim(\n max_system_age, 0\n ) # the order (big,small) avoids the axis inversion\n ax.set_ylim(max_pool_age, 0)\n ax.set_zlim(0, z_max)\n" ]
[ [ "numpy.sqrt", "numpy.meshgrid", "numpy.arange", "scipy.integrate.solve_ivp", "scipy.integrate.odeint", "matplotlib.pylab.figure", "numpy.zeros_like", "matplotlib.pylab.plot", "numpy.exp", "matplotlib.pylab.quiver", "numpy.array", "numpy.float" ], [ "numpy.arange", "numpy.meshgrid", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.6", "1.10", "1.4", "1.3", "1.9", "1.5", "1.7", "1.0", "1.2", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
H0merJayS1mpson/deepobscustom
[ "e85816ce42466326dac18841c58b79f87a4a1a7c", "e85816ce42466326dac18841c58b79f87a4a1a7c" ]
[ "deepobs/pytorch/testproblems/testproblem.py", "deepobs/tensorflow/testproblems/_quadratic.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"Base class for DeepOBS test problems.\"\"\"\nimport torch\nimport abc\nfrom .testproblems_utils import flatten\nfrom .. import config\n\n\nclass TestProblem(abc.ABC):\n \"\"\"Base class for DeepOBS test problems.\n\n Args:\n batch_size (int): Batch size to use.\n weight_decay (float): Weight decay (L2-regularization) factor to use. If\n not specified, the test problems revert to their respective defaults.\n Note: Some test problems do not use regularization and this value will\n be ignored in such a case.\n\n Attributes:\n _batch_size: Batch_size for the data of this test problem.\n _weight_decay: The regularization factor for this test problem\n data: The dataset used by the test problem (datasets.DataSet instance).\n loss_function: The loss function for this test problem.\n net: The torch module (the neural network) that is trained.\n\n Methods:\n train_init_op: Initializes the test problem for the\n training phase.\n train_eval_init_op: Initializes the test problem for\n evaluating on training data.\n test_init_op: Initializes the test problem for\n evaluating on test data.\n _get_next_batch: Returns the next batch of data of the current phase.\n get_batch_loss_and_accuracy: Calculates the loss and accuracy of net on the next batch of the current phase.\n set_up: Sets all public attributes.\n \"\"\"\n\n def __init__(self, batch_size, weight_decay=None, initialization=None):\n \"\"\"Creates a new test problem instance.\n\n Args:\n batch_size (int): Batch size to use.\n weight_decay (float): Weight decay (L2-regularization) factor to use. If\n not specified, the test problems revert to their respective defaults.\n Note: Some test problems do not use regularization and this value will\n be ignored in such a case.\n \"\"\"\n self._batch_size = batch_size\n self._weight_decay = weight_decay\n self._device = torch.device(config.get_default_device())\n\n # Public attributes by which to interact with test problems. These have to\n # be created by the set_up function of sub-classes.\n self.data = None\n self.loss_function = None\n self.net = None\n self.regularization_groups = None\n self.initialization = initialization\n self.current_batch = None\n self._batch_count = 0\n\n def train_init_op(self):\n \"\"\"Initializes the testproblem instance to train mode. I.e.\n sets the iterator to the training set and sets the model to train mode.\n \"\"\"\n self._iterator = iter(self.data._train_dataloader)\n self.phase = \"train\"\n self.net.train()\n\n def train_eval_init_op(self):\n \"\"\"Initializes the testproblem instance to train eval mode. I.e.\n sets the iterator to the train evaluation set and sets the model to eval mode.\n \"\"\"\n self._iterator = iter(self.data._train_eval_dataloader)\n self.phase = \"train_eval\"\n self.net.eval()\n\n def valid_init_op(self):\n \"\"\"Initializes the testproblem instance to validation mode. I.e.\n sets the iterator to the validation set and sets the model to eval mode.\n \"\"\"\n self._iterator = iter(self.data._valid_dataloader)\n self.phase = \"valid\"\n self.net.eval()\n\n def test_init_op(self):\n \"\"\"Initializes the testproblem instance to test mode. 
I.e.\n sets the iterator to the test set and sets the model to eval mode.\n \"\"\"\n self._iterator = iter(self.data._test_dataloader)\n self.phase = \"test\"\n self.net.eval()\n\n def _get_next_batch(self):\n \"\"\"Returns the next batch from the iterator.\"\"\"\n self._batch_count += 1\n batch = next(self._iterator)\n self.current_batch = batch\n return batch\n\n def get_batch_loss_and_accuracy_func(self,\n reduction='mean',\n add_regularization_if_available=True, get_next_batch=True):\n \"\"\"Get new batch and create forward function that calculates loss and accuracy (if available)\n on that batch. This is a default implementation for image classification.\n Testproblems with different calculation routines (e.g. RNNs) overwrite this method accordingly.\n\n Args:\n reduction (str): The reduction that is used for returning the loss. Can be 'mean', 'sum' or 'none' in which \\\n case each indivual loss in the mini-batch is returned as a tensor.\n add_regularization_if_available (bool): If true, regularization is added to the loss.\n Returns:\n callable: The function that calculates the loss/accuracy on the current batch.\n :param next_batch: if an Optimizer requires to run the loss function multiple times we don't want\n to fetch a new batch for every loss function call. Therefore next_batch indicates if we want a new batch\n or need to compute something using the \"old\" i.e. the self.current_batch batch.\n \"\"\"\n if get_next_batch:\n inputs, labels = self._get_next_batch()\n inputs = inputs.to(self._device)\n labels = labels.to(self._device)\n else:\n inputs, labels = self.current_batch\n inputs = inputs.to(self._device)\n labels = labels.to(self._device)\n\n\n def forward_func():\n correct = 0.0\n total = 0.0\n\n # in evaluation phase is no gradient needed\n if self.phase in [\"train_eval\", \"test\", \"valid\"]:\n with torch.no_grad():\n outputs = self.net(inputs)\n loss = self.loss_function(reduction=reduction)(outputs, labels)\n else:\n outputs = self.net(inputs)\n loss = self.loss_function(reduction=reduction)(outputs, labels)\n\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n accuracy = correct/total\n\n if add_regularization_if_available:\n regularizer_loss = self.get_regularization_loss()\n else:\n regularizer_loss = torch.tensor(0.0, device=torch.device(self._device))\n\n return loss + regularizer_loss, accuracy\n\n return forward_func\n\n def get_batch_loss_and_accuracy(self,\n reduction='mean',\n add_regularization_if_available=True, get_next_batch=True):\n \"\"\"Gets a new batch and calculates the loss and accuracy (if available)\n on that batch.\n\n Args:\n reduction (str): The reduction that is used for returning the loss. Can be 'mean', 'sum' or 'none' in which \\\n case each indivual loss in the mini-batch is returned as a tensor.\n add_regularization_if_available (bool): If true, regularization is added to the loss.\n\n Returns:\n float/torch.tensor, float: loss and accuracy of the model on the current batch.\n :param get_next_batch:\n \"\"\"\n\n forward_func = self.get_batch_loss_and_accuracy_func(\n reduction=reduction,\n add_regularization_if_available=add_regularization_if_available, get_next_batch=get_next_batch)\n\n return forward_func()\n\n def get_regularization_loss(self):\n \"\"\"Returns the current regularization loss of the network based on the parameter groups.\n\n Returns:\n int or torch.tensor: If no regularzations is applied, it returns the integer 0. 
Else a torch.tensor \\\n that holds the regularization loss.\n \"\"\"\n # iterate through all layers\n layer_norms = []\n for regularization, parameter_group in self.regularization_groups.items():\n if regularization > 0.0:\n # L2 regularization\n for parameters in parameter_group:\n layer_norms.append(regularization * parameters.pow(2).sum())\n\n regularization_loss = 0.5 * sum(layer_norms)\n return regularization_loss\n\n @abc.abstractmethod\n def get_regularization_groups(self):\n \"\"\"Creates regularization groups for the parameters.\n\n Returns:\n dict: A dictionary where the key is the regularization factor and the value is a list of parameters.\n \"\"\"\n return\n\n @abc.abstractmethod\n # TODO get rid of setup structure by parsing individual loss func, network and dataset\n def set_up(self):\n \"\"\"Sets up the test problem.\n \"\"\"\n pass\n\n\nclass UnregularizedTestproblem(TestProblem):\n\n def __init__(self, batch_size, weight_decay = None):\n super(UnregularizedTestproblem, self).__init__(batch_size, weight_decay)\n\n def get_regularization_groups(self):\n \"\"\"Creates regularization groups for the parameters.\n\n Returns:\n dict: A dictionary where the key is the regularization factor and the value is a list of parameters.\n \"\"\"\n no = 0.0\n group_dict = {no: []}\n\n for parameters_name, parameters in self.net.named_parameters():\n # penalize no parameters\n group_dict[no].append(parameters)\n return group_dict\n\n @abc.abstractmethod\n def set_up(self):\n \"\"\"Sets up the test problem.\n \"\"\"\n pass\n", "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 20 10:07:47 2018\n\n@author: lballes\n\"\"\"\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ..datasets.quadratic import quadratic\nfrom .testproblem import TestProblem\n\n\nclass _quadratic_base(TestProblem):\n r\"\"\"DeepOBS base class for a stochastic quadratic test problems creating loss\\\n functions of the form\n\n :math:`0.5* (\\theta - x)^T * Q * (\\theta - x)`\n\n with Hessian ``Q`` and \"data\" ``x`` coming from the quadratic data set, i.e.,\n zero-mean normal.\n\n Args:\n batch_size (int): Batch size to use.\n weight_decay (float): No weight decay (L2-regularization) is used in this\n test problem. Defaults to ``None`` and any input here is ignored.\n hessian (np.array): Hessian of the quadratic problem.\n Defaults to the ``100`` dimensional identity.\n\n Attributes:\n dataset: The DeepOBS data set class for the quadratic test problem.\n train_init_op: A tensorflow operation initializing the test problem for the\n training phase.\n train_eval_init_op: A tensorflow operation initializing the test problem for\n evaluating on training data.\n test_init_op: A tensorflow operation initializing the test problem for\n evaluating on test data.\n losses: A tf.Tensor of shape (batch_size, ) containing the per-example loss\n values.\n regularizer: A scalar tf.Tensor containing a regularization term.\n Will always be ``0.0`` since no regularizer is used.\n \"\"\"\n\n def __init__(self, batch_size, weight_decay=None, hessian=np.eye(100)):\n \"\"\"Create a new quadratic test problem instance.\n\n Args:\n batch_size (int): Batch size to use.\n weight_decay (float): No weight decay (L2-regularization) is used in this\n test problem. 
Defaults to ``None`` and any input here is ignored.\n hessian (np.array): Hessian of the quadratic problem.\n Defaults to the ``100`` dimensional identity.\n \"\"\"\n super(_quadratic_base, self).__init__(batch_size, weight_decay)\n self._hessian = hessian\n if weight_decay is not None:\n print(\n \"WARNING: Weight decay is non-zero but no weight decay is used\",\n \"for this model.\",\n )\n\n def set_up(self):\n \"\"\"Sets up the stochastic quadratic test problem. The parameter ``Theta``\n will be initialized to (a vector of) ``1.0``.\n \"\"\"\n self.dataset = quadratic(self._batch_size)\n self.train_init_op = self.dataset.train_init_op\n self.train_eval_init_op = self.dataset.train_eval_init_op\n self.valid_init_op = self.dataset.valid_init_op\n self.test_init_op = self.dataset.test_init_op\n\n x = self.dataset.batch\n hessian = tf.convert_to_tensor(self._hessian, dtype=tf.float32)\n theta = tf.get_variable(\n \"theta\",\n shape=(1, hessian.shape[0]),\n initializer=tf.constant_initializer(1.0),\n )\n\n self.losses = tf.linalg.tensor_diag_part(\n 0.5\n * tf.matmul(\n tf.subtract(theta, x),\n tf.matmul(hessian, tf.transpose(tf.subtract(theta, x))),\n )\n )\n self.regularizer = tf.losses.get_regularization_loss()\n" ]
[ [ "torch.device", "torch.no_grad", "torch.max" ], [ "tensorflow.convert_to_tensor", "tensorflow.losses.get_regularization_loss", "numpy.eye", "tensorflow.subtract", "tensorflow.constant_initializer" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sj1104/Het
[ "81b7e9f0f593108db969fc46a1af3df74b825230", "81b7e9f0f593108db969fc46a1af3df74b825230", "81b7e9f0f593108db969fc46a1af3df74b825230", "81b7e9f0f593108db969fc46a1af3df74b825230", "81b7e9f0f593108db969fc46a1af3df74b825230" ]
[ "geometric/autodist/run_autodist.py", "python/athena/gpu_ops/LayerNorm.py", "python/athena/initializers.py", "het_examples/utils/prepare_amazon_data.py", "python/athena/onnx/onnx_opset/Pad.py" ]
[ "from GNN.graph import *\nfrom GNN import distributed\nfrom GNN.distributed.sampler import DistributedGraphSageSampler\n\nfrom athena import gpu_ops as ad\nfrom athena.launcher import launch\n\nimport numpy as np\nimport time, os, sys\nimport argparse\nimport yaml\nfrom tqdm import tqdm\n\nimport tensorflow as tf\nfrom autodist import AutoDist\nfrom autodist.strategy import PS, PSLoadBalancing, PartitionedPS, AllReduce, Parallax\nfrom autodist.strategy.base import Strategy\nfrom autodist.kernel.common.utils import get_op_name\nfrom tensorflow.python.framework import ops\nresource_spec_file = os.path.join(os.path.dirname(__file__), 'spec.yml')\n\n\ndef pop_env():\n for k in ['https_proxy', 'http_proxy']:\n if k in os.environ:\n os.environ.pop(k)\npop_env()\n\n\nclass Parallaxx(PSLoadBalancing, AllReduce):\n \"\"\"\n Generates the Parallax Strategy from https://arxiv.org/pdf/1808.02621.pdf.\n The Parallax strategy mixes Parameter Server and AllReduce. The rationale is that\n a PS architecture is more suitable for sparse gradient updates, while AllReduce\n has reportedly better performance on dense gradient updates.\n \"\"\"\n\n def __init__(self, chunk_size=128, local_proxy_variable=False, sync=True, staleness=0):\n PSLoadBalancing.__init__(self, local_proxy_variable, sync, staleness)\n AllReduce.__init__(self, chunk_size)\n\n # pylint: disable=attribute-defined-outside-init\n def build(self, graph_item, resource_spec):\n \"\"\"Generate the strategy.\"\"\"\n expr = Strategy()\n\n # For each variable, generate variable synchronizer config\n # resouce_spec.gpu_devices = dict_items([('162.105.146.118:GPU:0', <DeviceSpec: 162.105.146.118:GPU:0>), ('162.105.146.118:GPU:1', <DeviceSpec: 162.105.146.118:GPU:1>), ('162.105.146.118:GPU:2', <DeviceSpec: 162.105.146.118:GPU:2>), ('162.105.146.118:GPU:3', <DeviceSpec: 162.105.146.118:GPU:3>), ('162.105.146.118:GPU:4', <DeviceSpec: 162.105.146.118:GPU:4>), ('162.105.146.118:GPU:5', <DeviceSpec: 162.105.146.118:GPU:5>), ('162.105.146.118:GPU:6', <DeviceSpec: 162.105.146.118:GPU:6>), ('162.105.146.118:GPU:7', <DeviceSpec: 162.105.146.118:GPU:7>)]), ('162.105.146.119:GPU:0', <DeviceSpec: 162.105.146.119:GPU:0>), ('162.105.146.119:GPU:1', <DeviceSpec: 162.105.146.119:GPU:1>), ('162.105.146.119:GPU:2', <DeviceSpec: 162.105.146.119:GPU:2>), ('162.105.146.119:GPU:3', <DeviceSpec: 162.105.146.119:GPU:3>), ('162.105.146.119:GPU:4', <DeviceSpec: 162.105.146.119:GPU:4>), ('162.105.146.119:GPU:5', <DeviceSpec: 162.105.146.119:GPU:5>), ('162.105.146.119:GPU:6', <DeviceSpec: 162.105.146.119:GPU:6>), ('162.105.146.119:GPU:7', <DeviceSpec: 162.105.146.119:GPU:7>)])\n gpu_devices = dict()\n for k, v in resource_spec.gpu_devices:\n if '119' not in k:\n gpu_devices[k]=v\n print(resource_spec.gpu_devices)\n #expr.graph_config.replicas.extend([k for k, v in resource_spec.gpu_devices])\n expr.graph_config.replicas.extend([k for k, v in gpu_devices.items()])\n for k, v in resource_spec.node_cpu_devices.items():\n if k not in resource_spec.node_gpu_devices:\n expr.graph_config.replicas.extend(v)\n reduction_device_names = [k for k, _ in resource_spec.cpu_devices if '119' in k]\n self.loads = {ps: 0.0 for ps in reduction_device_names}\n\n # Generate node config\n node_config = []\n for idx, var in enumerate(graph_item.trainable_var_op_to_var.values()):\n var_op_name = get_op_name(var.name)\n grad, _, _ = graph_item.var_op_name_to_grad_info[var_op_name]\n if isinstance(grad, ops.Tensor): # this is a dense variable\n group_id = idx // self.chunk_size\n config = 
self._gen_all_reduce_node_config(var.name, group=group_id)\n else: # sparse updates\n # For Parallax Strategy, all PS vars are sparse so we don't use a proxy.\n # Sparse variables are likely larger, so keeping copies would be costlier,\n # and usually each device only requires a small part of the overall variable.\n config = self._gen_ps_node_config(\n var,\n False, # For Parallax Strategy, all PS vars are sparse which does not need proxy.\n self._sync,\n self._staleness\n )\n node_config.append(config)\n expr.node_config.extend(node_config)\n\n return expr\n\n\ndef tf_gcn(x, normed_adj, in_features, out_features):\n initializer = tf.keras.initializers.glorot_uniform()\n # weight = tf.Variable(initializer(shape=[in_features, out_features]), dtype = tf.float32)\n rand = np.random.RandomState(seed=123)\n weight = tf.Variable(rand.normal(scale=0.1, size=[in_features, out_features]), dtype = tf.float32)\n bias = tf.Variable(tf.keras.initializers.zeros()([out_features]), dtype = tf.float32)\n x = tf.matmul(x, weight)\n x = x + bias\n x = tf.sparse.matmul(normed_adj, x)\n return x\n\ndef model(normed_adj, sparse_input, y_, train_mask ,cluster=None, task_id=None):\n embedding_size = args.hidden_size\n num_feature = meta[\"feature\"] - 1\n use_ps = cluster is not None\n if use_ps:\n device = tf.device(\"/job:ps/task:0/cpu:0\")\n else:\n device = tf.device(\"/cpu:0\")\n with device:\n Embedding = tf.get_variable(\n name=\"Embedding\",\n dtype=tf.float32,\n trainable=True,\n # pylint: disable=unnecessary-lambda\n shape=(meta[\"idx_max\"], embedding_size),\n initializer=tf.random_normal_initializer(stddev=0.1)\n )\n sparse_input_embedding = tf.nn.embedding_lookup(Embedding, sparse_input)\n if use_ps:\n device = tf.device(tf.train.replica_device_setter(\n worker_device=\"/job:worker/task:%d/gpu:0\" % (task_id),\n cluster=cluster))\n else:\n device = tf.device(\"/gpu:0\")\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n with device:\n if use_ps:\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n x = tf.reshape(sparse_input_embedding, (-1, num_feature * embedding_size))\n x = tf_gcn(x, normed_adj, num_feature * embedding_size, embedding_size)\n x = tf.nn.relu(x)\n y = tf_gcn(x, normed_adj, embedding_size, meta[\"class\"])\n y_ = tf.one_hot(y_, meta[\"class\"])\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))\n loss = loss * train_mask\n optimizer = tf.train.GradientDescentOptimizer(args.learning_rate)\n train_op = optimizer.minimize(loss, global_step=global_step)\n\n if use_ps:\n return loss, y, train_op, global_step\n else:\n return loss, y, train_op\n\ndef padding(graph, target):\n assert graph.num_nodes <= target\n extra = target - graph.num_nodes\n x = np.concatenate([graph.x, np.tile(graph.x[0], [extra, 1])])\n y = np.concatenate([graph.y, np.repeat(graph.y[0], extra)])\n return Graph(x, y, graph.edge_index, graph.num_classes)\n\ndef prepare_data(ngraph):\n rank = ad.get_worker_communicate().rank()\n nrank = ad.get_worker_communicate().nrank()\n graphs = []\n graphsage_sample_depth = 2\n graphsage_sample_width = 2\n node_upper_bound = args.batch_size * ((graphsage_sample_width ** (graphsage_sample_depth + 1)) - 1)\n print(\"Start Sampling {} graphs\".format(ngraph))\n def transform(result):\n [graph, sample_mask] = result\n train_mask = np.zeros(node_upper_bound)\n train_mask[0:graph.num_nodes] = sample_mask * graph.x[:, -1]\n test_mask = np.zeros(node_upper_bound)\n test_mask[0:graph.num_nodes] = (sample_mask - 
graph.x[:, -1]) * sample_mask\n graph = padding(graph, node_upper_bound)\n mp_val = mp_matrix(graph, 0, system=\"tensorflow\")\n return graph, mp_val, train_mask, test_mask\n with DistributedGraphSageSampler(args.path, args.batch_size, graphsage_sample_depth, graphsage_sample_width,\n rank=rank, nrank=nrank ,transformer=transform, cache_size_factor=1, reduce_nonlocal_factor=0, num_sample_thread=4) as sampler:\n for i in tqdm(range(ngraph)):\n g_sample, mp_val, train_mask, test_mask = sampler.sample()\n graphs.append([g_sample, mp_val, train_mask, test_mask])\n return graphs\n\ndef train_main(args):\n autodist = AutoDist(resource_spec_file, Parallaxx())\n with open(os.path.join(args.path, \"meta.yml\"), 'rb') as f:\n meta = yaml.load(f.read(), Loader=yaml.FullLoader)\n hidden_layer_size = args.hidden_size\n num_epoch = args.num_epoch\n rank = ad.get_worker_communicate().rank()\n device_id = rank % args.num_local_worker\n nrank = ad.get_worker_communicate().nrank()\n distributed.ps_init(rank, nrank)\n ngraph = meta[\"partition\"][\"nodes\"][rank] // args.batch_size\n graphs = prepare_data(ngraph)\n idx, epoch, nnodes = 0, 0, 0\n graph_len = graphs[0][0].y.shape[0]\n with tf.Graph().as_default() as g, autodist.scope():\n norm_adj = tf.compat.v1.sparse.placeholder(tf.float32, name=\"norm_adj\")\n sparse_feature = tf.placeholder(tf.int32, [graph_len, meta[\"feature\"] - 1])\n y_ = tf.placeholder(tf.int32, [graph_len], name=\"y_\")\n train_mask = tf.placeholder(tf.float32, [graph_len], name=\"train_mask\")\n loss, y, train_op = model(norm_adj, sparse_feature, y_, train_mask)\n sess = autodist.create_distributed_session()\n\n acc_stat = []\n start = time.time()\n while True:\n g_sample, mp_val, mask, mask_eval = graphs[idx]\n idx = (idx + 1) % ngraph\n feed_dict = {\n norm_adj : mp_val,\n sparse_feature : g_sample.x[:, 0:-1],\n y_ : g_sample.y,\n train_mask : mask\n }\n print(\"Before training\")\n loss_val = sess.run([loss, y, y_, train_op], feed_dict=feed_dict)\n print(loss_val)\n pred_val = loss_val[1]\n true_val = loss_val[2]\n acc_val = np.equal(np.argmax(pred_val, 1), true_val).astype(np.float)\n acc_stat.append(acc_val)\n nnodes += mask.sum() + mask_eval.sum()\n if nnodes > meta[\"partition\"][\"nodes\"][rank]:\n nnodes = 0\n epoch += 1\n print(\"Acc : \", np.mean(acc_stat), \"Time : \", time.time() - start)\n start = time.time()\n acc_stat = []\n if epoch >= num_epoch:\n break\n\nif __name__ =='__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\")\n parser.add_argument(\"--path\", \"-p\", required=True)\n parser.add_argument(\"--num_epoch\", default=300, type=int)\n parser.add_argument(\"--hidden_size\", default=128, type=int)\n parser.add_argument(\"--learning_rate\", default=1, type=float)\n parser.add_argument(\"--batch_size\", default=128, type=int)\n args = parser.parse_args()\n with open(os.path.join(args.path, \"meta.yml\"), 'rb') as f:\n meta = yaml.load(f.read(), Loader=yaml.FullLoader)\n launch(train_main, args)\n", "from __future__ import absolute_import\nfrom .Node import Op\nimport numpy as np\nfrom .. 
import ndarray\nfrom ..gpu_links import layer_normalization\nfrom ..gpu_links import layer_normalization_gradient\n\nclass Layer_NormalizationOp(Op):\n def __init__(self, node_in, ln_scale, ln_bias, eps=0.01, ctx=None):\n super().__init__(Layer_NormalizationOp, [node_in, ln_scale, ln_bias], ctx)\n self.eps = eps\n self.save_mean = None\n self.save_var = None\n self.data_shape = None\n \n def compute(self, input_vals, output_val, stream_handle=None):\n local_shape = list(input_vals[0].shape)\n local_shape[-1] = 1\n local_shape = tuple(local_shape)\n if self.on_cpu:\n input_vals = [n.asnumpy() for n in input_vals]\n data_type = input_vals[0].dtype\n if self.data_shape is None:\n self.save_mean = np.empty(local_shape, dtype=np.flota32)\n self.save_var = np.empty(local_shape, dtype=np.flota32)\n self.data_shape = local_shape\n elif self.data_shape != local_shape:\n del self.save_mean\n del self.save_var\n self.save_mean = np.empty(local_shape, dtype=np.flota32)\n self.save_var = np.empty(local_shape, dtype=np.flota32)\n self.data_shape = local_shape\n self.save_mean[:] = input_vals[0].mean(axis=-1, dtype=data_type, keepdims=True)\n self.save_var[:] = input_vals[0].var(axis=-1, dtype=data_type, keepdims=True)\n std = np.sqrt(self.save_var + self.eps, dtype=data_type)\n centered_input = input_vals[0] - self.save_mean\n normed_input = centered_input / std\n\n bc_shape = [1] * len(input_vals[0].shape)\n bc_shape[-1] = input_vals[0].shape[-1]\n\n output_val[:] = input_vals[1].reshape(bc_shape) * normed_input + \\\n input_vals[2].reshape(bc_shape)\n\n else:\n if self.data_shape is None:\n dev_id = input_vals[0].handle.contents.ctx.device_id\n self.save_mean = ndarray.empty(local_shape, ctx=ndarray.gpu(dev_id))\n self.save_var = ndarray.empty(local_shape, ctx=ndarray.gpu(dev_id))\n self.data_shape = local_shape\n elif self.data_shape != local_shape:\n del self.save_mean\n del self.save_var\n dev_id = input_vals[0].handle.contents.ctx.device_id\n self.save_mean = ndarray.empty(local_shape, ctx=ndarray.gpu(dev_id))\n self.save_var = ndarray.empty(local_shape, ctx=ndarray.gpu(dev_id))\n self.data_shape = local_shape\n layer_normalization(input_vals[0], input_vals[1], input_vals[2], \\\n self.save_mean, self.save_var, output_val, self.eps, stream_handle)\n\n def gradient(self, output_grad):\n ln_gradient_node = layer_normalization_gradient_op(\n output_grad, self.inputs[0], self.inputs[1], self, self.eps, ctx=output_grad.ctx)\n data_gradient = layer_normalization_gradient_of_data_op(\n ln_gradient_node, self.inputs[0], ctx=output_grad.ctx)\n scale_gradient = layer_normalization_gradient_of_scale_op(\n ln_gradient_node, self.inputs[1], ctx=output_grad.ctx)\n bias_gradient = layer_normalization_gradient_of_bias_op(\n ln_gradient_node, self.inputs[2], ctx=output_grad.ctx)\n return [data_gradient, scale_gradient, bias_gradient]\n\n def infer_shape(self, input_shapes):\n assert len(input_shapes) == 3\n assert len(input_shapes[1]) == len(input_shapes[2]) == 1\n assert input_shapes[0][-1] == input_shapes[1][0] == input_shapes[2][0]\n return input_shapes[0]\n\n\nclass Layer_Normalization_GradientOp(Op):\n def __init__(self, out_gradient, in_node, ln_scale, forward_node, eps, ctx=None):\n super().__init__(Layer_Normalization_GradientOp, [out_gradient, in_node, ln_scale], ctx)\n self.tmp_gradient_in_arr = None\n self.tmp_gradient_ln_bias = None\n self.tmp_gradient_ln_scale = None\n self.data_shape = None\n self.forward_node = forward_node\n self.eps = eps\n\n def compute(self, input_vals, output_val, 
stream_handle=None):\n if self.on_cpu:\n if self.tmp_gradient_ln_bias is None:\n shapeln = input_vals[2].shape\n self.data_shape = tuple(input_vals[0].shape)\n self.tmp_gradient_ln_scale = np.empty(shape=shapeln, dtype=np.float32)\n self.tmp_gradient_ln_bias = np.empty(shape=shapeln, dtype=np.float32)\n self.tmp_gradient_in_arr = np.empty(shape=self.data_shape, dtype=np.float32)\n elif self.data_shape != tuple(input_vals[0].shape):\n self.data_shape = tuple(input_vals[0].shape)\n del self.tmp_gradient_in_arr\n self.tmp_gradient_in_arr = np.empty(shape=self.data_shape, dtype=np.float32)\n \n red_axis = tuple(range(input_vals[0].ndim-1))\n self.tmp_gradient_ln_bias[:] = input_vals[0].sum(red_axis) # (X,)\n \n std = np.sqrt(self.forward_node.save_var + self.eps) # (N, 1)\n x_centered = input_vals[1] - self.forward_node.save_mean # (N, X)\n x_norm = x_centered / std # (N, X)\n self.tmp_gradient_ln_scale[:] = (input_vals[0] * x_norm).sum(red_axis) # (X,)\n\n last_dim = input_vals[1].shape[-1]\n dx_norm = input_vals[0] * input_vals[2].reshape([1] * (input_vals[0].ndim - 1) + [-1]) # (N, X)\n dvar = (dx_norm * x_centered).sum(axis=-1, keepdims=True) * -0.5 / (self.forward_node.save_var + self.eps) / std # (N, 1)\n dx_mu_1 = dx_norm / std # (N, X)\n dx_mu_2 = dvar * 2 * x_centered / last_dim # (N, X)\n dx_1 = dx_mu_1 + dx_mu_2 # (N, X)\n dx_2 = -1 * dx_1.sum(axis=-1, keepdims=True) / last_dim # (N, 1)\n self.tmp_gradient_in_arr[:] = dx_1 + dx_2 # (N, X)\n else:\n if self.tmp_gradient_ln_bias is None:\n shapeln = input_vals[2].shape\n self.data_shape = tuple(input_vals[0].shape)\n self.tmp_gradient_ln_bias = ndarray.empty(\n shape=shapeln, ctx=input_vals[0].ctx)\n self.tmp_gradient_ln_scale = ndarray.empty(\n shape=shapeln, ctx=input_vals[0].ctx)\n self.tmp_gradient_in_arr = ndarray.empty(\n shape=self.data_shape, ctx=input_vals[0].ctx)\n elif self.data_shape != tuple(input_vals[0].shape):\n self.data_shape = tuple(input_vals[0].shape)\n del self.tmp_gradient_in_arr\n self.tmp_gradient_in_arr = ndarray.empty(\n shape=self.data_shape, ctx=input_vals[0].ctx)\n\n layer_normalization_gradient(input_vals[0], input_vals[1], input_vals[2],\n self.tmp_gradient_in_arr, self.tmp_gradient_ln_scale,\n self.tmp_gradient_ln_bias, self.forward_node.save_mean,\n self.forward_node.save_var, self.eps, stream_handle)\n\n def gradient(self, output_grad):\n raise NotImplementedError\n\n def infer_shape(self, input_shapes):\n return (1,)\n\n\nclass Layer_Normalization_Gradient_of_DataOp(Op):\n def __init__(self, ln_gradient, in_arr, ctx=None):\n super().__init__(Layer_Normalization_Gradient_of_DataOp, [ln_gradient, in_arr], ctx)\n\n def compute(self, input_vals, output_val, stream_handle=None):\n if self.on_cpu:\n output_val[:] = self.inputs[0].tmp_gradient_in_arr\n else:\n self.inputs[0].tmp_gradient_in_arr.copyto(output_val)\n\n def gradient(self, output_grad):\n raise NotImplementedError\n\n def infer_shape(self, input_shapes):\n return input_shapes[1]\n\n\nclass Layer_Normalization_Gradient_of_ScaleOp(Op):\n def __init__(self, ln_gradient, in_scale, ctx=None):\n super().__init__(Layer_Normalization_Gradient_of_ScaleOp, [ln_gradient, in_scale], ctx)\n\n def compute(self, input_vals, output_val, stream_handle=None):\n if self.on_cpu:\n output_val[:] = self.inputs[0].tmp_gradient_ln_scale\n else:\n self.inputs[0].tmp_gradient_ln_scale.copyto(output_val)\n\n def gradient(self, output_grad):\n raise NotImplementedError\n\n def infer_shape(self, input_shapes):\n return input_shapes[1]\n\n\nclass 
Layer_Normalization_Gradient_of_BiasOp(Op):\n def __init__(self, ln_gradient, in_bias, ctx=None):\n super().__init__(Layer_Normalization_Gradient_of_BiasOp, [ln_gradient, in_bias], ctx)\n\n def compute(self, input_vals, output_val, stream_handle=None):\n if self.on_cpu:\n output_val[:] = self.inputs[0].tmp_gradient_ln_bias\n else:\n self.inputs[0].tmp_gradient_ln_bias.copyto(output_val)\n\n def gradient(self, output_grad):\n raise NotImplementedError\n\n def infer_shape(self, input_shapes):\n return input_shapes[1]\n\n\ndef layer_normalization_op(node_in, ln_scale, ln_bias, eps=0.01, ctx=None):\n \"\"\"Layer normalization node.\n\n Parameters:\n ----\n node_in : Node\n Input data.\n ln_scale : float\n scaling parameter\n ln_bias :\n learnable bias parameter\n eps : float\n Epsilon value for numerical stability.\n\n Returns:\n ----\n A new Node instance created by Op.\n\n \"\"\"\n return Layer_NormalizationOp(node_in, ln_scale, ln_bias, eps, ctx=ctx)\n\n\ndef layer_normalization_gradient_op(out_gradient, in_node, ln_scale, forward_node, eps, ctx=None):\n \"\"\"Gradient node of layer normalization.\n\n Parameters:\n ----\n out_gradient :\n The gradient array.\n in_node : Node\n Input node of ln layer.\n ln_scale :\n Scaling parameter.\n\n Returns:\n ----\n A new Node instance created by Op.\n\n \"\"\"\n return Layer_Normalization_GradientOp(out_gradient, in_node, ln_scale, forward_node, eps, ctx=ctx)\n\n\ndef layer_normalization_gradient_of_data_op(ln_gradient, in_arr, ctx=None):\n \"\"\"Gradient node of data of layer normalization.\n\n Parameters:\n ----\n ln_gradient :\n The gradient array.\n in_arr : Node\n Input array of ln layer.\n\n Returns:\n ----\n A new Node instance created by Op.\n\n \"\"\"\n return Layer_Normalization_Gradient_of_DataOp(ln_gradient, in_arr, ctx=ctx)\n\n\ndef layer_normalization_gradient_of_scale_op(ln_gradient, in_scale, ctx=None):\n \"\"\"Gradient node of scale parameter of layer normalization.\n\n Parameters:\n ----\n ln_gradient :\n The gradient array.\n in_scale :\n Scaling parameter of ln layer.\n\n Returns:\n ----\n A new Node instance created by Op.\n\n \"\"\"\n return Layer_Normalization_Gradient_of_ScaleOp(ln_gradient, in_scale, ctx=ctx)\n\n\ndef layer_normalization_gradient_of_bias_op(ln_gradient, in_bias, ctx=None):\n \"\"\"Gradient node of bias parameter of layer normalization.\n\n Parameters:\n ----\n ln_gradient :\n The gradient array.\n in_bias :\n Bias parameter of ln layer.\n\n Returns:\n ----\n A new Node instance created by Op.\n\n \"\"\"\n return Layer_Normalization_Gradient_of_BiasOp(ln_gradient, in_bias, ctx=ctx)\n", "from athena import gpu_ops as ad\nfrom athena import cpu_links as cpu_op\nfrom athena import gpu_links as gpu_op\nfrom athena import ndarray\nimport numpy as np\nimport ctypes\n\n\nclass BaseInit(object):\n def __init__(self, shape):\n self.shape = tuple(shape)\n\n def __call__(self, node, seed, np_rand=None, stream=None):\n self.node = node\n self.seed = seed + node.id\n node.tensor_value = ndarray.empty(self.shape, ctx=node.ctx)\n if ndarray.is_gpu_ctx(node.ctx):\n self.init_on_gpu(stream)\n else:\n self.init_on_cpu(np_rand)\n\n def init_on_gpu(self, stream):\n raise NotImplementedError\n\n def init_on_cpu(self, np_rand):\n raise NotImplementedError\n\n def init_on_ps(self, param_type):\n # param types: Dense 0, Sparse 1, CacheSparse 2\n if param_type == 0:\n self.length = ctypes.c_int(np.prod(self.shape))\n self.width = ctypes.c_int(1)\n else:\n assert len(self.shape) == 2\n self.length = ctypes.c_int(self.shape[0])\n 
self.width = ctypes.c_int(self.shape[1])\n\n\nclass ConstantInit(BaseInit):\n def __init__(self, constant, shape):\n super().__init__(shape)\n self.constant = constant\n\n def init_on_gpu(self, stream):\n gpu_op.array_set(self.node.tensor_value, self.constant, stream)\n\n def init_on_cpu(self, np_rand):\n from ._base import DNNL_LIB\n if DNNL_LIB['cpu_ArraySet']:\n cpu_op.array_set(self.node.tensor_value, self.constant)\n else:\n self.node.tensor_value[:] = np.full(self.shape, self.constant).astype(np.float32)\n\n def init_on_ps(self, comm, nid, param_type):\n super().init_on_ps(param_type)\n init_type = ctypes.c_int(0)\n comm.InitTensor(nid, ctypes.c_int(param_type), self.length, self.width, \\\n init_type, ctypes.c_double(self.constant), ctypes.c_double(1.0))\n\n\nclass ZerosInit(ConstantInit):\n def __init__(self, shape):\n super().__init__(0.0, shape)\n\n\nclass OnesInit(ConstantInit):\n def __init__(self, shape):\n super().__init__(1.0, shape)\n\n\nclass UniformInit(BaseInit):\n def __init__(self, low, high, shape):\n super().__init__(shape)\n self.low = low\n self.high = high\n\n def init_on_gpu(self, stream):\n gpu_op.uniform_init(self.node.tensor_value, self.low, self.high, self.seed, stream)\n\n def init_on_cpu(self, np_rand):\n from ._base import DNNL_LIB\n if DNNL_LIB['cpu_UniformInit']:\n cpu_op.uniform_init(self.node.tensor_value, self.low, self.high, self.seed)\n else:\n self.node.tensor_value[:] = np_rand.uniform(low=self.low, high=self.high, size=self.shape).astype(np.float32)\n\n def init_on_ps(self, comm, nid, param_type):\n super().init_on_ps(param_type)\n init_type = ctypes.c_int(1)\n comm.InitTensor(nid, ctypes.c_int(param_type), self.length, self.width, \\\n init_type, ctypes.c_double(self.low), ctypes.c_double(self.high))\n\n\nclass GeneralizedXavierUniformInit(UniformInit):\n def __init__(self, gain, mode, shape):\n assert mode in ('fan_in', 'fan_out', 'avg'), 'Mode %s not valid.' % mode\n assert gain > 0, 'Gain value %s not valid.' 
% str(gain)\n assert len(shape) >= 2, 'Generalized xavier requires shape to be at least 2D.'\n hw_scale = 1 if len(shape) == 2 else np.prod(shape[2:])\n fan_in = hw_scale * shape[1]\n fan_out = hw_scale * shape[0]\n if mode == 'fan_in':\n factor = fan_in\n elif mode == 'fan_out':\n factor = fan_out\n else:\n factor = (fan_in + fan_out) / 2.0\n limit = np.sqrt(gain / factor)\n super().__init__(-limit, limit, shape)\n\n\nclass XavierUniformInit(GeneralizedXavierUniformInit):\n def __init__(self, shape):\n super().__init__(3.0, 'avg', shape)\n\n\nclass HeUniformInit(GeneralizedXavierUniformInit):\n def __init__(self, shape):\n super().__init__(6.0, 'fan_in', shape)\n\n\nclass LecunUniformInit(GeneralizedXavierUniformInit):\n def __init__(self, shape):\n super().__init__(3.0, 'fan_in', shape)\n\n\nclass NormalInit(BaseInit):\n def __init__(self, mean, stddev, shape):\n super().__init__(shape)\n self.mean = mean\n self.stddev = stddev\n\n def init_on_gpu(self, stream):\n gpu_op.normal_init(self.node.tensor_value, self.mean, self.stddev, self.seed, stream)\n\n def init_on_cpu(self, np_rand):\n from ._base import DNNL_LIB\n if DNNL_LIB['cpu_NormalInit']:\n cpu_op.normal_init(self.node.tensor_value, self.mean, self.stddev, self.seed)\n else:\n self.node.tensor_value[:] = np_rand.normal(loc=self.mean, scale=self.stddev, size=self.shape).astype(np.float32)\n\n def init_on_ps(self, comm, nid, param_type):\n super().init_on_ps(param_type)\n init_type = ctypes.c_int(2)\n comm.InitTensor(nid, ctypes.c_int(param_type), self.length, self.width, \\\n init_type, ctypes.c_double(self.mean), ctypes.c_double(self.stddev))\n\n\nclass GeneralizedXavierNormalInit(NormalInit):\n def __init__(self, gain, mode, shape):\n assert mode in ('fan_in', 'fan_out', 'avg'), 'Mode not allowed.'\n assert gain > 0, 'Gain value not allowed.'\n assert len(shape) >= 2, 'Generalized xavier requires shape to be at least 2D.'\n hw_scale = 1 if len(shape) == 2 else np.prod(shape[2:])\n fan_in = hw_scale * shape[1]\n fan_out = hw_scale * shape[0]\n if mode == 'fan_in':\n factor = fan_in\n elif mode == 'fan_out':\n factor = fan_out\n else:\n factor = (fan_in + fan_out) / 2.0\n scale = np.sqrt(gain / factor)\n super().__init__(0, scale, shape)\n\n\nclass XavierNormalInit(GeneralizedXavierNormalInit):\n def __init__(self, shape):\n super().__init__(1.0, 'avg', shape)\n\n\nclass HeNormalInit(GeneralizedXavierNormalInit):\n def __init__(self, shape):\n super().__init__(2.0, 'fan_in', shape)\n\n\nclass LecunNormalInit(GeneralizedXavierNormalInit):\n def __init__(self, shape):\n super().__init__(1.0, 'fan_in', shape)\n\n\nclass TruncatedNormalInit(BaseInit):\n def __init__(self, mean, stddev, shape):\n super().__init__(shape)\n self.mean = mean\n self.stddev = stddev\n\n def init_on_gpu(self, stream):\n gpu_op.truncated_normal_init(self.node.tensor_value, self.mean, self.stddev, self.seed, stream)\n\n def init_on_cpu(self, np_rand):\n from ._base import DNNL_LIB\n if DNNL_LIB['cpu_TruncatedNormalInit']:\n cpu_op.truncated_normal_init(self.node.tensor_value, self.mean, self.stddev, self.seed)\n else:\n # this function cannot use np_rand\n from scipy.stats import truncnorm\n self.node.tensor_value[:] = truncnorm(\n -2.0, 2.0, loc=self.mean, scale=self.stddev).rvs(self.shape).astype(np.float32)\n\n def init_on_ps(self, comm, nid):\n super().init_on_ps(param_type)\n init_type = ctypes.c_int(3)\n comm.InitTensor(nid, ctypes.c_int(param_type), self.length, self.width, \\\n init_type, ctypes.c_double(self.mean), 
ctypes.c_double(self.stddev))\n\n\n\n### here we provide easy APIs\n\n\ndef zeros(shape, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'zeros_initializer'\n init = ZerosInit(shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n\ndef ones(shape, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'ones_initializer'\n init = OnesInit(shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n\ndef constant(shape, fill_value=0.0, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'constant_initializer'\n init = ConstantInit(fill_value, shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n\ndef truncated_normal(shape, mean=0.0, stddev=1.0, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'truncated_normal_initializer'\n init = TruncatedNormalInit(mean, stddev, shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n\ndef random_normal(shape, mean=0.0, stddev=1.0, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'random_normal_initializer'\n init = NormalInit(mean, stddev, shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n\ndef random_uniform(shape, minval=-1.0, maxval=1.0, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'random_uniform_initializer'\n init = UniformInit(minval, maxval, shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n\ndef xavier_normal(shape, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'xavier_normal_initializer'\n init = XavierNormalInit(shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n\ndef xavier_uniform(shape, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'xavier_uniform_initializer'\n init = XavierUniformInit(shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n\ndef he_normal(shape, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'he_normal_initializer'\n init = HeNormalInit(shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n\ndef he_uniform(shape, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'he_uniform_initializer'\n init = HeUniformInit(shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n\ndef lecun_normal(shape, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'lecun_normal_initializer'\n init = LecunNormalInit(shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n\ndef lecun_uniform(shape, name=None, trainable=True, ctx=None):\n if name is None:\n name = 'lecun_uniform_initializer'\n init = LecunUniformInit(shape)\n return ad.Variable(name=name, initializer=init, trainable=trainable, ctx=ctx)\n", "import json, ast\nimport numpy as np\nimport nltk\n# all products with metadata\nfilemap = {'train': './amazon-3M_train_map.txt',\n 'test': './amazon-3M_test_map.txt',\n 'bow' : './amazon-3M_feature_map.txt',\n 'meta': './metadata.json',\n 'output': './graph.npz',\n 'output_sparse' : './sparsefeature.npy'}\n\ndef getBagofWord():\n bow = dict()\n with open(filemap['bow'], 'r') as f:\n # start with 1, 0 for padding\n word_cnt = 1\n for line in f.read().strip().split():\n bow[line] = word_cnt\n word_cnt += 1\n return bow\n\ndef gettoken(descriptions, length):\n bow = getBagofWord()\n token_matrix = 
[]\n for desc in descriptions:\n token_id = []\n token = nltk.word_tokenize(desc.lower())\n for word in token:\n if word in bow:\n token_id.append(bow[word])\n if len(token_id) == length:\n break\n while len(token_id) < length:\n token_id.append(0)\n token_matrix.append(token_id)\n return np.array(token_matrix)\n\nprod_all = dict()\nprod_rcd = dict()\nwith open(filemap['meta'], 'r') as f:\n for line in f:\n prod = ast.literal_eval(line.strip().replace('\\n', '\\\\n'))\n asin = prod['asin']\n prod_all[asin] = prod\n if 'related' in prod and 'categories' in prod and 'description' in prod:\n prod_rcd[asin] = prod\n\ntestNodes = set()\nprod_gcn = dict()\nasin2id = dict()\ncnt_id = 0\nasinlist = []\n\nfor kword in ['train', 'test']:\n with open(filemap[kword], 'r') as f:\n for line in f:\n asin = line.split()[0]\n if asin in prod_rcd:\n if kword == 'test':\n testNodes.add(asin)\n prod_gcn[asin] = prod_rcd[asin]\n asin2id[asin] = cnt_id\n cnt_id += 1\n asinlist.append(asin)\n\ngraphlen = len(prod_gcn)\nprint('#products with rel/cat/des/feat (GCN assumptions)', graphlen)\nprint('#trainNodes:', graphlen-len(testNodes), 'testNodes:', len(testNodes))\n\nprint(len(asin2id))\n\ncat2id = dict()\ncnt_id = 0\n\nclass_map = np.zeros(graphlen).astype(np.int32)\ntrain_map = np.zeros(graphlen).astype(np.int32)\ndescriptions = []\nfor idx, asin in enumerate(asinlist):\n prod = prod_gcn[asin]\n isTest = True if asin in testNodes else False\n\n cat = prod['categories'][0][0]\n if cat not in cat2id:\n cat2id[cat] = (cnt_id, 0, 0)\n cnt_id += 1\n\n if isTest:\n cat2id[cat] = (cat2id[cat][0], cat2id[cat][1], cat2id[cat][2]+1)\n else:\n cat2id[cat] = (cat2id[cat][0], cat2id[cat][1]+1, cat2id[cat][2])\n\n class_map[idx] = cat2id[cat][0]\n train_map[idx] = 0 if isTest else 1\n if \"title\" in prod:\n descriptions.append(prod[\"title\"] + \" \" + prod['description'])\n else:\n descriptions.append(prod['description'])\n\nprint('Classes:', cat2id)\nprint(\"Num Classes:\", len(cat2id))\n\nlinks_set = set()\nfor idx, asin in enumerate(asinlist):\n for rel, neighbors in prod_gcn[asin]['related'].items():\n for asin_nei in neighbors:\n if asin_nei not in asin2id: continue\n idx_nei = asin2id[asin_nei]\n lk = (idx, idx_nei) if idx_nei > idx else (idx_nei, idx)\n if lk not in links_set:\n links_set.add(lk)\nlinks = np.array(list(links_set))\nprint('#links between products:', len(links))\ntoken_matrix = gettoken(descriptions, 16)\nnp.savez(file=filemap['output'] ,y=class_map, train_map=train_map, edge=links)\nnp.save(file=filemap['output_sparse'], arr=token_matrix)\n", "from __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n\nimport numpy as np\nfrom onnx import onnx_pb\nfrom athena.onnx import constants, util,graph\nfrom athena.onnx.handler import athena_op\nfrom athena.onnx.onnx_opset import general\n\n@athena_op([\"PadOp\"],onnx_op=[\"Pad\"])\nclass Pad:\n @classmethod\n def version_2(cls,ctx,node,**kwargs):\n pads=node.get_attr_value('paddings',None)\n assert pads is not None\n node.set_attr('pads',pads)\n\n support_modes=['constant','reflect','edge']\n mode=node.get_attr_value('mode','constant').lower()\n assert mode in support_modes\n node.set_attr('mode',mode)\n\n @classmethod\n def version_11(cls,ctx,node,**kwargs):\n pads=node.get_attr_value('paddings',None)\n assert pads is not None\n paddings=np.array(pads).astype(np.int64)\n paddings_node=ctx.make_const(util.make_name(node.name),paddings)\n node.input_tensor_names = node.input_tensor_names + 
paddings_node.output_tensor_names\n\n support_modes=['constant','reflect','edge']\n mode=node.get_attr_value('mode','constant').lower()\n assert mode in support_modes\n node.set_attr('mode',mode)\n\n constant_value =node.get_attr_value('constant_values',None)\n constant_value=np.array([constant_value]).astype(np.float32)\n constant_value_node=ctx.make_const(util.make_name(node.name),constant_value,)\n node.input_tensor_names=node.input_tensor_names+constant_value_node.output_tensor_names" ]
[ [ "tensorflow.device", "tensorflow.nn.softmax_cross_entropy_with_logits", "tensorflow.compat.v1.sparse.placeholder", "numpy.mean", "tensorflow.Graph", "tensorflow.Variable", "numpy.argmax", "tensorflow.sparse.matmul", "numpy.repeat", "tensorflow.random_normal_initializer", "numpy.zeros", "tensorflow.matmul", "tensorflow.placeholder", "tensorflow.train.GradientDescentOptimizer", "tensorflow.one_hot", "tensorflow.keras.initializers.glorot_uniform", "numpy.random.RandomState", "tensorflow.nn.embedding_lookup", "tensorflow.nn.relu", "tensorflow.reshape", "numpy.tile", "tensorflow.keras.initializers.zeros", "tensorflow.train.replica_device_setter" ], [ "numpy.sqrt", "numpy.empty" ], [ "scipy.stats.truncnorm", "numpy.sqrt", "numpy.prod", "numpy.full" ], [ "numpy.array", "numpy.savez", "numpy.zeros", "numpy.save" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
dezounet/datadez
[ "fa14fc8bc7e1a50cc9cdaa90d11da0e57bf80b73" ]
[ "datadez/vectorize.py" ]
[ "from __future__ import unicode_literals, print_function\n\nimport operator\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.preprocessing import LabelBinarizer\nfrom sklearn.preprocessing import MultiLabelBinarizer\n\n\ndef _vectorize(vectorizer, series):\n vectorizer.fit(series)\n\n # Vectorize the input\n vector = vectorizer.transform(series)\n\n try:\n vector = vector.todense()\n vector = list(np.squeeze(vector))\n vector = list(map(lambda x: np.array(x)[0], vector))\n except AttributeError:\n pass\n\n # Get vocabulary, ordered by id\n if hasattr(vectorizer, 'vocabulary_'):\n vocabulary = sorted(vectorizer.vocabulary_.items(), key=operator.itemgetter(1))\n vocabulary = [word[0] for word in vocabulary]\n elif hasattr(vectorizer, 'classes_'):\n vocabulary = vectorizer.classes_\n else:\n raise ValueError(\"Wrong type of vectorizer given! Excepting one with attribute 'vocabulary_' or 'classes_'\")\n\n # Encapsulate new columns inside a meta column, and put each word to its own column\n new_columns = pd.DataFrame(vector)\n new_columns.columns = pd.Series(vocabulary)\n\n return new_columns\n\n\ndef vectorize_text(series, min_df=1, max_df=1.0, binary=False):\n \"\"\"\n Vectorize a text column.\n\n Tokenization of the input and vectorization is done\n through a CountVectorizer.\n\n :param series: series to vectorize\n :param min_df: float in range [0.0, 1.0] or int, default=1\n :param max_df: float in range [0.0, 1.0] or int, default=1.0\n :param binary: If True, all non zero counts are set to 1, else to count.\n\n :return: vectorized series as a dataframe, vectorizer\n \"\"\"\n vectorizer = CountVectorizer(min_df=min_df, max_df=max_df, binary=binary)\n dataframe = _vectorize(vectorizer, series)\n\n return dataframe, vectorizer\n\n\ndef vectorize_mono_label(series):\n \"\"\"\n Vectorize a mono-label column.\n\n :param series: series to vectorize\n :return: vectorized series as a dataframe, vectorizer\n \"\"\"\n vectorizer = LabelBinarizer()\n dataframe = _vectorize(vectorizer, series)\n\n return dataframe, vectorizer\n\n\ndef vectorize_multi_label(series):\n \"\"\"\n Vectorize a multi-label column.\n\n :param series: series to vectorize\n :return: vectorized series as a dataframe, vectorizer\n \"\"\"\n vectorizer = MultiLabelBinarizer()\n dataframe = _vectorize(vectorizer, series)\n\n return dataframe, vectorizer\n" ]
[ [ "pandas.Series", "numpy.squeeze", "sklearn.preprocessing.MultiLabelBinarizer", "pandas.DataFrame", "sklearn.feature_extraction.text.CountVectorizer", "sklearn.preprocessing.LabelBinarizer", "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.3", "0.19", "1.1", "1.5", "0.24", "0.20", "1.0", "0.25", "1.2" ], "scipy": [], "tensorflow": [] } ]
codesutras/pandas-ta
[ "78598df9cfd2f165553262e85ae4c0392598d48c", "78598df9cfd2f165553262e85ae4c0392598d48c" ]
[ "pandas_ta/utils/_time.py", "tests/test_indicator_candle.py" ]
[ "# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom time import localtime, perf_counter\nfrom typing import Tuple\n\nfrom pandas import DataFrame, Timestamp\n\nfrom pandas_ta import EXCHANGE_TZ, RATE\nfrom pandas_ta.utils import verify_series\n\n\ndef df_dates(df: DataFrame, dates: Tuple[str, list] = None) -> DataFrame:\n \"\"\"Yields the DataFrame with the given dates\"\"\"\n if dates is None: return None\n if not isinstance(dates, list):\n dates = [dates]\n return df[df.index.isin(dates)]\n\n\ndef df_month_to_date(df: DataFrame) -> DataFrame:\n \"\"\"Yields the Month-to-Date (MTD) DataFrame\"\"\"\n return df[df.index >= Timestamp.now().strftime(\"%Y-%m-01\")]\n\n\ndef df_quarter_to_date(df: DataFrame) -> DataFrame:\n \"\"\"Yields the Quarter-to-Date (QTD) DataFrame\"\"\"\n now = Timestamp.now()\n for m in [1, 4, 7, 10]:\n if now.month <= m:\n return df[df.index >= datetime(now.year, m, 1).strftime(\"%Y-%m-01\")]\n return df[df.index >= now.strftime(\"%Y-%m-01\")]\n\n\ndef df_year_to_date(df: DataFrame) -> DataFrame:\n \"\"\"Yields the Year-to-Date (YTD) DataFrame\"\"\"\n return df[df.index >= Timestamp.now().strftime(\"%Y-01-01\")]\n\n\ndef final_time(stime: float) -> str:\n \"\"\"Human readable elapsed time. Calculates the final time elasped since\n stime and returns a string with microseconds and seconds.\"\"\"\n time_diff = perf_counter() - stime\n return f\"{time_diff * 1000:2.4f} ms ({time_diff:2.4f} s)\"\n\n\ndef get_time(exchange: str = \"NYSE\", full:bool = True, to_string:bool = False) -> Tuple[None, str]:\n \"\"\"Returns Current Time, Day of the Year and Percentage, and the current\n time of the selected Exchange.\"\"\"\n tz = EXCHANGE_TZ[\"NYSE\"] # Default is NYSE (Eastern Time Zone)\n if isinstance(exchange, str):\n exchange = exchange.upper()\n tz = EXCHANGE_TZ[exchange]\n\n # today = Timestamp.utcnow()\n today = Timestamp.now()\n date = f\"{today.day_name()} {today.month_name()} {today.day}, {today.year}\"\n\n _today = today.timetuple()\n exchange_time = f\"{(_today.tm_hour + tz) % 24}:{_today.tm_min:02d}:{_today.tm_sec:02d}\"\n\n if full:\n lt = localtime()\n local_ = f\"Local: {lt.tm_hour}:{lt.tm_min:02d}:{lt.tm_sec:02d} {lt.tm_zone}\"\n doy = f\"Day {today.dayofyear}/365 ({100 * round(today.dayofyear/365, 2):.2f}%)\"\n exchange_ = f\"{exchange}: {exchange_time}\"\n\n s = f\"{date}, {exchange_}, {local_}, {doy}\"\n else:\n s = f\"{date}, {exchange}: {exchange_time}\"\n\n return s if to_string else print(s)\n\n\ndef total_time(df: DataFrame, tf: str = \"years\") -> float:\n \"\"\"Calculates the total time of a DataFrame. Difference of the Last and\n First index. Options: 'months', 'weeks', 'days', 'hours', 'minutes'\n and 'seconds'. 
Default: 'years'.\n Useful for annualization.\"\"\"\n time_diff = df.index[-1] - df.index[0]\n TimeFrame = {\n \"years\": time_diff.days / RATE[\"TRADING_DAYS_PER_YEAR\"],\n \"months\": time_diff.days / 30.417,\n \"weeks\": time_diff.days / 7,\n \"days\": time_diff.days,\n \"hours\": time_diff.days * 24,\n \"minutes\": time_diff.total_seconds() / 60,\n \"seconds\": time_diff.total_seconds()\n }\n\n if isinstance(tf, str) and tf in TimeFrame.keys():\n return TimeFrame[tf]\n return TimeFrame[\"years\"]\n\n\ndef to_utc(df: DataFrame) -> DataFrame:\n \"\"\"Either localizes the DataFrame Index to UTC or it applies\n tz_convert to set the Index to UTC.\n \"\"\"\n if not df.empty:\n try:\n df.index = df.index.tz_localize(\"UTC\")\n except TypeError:\n df.index = df.index.tz_convert(\"UTC\")\n return df\n\n\n# Aliases\nmtd_df = df_month_to_date\nqtd_df = df_quarter_to_date\nytd_df = df_year_to_date", "from .config import error_analysis, sample_data, CORRELATION, CORRELATION_THRESHOLD, VERBOSE\nfrom .context import pandas_ta\n\nfrom unittest import TestCase, skip\nimport pandas.testing as pdt\nfrom pandas import DataFrame, Series\n\nimport talib as tal\n\n\nclass TestCandle(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.data = sample_data\n cls.data.columns = cls.data.columns.str.lower()\n cls.open = cls.data[\"open\"]\n cls.high = cls.data[\"high\"]\n cls.low = cls.data[\"low\"]\n cls.close = cls.data[\"close\"]\n if \"volume\" in cls.data.columns:\n cls.volume = cls.data[\"volume\"]\n\n @classmethod\n def tearDownClass(cls):\n del cls.open\n del cls.high\n del cls.low\n del cls.close\n if hasattr(cls, \"volume\"):\n del cls.volume\n del cls.data\n\n def setUp(self): pass\n def tearDown(self): pass\n\n\n def test_ha(self):\n result = pandas_ta.ha(self.open, self.high, self.low, self.close)\n self.assertIsInstance(result, DataFrame)\n self.assertEqual(result.name, \"Heikin-Ashi\")\n\n def test_cdl_pattern(self):\n result = pandas_ta.cdl_pattern(self.open, self.high, self.low, self.close, name=\"all\")\n self.assertIsInstance(result, DataFrame)\n self.assertEqual(len(result.columns), len(pandas_ta.CDL_PATTERN_NAMES))\n\n result = pandas_ta.cdl_pattern(self.open, self.high, self.low, self.close, name=\"doji\")\n self.assertIsInstance(result, DataFrame)\n\n result = pandas_ta.cdl_pattern(self.open, self.high, self.low, self.close, name=[\"doji\", \"inside\"])\n self.assertIsInstance(result, DataFrame)\n\n def test_cdl_doji(self):\n result = pandas_ta.cdl_doji(self.open, self.high, self.low, self.close)\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"CDL_DOJI_10_0.1\")\n\n try:\n expected = tal.CDLDOJI(self.open, self.high, self.low, self.close)\n pdt.assert_series_equal(result, expected, check_names=False)\n except AssertionError as ae:\n try:\n corr = pandas_ta.utils.df_error_analysis(result, expected, col=CORRELATION)\n self.assertGreater(corr, CORRELATION_THRESHOLD)\n except Exception as ex:\n error_analysis(result, CORRELATION, ex)\n\n def test_cdl_inside(self):\n result = pandas_ta.cdl_inside(self.open, self.high, self.low, self.close)\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"CDL_INSIDE\")\n\n result = pandas_ta.cdl_inside(self.open, self.high, self.low, self.close, asbool=True)\n self.assertIsInstance(result, Series)\n self.assertEqual(result.name, \"CDL_INSIDE\")\n\n def test_cdl_z(self):\n result = pandas_ta.cdl_z(self.open, self.high, self.low, self.close)\n self.assertIsInstance(result, DataFrame)\n 
self.assertEqual(result.name, \"CDL_Z_30_1\")" ]
[ [ "pandas.Timestamp.now" ], [ "pandas.testing.assert_series_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [ "0.23", "0.21", "2.0", "1.4", "1.1", "1.5", "1.2", "0.24", "0.20", "1.0", "0.25", "1.3" ], "scipy": [], "tensorflow": [] } ]
gabrielelanaro/scrapegoat
[ "6dd95d12c241943ca26bcfb261168a2dd84c9a48" ]
[ "scratchpad/pulearning.py" ]
[ "# Basically we have found the following facts:\n# Making edge prediction only doesn't work, there are just too many possibilities, therefore we need to\n# first do node classification, and then we need to tdo edge classification.\n#\n# And how do we do node classification?\n# Two approaches: one of them would involve first performing a regular classification with the tagging tool\n# and this is probably a good method anyway.\n#\n# Another approach can do everything together and it is as follows:\n#\n# 1. use the features to classify what can a \"node\" be. You need to extract the number of candidates. and this could be probably\n# an hyperparameter of the range of 100 or whatever your capacity is. This is a PU labeling and can be done using a PUBagging classifier\n# strategy.\n#\n# 2. use the candidates obtained to do link prediction. Again this is a PU learning problem and can be done using PUBagging or other methods.\n\nfrom scrapegoat.store import Store\nfrom scrapegoat.learn.link_extractor_pipeline import (\n suggest_new_links,\n LinkType,\n PULabelType,\n)\nimport json\n\nstore = Store(\"immoscout/store\")\npage = store.get_page(\n \"https://www.immobilienscout24.de/Suche/de/berlin/berlin/wohnung-mieten?numberofrooms=2.0-&price=-1300.0&enteredFrom=one_step_search\"\n)\n\ncandidates = page.get_candidates()\nlinks = json.load((page._path / \"linkLabels.json\").open())\n\n\ncandidates_by_id = {c.path: c for c in candidates}\n\n\nfor link in links:\n source = candidates_by_id[link[\"source\"]]\n target = candidates_by_id[link[\"target\"]]\n\n print(source.text, \"->\", target.text)\n\nlinks_typed = [\n LinkType(\n source=link[\"source\"], target=link[\"target\"], value=PULabelType(link[\"value\"])\n )\n for link in links\n]\n\nnl = suggest_new_links(candidates, links_typed)\n\nprint(\"SUggetsted links\")\nfor l in nl:\n print(candidates_by_id[l.source].text, \"->\", candidates_by_id[l.target].text)\n\n\n1 / 0\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.svm import LinearSVC\nfrom scrapegoat.transforms import CandidateTransform\nfrom scrapegoat.bagging_pu import BaggingClassifierPU\nimport random\nimport numpy as np\n\n\ndef train_graph(candidates, link_labels):\n true_edges = {(l[\"source\"], l[\"target\"]) for l in link_labels}\n print(\"training\")\n positive = []\n unlabeled = []\n for source in candidates:\n for target in candidates:\n if (source.path, target.path) in true_edges:\n positive.append((source, target))\n else:\n unlabeled.append((source, target))\n\n # Let's do a naive approach, treat a sample as negative\n # negatives = random.choices(unlabeled, k=len(link_labels) * 10)\n negatives = unlabeled\n tf = CandidateTransform()\n\n X = []\n y = []\n\n for (source, target) in positive:\n X.append(encode_pair(tf.encode(source), tf.encode(target)))\n y.append(1.0)\n\n for (source, target) in negatives:\n X.append(encode_pair(tf.encode(source), tf.encode(target)))\n y.append(0.0)\n\n model = RandomForestClassifier()\n\n mdl = model.fit(X, y)\n proba = mdl.predict_proba(X[: len(positive)])\n\n return model, proba[:, 1].min(), proba[:, 1].max()\n\n\ndef predict_graph(candidates, model, min_thr, max_thr):\n tf = CandidateTransform()\n\n features = [tf.encode(candidate) for candidate in candidates]\n positive_links = []\n print(\"predicting\")\n for i, source in enumerate(candidates):\n X = []\n for j, target in enumerate(candidates):\n X.append(encode_pair(features[i], features[j]))\n\n pred = 
model.predict_proba(X)[:, 1]\n\n for i, p in enumerate(pred):\n target = candidates[i]\n if p >= max_thr:\n print(\"p=\", p, source.text[:100], \"->\", target.text[:100])\n positive_links.append({\"source\": source.path, \"target\": target.path})\n\n return positive_links\n\n\ndef _dedup_links(source, target):\n pass\n\n\nlinks = links\n\nfor n in range(10):\n print(\"Step\", n)\n print(\"---\")\n\n reduced_list = train_binary(candidates, links)\n model, min_thr, max_thr = train_graph(reduced_list, links)\n\n new_links = predict_graph(reduced_list, model, min_thr, max_thr)\n\n links.extend(new_links)\n\n" ]
[ [ "sklearn.ensemble.RandomForestClassifier" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
sciapp/pyMolDyn
[ "fba6ea91cb185f916b930cd25b4b1d28a22fb4c5" ]
[ "src/core/calculation/gyrationtensor.py" ]
[ "# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport math\nimport numpy as np\nimport numpy.linalg as la\n\n\ndef calculate_gyration_tensor_parameters(points):\n \"\"\"\n Calculates the gyration tensor parameters R_g^2, η, c, κ from a list of\n all points inside a cavity.\n - R_g^2 is the squared gyration radius\n - η is the asphericity\n - c is the acylindricity\n - κ is the anisotropy\n \"\"\"\n\n points = np.array(points, dtype=np.float)\n mean = np.mean(points, axis=0)\n points -= mean\n\n gyration_tensor = np.zeros((3, 3))\n for i in range(3):\n for j in range(i, 3):\n gyration_tensor[i, j] = np.dot(points[:, i], points[:, j])\n gyration_tensor[j, i] = gyration_tensor[i, j]\n # cell volume is constant, cavity volume is proportional to len(points)\n gyration_tensor /= len(points)\n\n eigvals = list(sorted(la.eigvalsh(gyration_tensor), reverse=True))\n\n squared_gyration_radius = sum(eigvals)\n if squared_gyration_radius > 0:\n asphericity = (eigvals[0] - 0.5 * (eigvals[1] + eigvals[2])) / squared_gyration_radius\n acylindricity = (eigvals[1]-eigvals[2]) / squared_gyration_radius\n anisotropy = (asphericity**2 + 0.75 * acylindricity**2)**0.5\n else:\n asphericity = 0\n acylindricity = 0\n anisotropy = 0\n return mean, squared_gyration_radius, asphericity, acylindricity, anisotropy\n\n\n# Test code:\n\n\ndef generate_box_points(offset, side_length, n):\n return generate_cuboid_points(offset, (side_length, side_length, side_length), n)\n\n\ndef generate_cuboid_points(offset, side_lengths, n):\n offset = np.array(offset)\n interval = 0.5 * max(side_lengths) * np.linspace(-1, 1, n)\n points = []\n for x in interval:\n if abs(x) > 0.5 * side_lengths[0]:\n continue\n for y in interval:\n if abs(y) > 0.5 * side_lengths[1]:\n continue\n for z in interval:\n if abs(z) > 0.5 * side_lengths[2]:\n continue\n points.append((x, y, z) + offset)\n return points\n\n\ndef generate_sphere_points(offset, radius, n):\n offset = np.array(offset)\n interval = radius * np.linspace(-1, 1, n)\n points = []\n for x in interval:\n for y in interval:\n for z in interval:\n if la.norm((x, y, z)) <= radius:\n points.append((x, y, z) + offset)\n return points\n\n\ndef generate_cylinder_points(offset, radius, length, n):\n offset = np.array(offset)\n interval = max(radius, length/2) * np.linspace(-1, 1, n)\n points = []\n for x in interval:\n for y in interval:\n for z in interval:\n if abs(z) < length/2 and la.norm((x, y)) <= radius:\n points.append((x, y, z) + offset)\n return points\n\n\ndef main():\n silly_offset = (-2, 17.3, 42)\n print('box (a=1): ', calculate_gyration_tensor_parameters(generate_box_points(silly_offset, 1, 100)))\n print('box (a=2): ', calculate_gyration_tensor_parameters(generate_box_points(silly_offset, 2, 100)))\n print('cuboid (a=1, b=2, c=1): ', calculate_gyration_tensor_parameters(generate_cuboid_points(silly_offset, (1, 2, 1), 100)))\n print('cuboid (a=1, b=20, c=1): ', calculate_gyration_tensor_parameters(generate_cuboid_points(silly_offset, (1, 20, 1), 100)))\n print('sphere (r=1): ', calculate_gyration_tensor_parameters(generate_sphere_points(silly_offset, 1, 100)))\n print('sphere (r=2): ', calculate_gyration_tensor_parameters(generate_sphere_points(silly_offset, 2, 100)))\n print('cylinder (r=1, l=1): ', calculate_gyration_tensor_parameters(generate_cylinder_points(silly_offset, 1, 1, 100)))\n print('cylinder (r=1, l=20): ', 
calculate_gyration_tensor_parameters(generate_cylinder_points(silly_offset, 1, 20, 100)))\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.dot", "numpy.linspace", "numpy.linalg.norm", "numpy.mean", "numpy.linalg.eigvalsh", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
rdues/traccovid
[ "41e63b21e20071bdae9ca06a52ee54acb2001ae3" ]
[ "tracdash/app/elasticsearch.py" ]
[ "\"\"\"\nConnects to the ElasticSearch database.\nGenerates aggregations to be returned to the dashboard.\n\"\"\"\n\nfrom scipy.stats.distributions import chi2\n\nimport os, math, re, itertools, json\nfrom copy import deepcopy\nfrom elasticsearch import Elasticsearch\nfrom pandas import json_normalize\nfrom pprint import pprint\nimport pandas as pd\nimport numpy as np\nimport diskcache\nfrom hashlib import md5\n\nfrom ..exceptions import SearchException\nfrom ..stopwords import SEED_STOPWORDS, STOPWORDS_EN\nfrom ..unicodetokeniser.util import contains_digit\nfrom .util import info, exception\n\n\n# update accordingly\nSTART_DATE = \"2020-01-01\"\nEND_DATE = \"2021-04-30\"\nMIN_TIMESTAMP = \"2020-01-01T00:00:00\"\n\nAGG_SIZE = 1000\nRETURN_SIZE = 100\nMIN_THRESHOLD_PROPORTION = 1.0/1000000\t\t# 1 in 1 million\nMIN_THRESHOLD_HARD = 10\n\n# reference corpus collected mid-2018 via the Twitter Sample Stream (English only)\nREFERENCE_TYPE_LIST = os.path.join(os.path.dirname(__file__), '..', 'data', 'twitter_reference_types.txt')\nREFERENCE_CORPUS_SIZE = 49307264\nREFERENCE_MIN_PROPORTION = 1.0/1000000\nKEYNESS_SM_SMOOTHING = 100.0/1000000.0\nKEYNESS_SM_THRESHOLD = 1.1\nCANDIDATE_KEYWORDS_SIZE = 100000\nCANDIDATE_NGRAMS_SIZE = 100000\n\n\nRT_FILTER = {\n \"script\": {\n \"script\": {\n \"source\": \"(!doc['is_retweet'].value && !doc['is_reply'].value && !doc['is_quote'].value) || (doc['is_retweet'].value && params['rt']) || (doc['is_quote'].value && params['qt']) || (doc['is_reply'].value && params['re'])\",\n \"lang\": \"painless\",\n \"params\": {\n \"rt\": False,\n \"qt\": False,\n \"re\": False\n }\n }\n }\n}\n\n\nAGG_LIST = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \"total\": {\n \"value_count\": { \"field\": \"tweet_id\" }\n },\n \"counts\": {\n \"terms\": {\n \"field\": \"FIELD\",\n \"size\": AGG_SIZE,\n \"min_doc_count\": MIN_THRESHOLD_HARD\n }\n }\n }\n}\n\n\nDAY_TOTALS = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \"counts\": {\n \"date_histogram\": {\n \"field\": \"timestamp\",\n \"calendar_interval\": \"day\"\n }\n }\n }\n}\n\n\nDAY_TOTALS_SHARING = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } }\n ]\n }\n }, \n \"aggs\": {\n \"counts\": {\n \"date_histogram\": {\n \"field\": \"timestamp\",\n \"calendar_interval\": \"day\"\n }\n },\n \"rt_counts\": {\n \"filter\": { \"term\": { \"is_retweet\": True } },\n \"aggs\": {\n \"dates\": {\n \"date_histogram\": {\n \"field\": \"timestamp\",\n \"calendar_interval\": \"day\"\n }\n }\n }\n },\n \"qt_counts\": {\n \"filter\": { \"term\": { \"is_quote\": True } },\n \"aggs\": {\n \"dates\": {\n \"date_histogram\": {\n \"field\": \"timestamp\",\n \"calendar_interval\": \"day\"\n }\n }\n }\n },\n \"re_counts\": {\n \"filter\": { \"term\": { \"is_reply\": True } },\n \"aggs\": {\n \"dates\": {\n \"date_histogram\": {\n \"field\": \"timestamp\",\n \"calendar_interval\": \"day\"\n }\n }\n }\n }\n }\n}\n\n\nSEARCH_DAYS = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \"counts\": {\n \"date_histogram\": {\n \"field\": \"timestamp\",\n \"calendar_interval\": \"day\"\n }\n }\n 
}\n}\n\n\nTERM_RT = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \"rt_counts\": {\n \"terms\": {\n \"field\": \"is_retweet\",\n \"size\": 2\n }\n },\n \"qt_counts\": {\n \"terms\": {\n \"field\": \"is_quote\",\n \"size\": 2\n }\n },\n \"re_counts\": {\n \"terms\": {\n \"field\": \"is_reply\",\n \"size\": 2\n }\n }\n }\n}\n\n\nTERM_TYPES = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \t\"total\": {\n \"value_count\": { \"field\": \"tweet_id\" }\n },\n \"counts\": {\n \"terms\": {\n \"field\": \"types\",\n \"size\": AGG_SIZE,\n \"min_doc_count\": MIN_THRESHOLD_HARD\n }\n }\n }\n}\n\n\nTERM_BIGRAMS = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \"total\": {\n \"value_count\": { \"field\": \"tweet_id\" }\n },\n \"counts\": {\n \"terms\": {\n \"field\": \"bi_grams\",\n \"size\": CANDIDATE_NGRAMS_SIZE,\n \"min_doc_count\": MIN_THRESHOLD_HARD,\n }\n }\n }\n}\n\n\nTERM_TRIGRAMS = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \"total\": {\n \"value_count\": { \"field\": \"tweet_id\" }\n },\n \"counts\": {\n \"terms\": {\n \"field\": \"tri_grams\",\n \"size\": CANDIDATE_NGRAMS_SIZE,\n \"min_doc_count\": MIN_THRESHOLD_HARD\n }\n }\n }\n}\n\n\nTERM_HASHTAGS = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \t\"total\": {\n \"value_count\": { \"field\": \"tweet_id\" }\n },\n \"counts\": {\n \"terms\": {\n \"field\": \"hashtags\",\n \"size\": AGG_SIZE,\n \"min_doc_count\": MIN_THRESHOLD_HARD\n }\n }\n }\n}\n\n\nTERM_WEBSITES = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \t\"total\": {\n \"value_count\": { \"field\": \"tweet_id\" }\n },\n \"counts\": {\n \"terms\": {\n \"field\": \"websites\",\n \"size\": AGG_SIZE,\n \"min_doc_count\": MIN_THRESHOLD_HARD\n }\n }\n }\n}\n\n\nTERM_URLS = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \t\"total\": {\n \"value_count\": { \"field\": \"tweet_id\" }\n },\n \"counts\": {\n \"terms\": {\n \"field\": \"urls\",\n \"size\": AGG_SIZE,\n \"min_doc_count\": MIN_THRESHOLD_HARD\n }\n }\n }\n}\n\n\nTERM_USERS = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"must\": [],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \t\"total\": {\n \"value_count\": { \"field\": \"tweet_id\" }\n },\n \"users\": {\n \"cardinality\": { \"field\": \"username\" }\n },\n \"counts\": {\n \"terms\": {\n \"field\": \"username\",\n \"size\": 1000\n # no min threshold as we need to show many proportions to avoid the results being misleading\n }\n }\n }\n}\n\n\nCORPUS_SIZE = {\n \"query\": {\n \"bool\": {\n \"must\": [\n ],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": 
MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n }\n}\n\n\nCORPUS_SIZE_RT = {\n \"size\": 0,\n \"query\": {\n \"bool\": {\n \"filter\": { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } }\n }\n },\n \"aggs\": {\n \"counts\": {\n \"composite\": {\n \"sources\": [\n { \"is_retweet\": { \"terms\": { \"field\": \"is_retweet\" } } },\n { \"is_reply\": { \"terms\": { \"field\": \"is_reply\" } } },\n { \"is_quote\": { \"terms\": { \"field\": \"is_quote\" } } }\n ]\n }\n }\n }\n}\n\n\nCORPUS_USERS_COUNT = {\n \"query\": {\n \"bool\": {\n \"must\": [\n ],\n \"filter\": [\n { \"range\": { \"timestamp\": { \"gte\": MIN_TIMESTAMP } } },\n RT_FILTER\n ]\n }\n },\n \"aggs\": {\n \"users\": {\n \"cardinality\": {\n \t\"field\": \"username\",\n \t\"precision_threshold\": 40000\t# maximum\n }\n }\n }\n}\n\n\n\nclass ESHelper:\n\tdef __init__(self, es_instance, index_name, start_date=START_DATE, end_date=END_DATE, cache_path=None):\n\t\tself.es = es_instance\n\t\tself.index_name = index_name\n\t\tself.start_date = start_date\n\t\tself.end_date = end_date\n\t\tself.data_version = self.index_name[-2:]\n\t\t\n\t\tself.cache = None\n\t\tif cache_path is not None:\n\t\t\tself.init_cache(cache_path)\n\t\t\n\t\tself.init_stats()\n\t\tself.init_reference_types()\n\t\n\t\n\tdef init_cache(self, cache_path):\n\t\tself.cache_path = os.path.join(cache_path, self.index_name)\n\t\tself.cache = diskcache.Cache(self.cache_path)\n\t\tself.cache.reset('cull_limit', 0)\n\t\tinfo(\"Using Elastic Search cache at {}\".format(cache_path))\n\t\n\t\n\tdef init_stats(self):\n\t\tself.max_total_documents = 0\n\t\tself.min_total_documents = 0\n\t\tself.min_threshold = MIN_THRESHOLD_HARD\n\t\tself.total_users = 0\n\n\t\ttry:\n\t\t\tagg = deepcopy(CORPUS_SIZE_RT)\n\t\t\tres = self.search(agg)\n\t\t\tdf = json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\tdf = df.rename(columns={\n\t\t\t\t'key.is_retweet': 'rt',\n\t\t\t\t'key.is_reply': 're',\n\t\t\t\t'key.is_quote': 'qt'\n\t\t\t})\n\t\t\t\n\t\t\tself.max_total_documents = df['doc_count'].sum()\n\t\t\tself.min_total_documents = df.query('rt == False and re == False and qt == False')['doc_count'].sum()\n\t\t\tself.min_threshold = max(MIN_THRESHOLD_HARD, int(self.min_total_documents * MIN_THRESHOLD_PROPORTION))\n\t\t\t\n\t\t\tinfo(\"Total documents: {} - {} Min threshold: {}\".format(self.min_total_documents, self.max_total_documents, self.min_threshold))\n\t\t\t\n\t\t\tself.total_users = self.corpus_users_count()\n\t\t\t\n\t\t\tinfo(\"Total users estimate: {}\".format(self.total_users))\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tmessage = \"rt totals & users aggregations failed\"\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\n\t\n\tdef init_reference_types(self):\n\t\tself.reference_types = {}\n\t\twith open(REFERENCE_TYPE_LIST, 'r', encoding='utf-8') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.strip()\n\t\t\t\tif line:\n\t\t\t\t\tdoc_prop, type = line.split('\\t')\n\t\t\t\t\tself.reference_types[type] = float(doc_prop)\n\t\tinfo(\"Reference types loaded: {} types\".format( len(self.reference_types) ))\n\n\t\n\tdef date_list(self, str_format=\"%Y-%m-%dT00:00:00.000Z\"):\n\t\tdates = []\n\t\tfor d in pd.date_range(start=self.start_date, end=self.end_date):\n\t\t\tdates.append(d.strftime(str_format))\n\t\treturn dates\n\n\n\tdef keyness(self, type, doc_freq, corp_size, n_tests):\n\t\ta_rel = doc_freq / corp_size\n\t\tb_rel = self.reference_types.get(type, 0.0)\n\t\t\n\t\tif a_rel > b_rel:\n\t\t\t# simple maths\n\t\t\tsm = (a_rel + 
KEYNESS_SM_SMOOTHING) / (b_rel + KEYNESS_SM_SMOOTHING)\n\t\t\t\n\t\t\tif sm >= KEYNESS_SM_THRESHOLD:\n\t\t\t\treturn sm\n\t\t\n\t\treturn 0.0\n\t\n\t\n\tdef search(self, query):\n\t\tself.set_min_threshold(query)\n\t\t\n\t\tres = None\n\t\tif self.cache is not None:\n\t\t\tres = self.cache.get(query, None)\n\t\tif res is None:\n\t\t\tres = self.es.search(index=self.index_name, body=query)\n\t\t\tif self.cache is not None:\n\t\t\t\tself.cache[query] = res\n\t\treturn res\n\t\n\t\n\tdef count(self, query):\n\t\tres = None\n\t\tif self.cache is not None:\n\t\t\tres = self.cache.get(query, None)\n\t\tif res is None:\n\t\t\tres = self.es.count(index=self.index_name, body=query)\n\t\t\tif self.cache is not None:\n\t\t\t\tself.cache[query] = res\n\t\treturn res\n\t\n\t\n\tdef add_rt_filter(self, agg, include_rt=False, include_qt=False, include_re=False):\n\t\tfor filter in agg['query']['bool']['filter']:\n\t\t\tif 'script' in filter:\n\t\t\t\tfilter['script']['script']['params']['rt'] = include_rt\n\t\t\t\tfilter['script']['script']['params']['qt'] = include_qt\n\t\t\t\tfilter['script']['script']['params']['re'] = include_re\n\t\n\t\n\tdef add_wildcard_filter(self, agg, field, search):\n\t\tif search is not None and search != '':\n\t\t\tinclude = re.sub(r'\\W+', '.+', search)\n\t\t\tquery = re.sub(r'\\W+', '*', search)\n\t\t\tagg['query']['bool']['must'].append({ \"wildcard\": { field: \"*\" + query + \"*\" } })\n\t\t\tagg['aggs']['counts']['terms']['include'] = \".*\" + include + \".*\"\n\t\n\t\n\tdef add_term_filter(self, agg, term):\n\t\tif term is not None and term != '':\n\t\t\tsearch_term, field = self.search_term_and_field(term)\n\t\t\tagg['query']['bool']['must'].append( { \"term\": { field: search_term } } )\n\t\n\t\n\tdef add_date_range(self, agg, date_range):\n\t\tif date_range and len(date_range) == 2:\n\t\t\tif date_range[0] is None:\n\t\t\t\tdate_range[0] = self.start_date\n\t\t\tif date_range[1] is None:\n\t\t\t\tdate_range[1] = self.end_date\n\t\t\t\n\t\t\tfor filter in agg['query']['bool']['filter']:\n\t\t\t\tif 'range' in filter:\n\t\t\t\t\tfilter['range']['timestamp'] = {\n\t\t\t\t\t\t'gte': date_range[0] + 'T00:00:00',\n\t\t\t\t\t\t'lte': date_range[1] + 'T23:59:59'\n\t\t\t\t\t}\n\t\n\t\n\tdef set_min_threshold(self, agg):\n\t\tif 'aggs' in agg:\n\t\t\tif 'counts' in agg['aggs']:\n\t\t\t\tif 'terms' in agg['aggs']['counts']:\n\t\t\t\t\tif 'min_doc_count' in agg['aggs']['counts']['terms']:\n\t\t\t\t\t\tagg['aggs']['counts']['terms']['min_doc_count'] = self.min_threshold\n\t\n\t\n\tdef search_term_and_field(self, term):\n\t\tfield = \"types\"\n\t\tsearch_term = term\n\t\tif term.endswith(\"/\"):\n\t\t\tfield = \"websites\"\n\t\t\tsearch_term = term[:-1]\n\t\telif term.startswith(\"#\"):\n\t\t\tfield = \"hashtags\"\n\t\t\tsearch_term = term[1:]\n\t\treturn search_term, field\n\t\n\t\n\tdef filter_keys(self, df, stopwords=set(), seeds=set(), terms=set()):\n\t\tterm_stopwords = set([self.search_term_and_field(t)[0] for t in terms])\n\t\tfilter = stopwords | seeds | term_stopwords\n\t\tdf['key'] = df['key'].apply(lambda x: None if x in filter or contains_digit(x) else x)\n\t\tdf = df.dropna(subset=['key'])\n\t\treturn df\n\t\n\t\n\tdef filter_urls(self, df):\n\t\tdf['key'] = df['key'].apply(lambda x: None if 'twitter.com/i/web/status' in x else x)\n\t\tdf = df.dropna(subset=['key'])\n\t\treturn df\n\t\n\t\n\tdef get_aggregation(self, agg, field):\n\t\tagg['aggs']['counts']['terms']['field'] = field\n\t\n\t\ttry:\n\t\t\tres = self.search(agg)\n\t\t\tdf = 
json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\ttotal = res['aggregations']['total']['value']\n\t\t\treturn df, total\n\t\texcept Exception as e:\n\t\t\tmessage = \"aggregation failed on {}\".format(field)\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\n\t\n\tdef corpus_size(self, include_rt=False, include_qt=False, include_re=False, date_range=None):\n\t\tagg = deepcopy(CORPUS_SIZE)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\tif date_range:\n\t\t\tself.add_date_range(agg, date_range)\n\t\n\t\ttry:\n\t\t\tres = self.count(agg)\n\t\t\t\n\t\t\treturn int(res['count'])\n\t\texcept Exception as e:\n\t\t\tmessage = \"corpus size count failed\"\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\n\t\n\tdef corpus_users_count(self, include_rt=False, include_qt=False, include_re=False, date_range=None):\n\t\tagg = deepcopy(CORPUS_USERS_COUNT)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\tif date_range:\n\t\t\tself.add_date_range(agg, date_range)\n\t\n\t\ttry:\n\t\t\tres = self.search(agg)\n\t\t\t\n\t\t\treturn int(res['aggregations']['users']['value'])\n\t\texcept Exception as e:\n\t\t\tmessage = \"corpus users count failed\"\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\n\t\n\tdef time_series_totals(self, include_rt=False, include_qt=False, include_re=False, date_range=None):\n\t\tdf = pd.DataFrame(data = self.date_list(), columns = ['_key_as_string'])\n\t\t\n\t\tagg = deepcopy(DAY_TOTALS)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\tself.add_date_range(agg, date_range)\n\t\n\t\ttry:\n\t\t\tres = self.search(agg)\t\t\t\n\t\t\tnew_df = json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\tnew_df = new_df.drop(columns=['key'])\n\t\t\tnew_df = new_df.rename(columns={\"key_as_string\": \"_key_as_string\", \"doc_count\": \"_total\"})\n\t\t\t\n\t\t\tdf = df.merge(new_df, 'outer', '_key_as_string')\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tmessage = \"types and date aggregation failed on {}\".format(terms)\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\n\t\tdf = df.fillna(0)\n\t\t\n\t\treturn df\n\t\n\t\n\tdef time_series_total_sharing(self):\n\t\tdf = pd.DataFrame(data = self.date_list(), columns = ['_key_as_string'])\n\t\t\n\t\tagg = deepcopy(DAY_TOTALS_SHARING)\n\t\n\t\ttry:\n\t\t\tres = self.search(agg)\t\t\t\n\t\t\tnew_df = json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\t\n\t\t\tnew_df = new_df.drop(columns=['key'])\n\t\t\tnew_df = new_df.rename(columns={\"key_as_string\": \"_key_as_string\", \"doc_count\": \"total\"})\n\t\t\t\n\t\t\tdf = df.merge(new_df, 'outer', '_key_as_string')\n\t\t\t\n\t\t\tfor agg_name in ['rt_counts', 'qt_counts', 're_counts']:\n\t\t\t\tnew_df = json_normalize(res['aggregations'][agg_name]['dates']['buckets'])\n\t\t\t\tnew_df = new_df.drop(columns=['key'])\n\t\t\t\tnew_df = new_df.rename(columns={\"key_as_string\": \"_key_as_string\", \"doc_count\": agg_name})\n\t\t\t\t\n\t\t\t\tdf = df.merge(new_df, 'outer', '_key_as_string')\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tmessage = \"date and sharing aggregation failed\"\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\n\t\tdf = df.fillna(0)\n\t\t\n\t\treturn df\n\t\n\t\n\tdef types_aggregation(self, include_rt=False, include_qt=False, include_re=False, search=None):\n\t\tagg = deepcopy(AGG_LIST)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\t\n\t\tif 
search:\n\t\t\tself.add_wildcard_filter(agg, \"types\", search)\n\t\t\t\t\n\t\tdf, total = self.get_aggregation(agg, \"types\")\n\t\t\t\t\n\t\tif len(df.index) == 0:\n\t\t\treturn None, 0\n\t\t\n\t\tdf = self.filter_keys(df, stopwords=STOPWORDS_EN, seeds=SEED_STOPWORDS)\n\t\t\n\t\tif len(df.index) == 0:\n\t\t\treturn None, 0\n\t\t\n\t\treturn df.head(RETURN_SIZE), total\n\t\n\t\n\tdef keywords_aggregation(self, include_rt=False, include_qt=False, include_re=False, search=None):\n\t\tagg = deepcopy(AGG_LIST)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\tagg[\"aggs\"][\"counts\"][\"terms\"][\"size\"] = CANDIDATE_KEYWORDS_SIZE\n\t\t\n\t\tif search:\n\t\t\tself.add_wildcard_filter(agg, \"types\", search)\n\t\t\n\t\tdf, total = self.get_aggregation(agg, \"types\")\n\t\t\n\t\tif len(df.index) == 0:\n\t\t\treturn None, 0\n\t\t\n\t\tdf = self.filter_keys(df, stopwords=STOPWORDS_EN, seeds=SEED_STOPWORDS)\n\t\t\n\t\tif len(df.index) == 0:\n\t\t\treturn None, 0\n\t\t\n\t\tn_tests = len(df.index)\n\t\tcorp_size = self.corpus_size(include_rt=include_rt, include_qt=include_qt, include_re=include_re)\n\t\tdf['keyness'] = df.apply(lambda x: self.keyness(x['key'], x['doc_count'], corp_size, n_tests), axis=1)\n\t\tdf = df.query('keyness > 0.0')\n\t\tdf = df.sort_values(by='keyness', ascending=False)\n\t\t\n\t\treturn df.head(RETURN_SIZE), total\n\t\n\t\n\tdef hashtags_aggregation(self, include_rt=False, include_qt=False, include_re=False, search=None):\n\t\tagg = deepcopy(AGG_LIST)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\t\n\t\tif search:\n\t\t\tself.add_wildcard_filter(agg, \"hashtags\", search)\n\t\t\n\t\tdf, total = self.get_aggregation(agg, \"hashtags\")\n\t\t\n\t\tif len(df.index) == 0:\n\t\t\treturn None, 0\n\t\t\n\t\tdf = self.filter_keys(df, seeds=SEED_STOPWORDS)\n\t\t\n\t\tif len(df.index) == 0:\n\t\t\treturn None, 0\n\t\t\n\t\tdf['key'] = df['key'].apply(lambda x: \"#\" + x)\n\t\t\n\t\treturn df.head(RETURN_SIZE), total\n\t\n\t\n\tdef websites_aggregation(self, include_rt=False, include_qt=False, include_re=False, search=None):\n\t\tagg = deepcopy(AGG_LIST)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\t\n\t\tif search:\n\t\t\tself.add_wildcard_filter(agg, \"websites\", search)\n\t\t\n\t\tdf, total = self.get_aggregation(agg, \"websites\")\n\t\t\n\t\tif len(df.index) == 0:\n\t\t\treturn None, 0\n\t\t\t\n\t\tdf = self.filter_urls(df)\n\t\t\n\t\tif len(df.index) == 0:\n\t\t\treturn None, 0\n\t\t\n\t\tdf['key'] = df['key'].apply(lambda x: x + \"/\")\n\t\t\n\t\treturn df.head(RETURN_SIZE), total\n\t\t\n\t\n\tdef time_series_search(self, terms, include_rt=False, include_qt=False, include_re=False, normalise=False, date_range=None):\n\t\tdf = pd.DataFrame(data = self.date_list(), columns = ['_key_as_string'])\n\t\t\n\t\tfor term in terms:\n\t\t\tagg = deepcopy(SEARCH_DAYS)\n\t\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\t\tself.add_term_filter(agg, term)\n\t\t\tself.add_date_range(agg, date_range)\n\t\t\n\t\t\ttry:\n\t\t\t\tres = self.search(agg)\t\t\t\n\t\t\t\tnew_df = json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\t\t\n\t\t\t\tif len(new_df.index) == 0:\n\t\t\t\t\tbreak\n\t\t\t\t\n\t\t\t\tnew_df = new_df.drop(columns=['key'])\n\t\t\t\tnew_df = new_df.rename(columns={\"key_as_string\": \"_key_as_string\", \"doc_count\": term})\n\t\t\t\t\n\t\t\t\tdf = df.merge(new_df, 'outer', '_key_as_string')\n\t\t\t\t\n\t\t\texcept Exception as e:\n\t\t\t\tmessage = \"types and date aggregation failed on 
{}\".format(terms)\n\t\t\t\texception(message, e)\n\t\t\t\traise SearchException(message)\n\t\t\n\t\tif len(df.index) == 0:\n\t\t\treturn None\n\t\t\n\t\tdf = df.fillna(0)\n\t\t\n\t\t# normalise\n\t\tif normalise:\n\t\t\ttry:\n\t\t\t\tday_totals_df = self.time_series_totals(include_rt=include_rt, include_qt=include_qt, include_re=include_re)\n\t\t\n\t\t\t\tdf = df.merge(day_totals_df, 'outer', '_key_as_string')\n\t\t\t\tdf[terms] = df[terms].div(df['_total'], axis=0)\n\t\t\t\tdf = df.drop(columns=['_total'])\n\t\t\texcept Exception as e:\n\t\t\t\tmessage = \"types and date aggregation failed on {}\".format(terms)\n\t\t\t\texception(message, )\n\t\t\t\traise SearchException(message)\n\t\t\n\t\treturn df\n\t\n\t\n\tdef cooccuring_types_search(self, terms, include_rt=False, include_qt=False, include_re=False, date_range=None):\n\t\tdf = None\n\t\trank_df = None\n\t\t\n\t\ttry:\t\t\n\t\t\tfor term in terms:\n\t\t\t\tagg = deepcopy(TERM_TYPES)\n\t\t\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\t\t\tself.add_term_filter(agg, term)\n\t\t\t\tself.add_date_range(agg, date_range)\n\t\t\t\t\t\t\n\t\t\t\tres = self.search(agg)\n\t\t\t\tnew_df = json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\t\t\n\t\t\t\tif len(new_df.index) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\ttotal = res['aggregations']['total']['value']\n\t\t\t\t\n\t\t\t\tnew_df = self.filter_keys(new_df, stopwords=STOPWORDS_EN, seeds=SEED_STOPWORDS, terms=terms)\n\t\t\t\t\n\t\t\t\tif len(new_df.index) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\tnew_df = new_df.rename(columns={\"key\": \"_key\"})\n\t\t\t\tnew_df[term] = new_df['doc_count'].div(total)\n\t\t\t\tnew_df = new_df.drop(columns=['doc_count'])\n\t\t\t\n\t\t\t\tif df is None:\n\t\t\t\t\tdf = new_df\n\t\t\t\telse:\n\t\t\t\t\tdf = df.merge(new_df, 'outer', '_key')\n\t\t\t\n\t\t\tif df is None:\n\t\t\t\treturn None, None\n\t\t\t\n\t\t\tdf = df.fillna(0)\n\t\t\t\n\t\t\t# ranks\n\t\t\trank_df = df.copy()\n\t\t\tfor term in terms:\n\t\t\t\tif term in rank_df.columns:\n\t\t\t\t\trank_df[term] = rank_df[term].rank(ascending=False).apply(lambda x: 51 - x if x <= 50 else 0)\n\t\t\t\n\t\t\trank_df['_max'] = rank_df.max(axis=1, numeric_only=True)\n\t\t\trank_df = rank_df.query('_max > 0')\n\t\t\trank_df = rank_df.sort_values(by='_max', ascending=False)\n\t\t\t\n\t\t\t# filter and sort\n\t\t\tdf = df[ df['_key'].isin(rank_df['_key']) ]\n\t\t\tdf['_max'] = df.max(axis=1, numeric_only=True)\n\t\t\tdf = df.sort_values(by='_max', ascending=False)\n\t\t\t\n\t\texcept Exception as e:\n\t\t\tmessage = \"cooccuring types aggregation failed on {}\".format(terms)\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\n\t\treturn df, rank_df\n\t\n\t\n\tdef cooccuring_hashtags_search(self, terms, include_rt=False, include_qt=False, include_re=False, date_range=None):\n\t\tdf = None\n\t\trank_df = None\n\t\t\n\t\ttry:\n\t\t\n\t\t\tfor term in terms:\n\t\t\t\tagg = deepcopy(TERM_HASHTAGS)\n\t\t\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\t\t\tself.add_term_filter(agg, term)\n\t\t\t\tself.add_date_range(agg, date_range)\n\t\t\t\t\n\t\t\t\tres = self.search(agg)\t\t\t\n\t\t\t\tnew_df = json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\t\t\n\t\t\t\tif len(new_df.index) == 0:\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\ttotal = res['aggregations']['total']['value']\n\t\t\t\t\n\t\t\t\tnew_df = self.filter_keys(new_df, stopwords=STOPWORDS_EN, seeds=SEED_STOPWORDS, terms=terms)\n\t\t\t\t\n\t\t\t\tif len(new_df.index) == 
0:\n\t\t\t\t\tcontinue\n\t\t\t\t\n\t\t\t\tnew_df['key'] = new_df['key'].apply(lambda x: \"#\" + x)\n\t\t\t\t\n\t\t\t\tnew_df = new_df.rename(columns={\"key\": \"_key\"})\t\t\t\t\n\t\t\t\tnew_df[term] = new_df['doc_count'].div(total)\n\t\t\t\tnew_df = new_df.drop(columns=['doc_count'])\n\t\t\t\t\n\t\t\t\tif df is None:\n\t\t\t\t\tdf = new_df\n\t\t\t\telse:\n\t\t\t\t\tdf = df.merge(new_df, 'outer', '_key')\n\t\t\t\n\t\t\tif df is None:\n\t\t\t\treturn None, None\n\t\t\t\n\t\t\tdf = df.fillna(0)\n\t\t\t\n\t\t\t# ranks\n\t\t\trank_df = df.copy()\n\t\t\tfor term in terms:\n\t\t\t\tif term in rank_df.columns:\n\t\t\t\t\trank_df[term] = rank_df[term].rank(ascending=False).apply(lambda x: 51 - x if x <= 50 else 0)\n\t\t\t\n\t\t\trank_df['_max'] = rank_df.max(axis=1, numeric_only=True)\n\t\t\trank_df = rank_df.query('_max > 0')\n\t\t\trank_df = rank_df.sort_values(by='_max', ascending=False)\n\t\t\t\n\t\t\t# filter and sort\n\t\t\tdf = df[ df['_key'].isin(rank_df['_key']) ]\n\t\t\tdf['_max'] = df.max(axis=1, numeric_only=True)\n\t\t\tdf = df.sort_values(by='_max', ascending=False)\n\t\t\n\t\texcept Exception as e:\n\t\t\tmessage = \"cooccuring hashtags aggregation failed on {}\".format(terms)\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\t\n\t\treturn df, rank_df\n\t\n\t\n\t\n\tdef term_rt_aggregation(self, term, include_rt=False, include_qt=False, include_re=False, date_range=None):\n\t\tagg = deepcopy(TERM_RT)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\tself.add_term_filter(agg, term)\n\t\tself.add_date_range(agg, date_range)\n\t\t\n\t\ttry:\n\t\t\tres = self.search(agg)\n\t\t\t\n\t\t\tdf = json_normalize(res['aggregations']['rt_counts']['buckets']).rename(columns={\"doc_count\": \"Retweets\"}).drop(columns=['key'])\n\t\t\tqt_df = json_normalize(res['aggregations']['qt_counts']['buckets']).rename(columns={\"doc_count\": \"Quote Tweets\"}).drop(columns=['key'])\n\t\t\tre_df = json_normalize(res['aggregations']['re_counts']['buckets']).rename(columns={\"doc_count\": \"Replies\"}).drop(columns=['key'])\n\t\t\t\n\t\t\tdf = df.merge(qt_df, 'outer', 'key_as_string')\n\t\t\tdf = df.merge(re_df, 'outer', 'key_as_string')\n\t\t\tdf = df.fillna(0)\n\t\t\t\n\t\t\tcols = []\n\t\t\t\n\t\t\tif not include_rt:\n\t\t\t\tdf = df.drop(columns=['Retweets'])\n\t\t\telse:\n\t\t\t\tcols.append('Retweets')\n\t\t\t\n\t\t\tif not include_qt:\n\t\t\t\tdf = df.drop(columns=['Quote Tweets'])\n\t\t\telse:\n\t\t\t\tcols.append('Quote Tweets')\n\t\t\t\n\t\t\tif not include_re:\n\t\t\t\tdf = df.drop(columns=['Replies'])\n\t\t\telse:\n\t\t\t\tcols.append('Replies')\n\t\t\t\n\t\t\tif len(cols) == 0:\n\t\t\t\treturn None\n\t\t\t\n\t\t\ttotal = df[cols[0]].sum()\n\t\t\tdf[cols] = df[cols].div(total)\n\t\t\t\n\t\t\tdf = pd.melt(df, id_vars=['key_as_string'], value_vars=cols, var_name='type', value_name='doc_count')\n\t\t\t\n\t\t\tdf = df.sort_values(by='key_as_string', ascending=False)\n\t\t\t\n\t\t\treturn df\n\t\texcept Exception as e:\n\t\t\tmessage = \"term rt aggregation failed on {}\".format(term)\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\n\t\n\tdef term_types_aggregation(self, term, include_rt=False, include_qt=False, include_re=False, date_range=None):\n\t\tagg = deepcopy(TERM_TYPES)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\tself.add_term_filter(agg, term)\n\t\tself.add_date_range(agg, date_range)\n\t\t\n\t\ttry:\n\t\t\tres = self.search(agg)\n\t\t\t\t\t\t\n\t\t\tdf = 
json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\t\n\t\t\tif len(df.index) == 0:\n\t\t\t\treturn None, 0\n\t\t\t\n\t\t\ttotal = res['aggregations']['total']['value']\n\t\t\t\n\t\t\tdf = self.filter_keys(df, stopwords=STOPWORDS_EN, seeds=SEED_STOPWORDS, terms=[term])\n\t\t\t\n\t\t\tif len(df.index) == 0:\n\t\t\t\treturn None, 0\n\t\t\t\n\t\t\tdf['proportion'] = df['doc_count'].div(total)\n\t\t\t\n\t\t\treturn df.head(RETURN_SIZE), total\n\t\texcept Exception as e:\n\t\t\tmessage = \"term types aggregation failed on {}\".format(term)\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\n\t\n\tdef term_ngrams_aggregation(self, term, n, include=None, include_rt=False, include_qt=False, include_re=False, date_range=None):\n\t\tagg = None\n\t\tif n <= 2:\n\t\t\tagg = deepcopy(TERM_BIGRAMS)\n\t\telif n >= 3:\n\t\t\tagg = deepcopy(TERM_TRIGRAMS)\n\t\t\n\t\tinclude_regex = re.compile(r\"\")\n\t\tif include:\n\t\t\t\tinclude_regex = re.compile(r\"(?:^| )\" + include + \"(?: |$)\")\n\t\t\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\tself.add_term_filter(agg, term)\n\t\tself.add_date_range(agg, date_range)\n\t\t\n\t\ttry:\n\t\t\tres = self.search(agg)\n\t\t\t\n\t\t\tdf = json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\t\n\t\t\tif len(df.index) == 0:\n\t\t\t\treturn None, 0\n\t\t\t\n\t\t\ttotal = res['aggregations']['total']['value']\n\t\t\t\n\t\t\tdf['key'] = df['key'].apply(lambda x: None if not include_regex.search(x) else x)\n\t\t\tdf = df.dropna(subset=['key'])\n\t\t\t\n\t\t\tif len(df.index) == 0:\n\t\t\t\treturn None, 0\n\t\t\t\n\t\t\tdf['proportion'] = df['doc_count'].div(total)\n\t\t\t\n\t\t\treturn df.head(RETURN_SIZE), total\n\t\texcept Exception as e:\n\t\t\tmessage = \"term types aggregation failed on {}\".format(term)\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\n\t\n\tdef term_hashtags_aggregation(self, term, include_rt=False, include_qt=False, include_re=False, date_range=None):\n\t\tagg = deepcopy(TERM_HASHTAGS)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\tself.add_term_filter(agg, term)\n\t\tself.add_date_range(agg, date_range)\n\t\t\t\t\n\t\ttry:\n\t\t\tres = self.search(agg)\n\t\t\tdf = json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\t\n\t\t\tif len(df.index) == 0:\n\t\t\t\treturn None, 0\n\t\t\t\n\t\t\ttotal = res['aggregations']['total']['value']\n\t\t\t\n\t\t\tdf = self.filter_keys(df, seeds=SEED_STOPWORDS, terms=[term])\n\t\t\t\n\t\t\tif len(df.index) == 0:\n\t\t\t\treturn None, 0\n\t\t\t\n\t\t\tdf['key'] = df['key'].apply(lambda x: \"#\" + x)\n\t\t\tdf['proportion'] = df['doc_count'].div(total)\n\t\t\t\n\t\t\treturn df.head(RETURN_SIZE), total\n\t\texcept Exception as e:\n\t\t\tmessage = \"term hashtags aggregation failed on {}\".format(term)\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\n\t\n\tdef term_websites_aggregation(self, term, include_rt=False, include_qt=False, include_re=False, date_range=None):\n\t\tagg = deepcopy(TERM_WEBSITES)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\tself.add_term_filter(agg, term)\n\t\tself.add_date_range(agg, date_range)\n\t\t\n\t\ttry:\n\t\t\tres = self.search(agg)\n\t\t\tdf = json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\t\n\t\t\tif len(df.index) == 0:\n\t\t\t\treturn None, 0\n\t\t\t\n\t\t\ttotal = res['aggregations']['total']['value']\n\t\t\t\n\t\t\tdf = self.filter_keys(df, terms=[term])\n\t\t\tdf = self.filter_urls(df)\n\t\t\t\n\t\t\tif 
len(df.index) == 0:\n\t\t\t\treturn None, 0\n\t\t\t\n\t\t\tdf['key'] = df['key'].apply(lambda x: x + \"/\")\n\t\t\tdf['proportion'] = df['doc_count'].div(total)\n\t\t\t\n\t\t\treturn df.head(RETURN_SIZE), total\n\t\texcept Exception as e:\n\t\t\tmessage = \"term websites aggregation failed on {}\".format(term)\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\n\t\n\tdef term_urls_aggregation(self, term, include_rt=False, include_qt=False, include_re=False, website=None, date_range=None):\n\t\tagg = deepcopy(TERM_URLS)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\tself.add_term_filter(agg, term)\n\t\tself.add_date_range(agg, date_range)\n\t\t\n\t\tif website:\n\t\t\tagg['aggs']['counts']['terms']['include'] = \".*\" + website + \".*\"\n\t\t\n\t\ttry:\n\t\t\tres = self.search(agg)\n\t\t\tdf = json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\t\n\t\t\tif len(df.index) == 0:\n\t\t\t\treturn None, 0\n\t\t\t\n\t\t\tdf = self.filter_urls(df)\n\t\t\n\t\t\tif len(df.index) == 0:\n\t\t\t\treturn None, 0\n\t\t\t\n\t\t\ttotal = res['aggregations']['total']['value']\n\t\t\t\n\t\t\tdf['proportion'] = df['doc_count'].div(total)\n\t\t\t\n\t\t\treturn df.head(RETURN_SIZE), total\n\t\texcept Exception as e:\n\t\t\tmessage = \"term urls aggregation failed on {}\".format(term)\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\n\t\n\tdef term_users_aggregation(self, term, include_rt=False, include_qt=False, include_re=False, date_range=None):\n\t\tagg = deepcopy(TERM_USERS)\n\t\tself.add_rt_filter(agg, include_rt, include_qt, include_re)\n\t\tself.add_term_filter(agg, term)\n\t\tself.add_date_range(agg, date_range)\n\t\t\n\t\ttry:\n\t\t\tres = self.search(agg)\n\t\t\t\n\t\t\tdf = json_normalize(res['aggregations']['counts']['buckets'])\n\t\t\t\n\t\t\tif len(df.index) == 0:\n\t\t\t\treturn None, 0, 0\n\t\t\t\n\t\t\ttotal = res['aggregations']['total']['value']\n\t\t\tusers = res['aggregations']['users']['value']\n\t\t\t\n\t\t\tdf['proportion'] = df['doc_count'].div(total)\n\t\t\t\n\t\t\treturn df, total, users\n\t\texcept Exception as e:\n\t\t\tmessage = \"term users aggregation failed on {}\".format(term)\n\t\t\texception(message, e)\n\t\t\traise SearchException(message)\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n\t" ]
[ [ "pandas.json_normalize", "pandas.melt", "pandas.date_range" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [ "2.0" ], "scipy": [], "tensorflow": [] } ]
marwash25/subpruning
[ "5361dfd186f061aa155a03d263537cc356529c96", "5361dfd186f061aa155a03d263537cc356529c96" ]
[ "SubmodularStreamingMaximization/tests/main.py", "SubmodularStreamingMaximization/experiments/creditfraud/run.py" ]
[ "#!/usr/bin/env python3\n\nimport numpy as np\nfrom numpy.linalg import slogdet\n\nfrom PySSM import SubmodularFunctionlist\nfrom PySSM import IVM, FastIVM\nfrom PySSM import RBFKernel\n\nfrom PySSM import Greedylist\nfrom PySSM import Randomlist\nfrom PySSM import SieveStreaminglist\nfrom PySSM import SieveStreamingPPlist\nfrom PySSM import ThreeSieveslist\n\n\ndef logdet(X):\n X = np.array(X)\n K = X.shape[0]\n kmat = np.zeros((K,K))\n\n for i, xi in enumerate(X):\n for j, xj in enumerate(X):\n kval = 1.0*np.exp(-np.sum((xi-xj)**2) / 1.0)\n if i == j:\n kmat[i][i] = 1.0 + kval / 1.0**2\n else:\n kmat[i][j] = kval / 1.0**2\n kmat[j][i] = kval / 1.0**2\n return slogdet(kmat)[1]\n\n\nclass FastLogdet(SubmodularFunctionlist):\n def __init__(self, K):\n super().__init__()\n self.added = 0\n self.K = K\n self.kmat = np.zeros((K,K))\n\n def peek(self, X, x, pos):\n # if self.added == 0:\n # return 0\n\n if pos >= self.added:\n #X = np.array(X)\n x = np.array(x)\n\n row = []\n for xi in X:\n kval = 1.0*np.exp(-np.sum((xi-x)**2) / 1.0)\n row.append(kval)\n kval = 1.0*np.exp(-np.sum((x-x)**2) / 1.0)\n row.append(1.0 + kval / 1.0**2)\n\n self.kmat[:self.added, self.added] = row[:-1]\n self.kmat[self.added, :self.added + 1] = row\n return slogdet(self.kmat[:self.added + 1,:self.added + 1])[1]\n else:\n print(\"pos < solution size\")\n return 0\n\n def update(self, X, x, pos):\n #X = np.array(X)\n if pos >= self.added:\n fval = self.peek(X, x, pos)\n self.added += 1\n return fval\n else:\n return 0\n\n def clone(self):\n return FastLogdet(self.K)\n\n # print(\"CLONE\")\n # cloned = FastLogdet.__new__(FastLogdet)\n # print(cloned)\n # # clone C++ state\n # #SubmodularFunction.__init__(self, cloned)\n # FastLogdet.__init__(self, self.K)\n # # clone Python state\n # cloned.__dict__.update(self.__dict__)\n # print(\"CLONE DONE\")\n # print(cloned.__call__)\n # print(self.__call__)\n # return cloned\n\n def __call__(self, X):\n return logdet(X)\n\n\n\n# # optimizers = [SieveStreaming] #Greedy, Random\n# # for clazz in optimizers:\n# # kernel = RBFKernel(sigma=1,scale=1)\n# # slowIVM = IVM(kernel = kernel, sigma = 1.0)\n#\n# # opt = clazz(K, slowIVM)\n# #opt = clazz(K, logdet)\n# # fastLogDet = FastLogdet(K)\n# # opt = SieveStreaming(K, fastLogDet, 2.0, 0.1)\n# # opt = SieveStreamingPP(K, fastLogDet, 2.0, 0.1)\n#\n# fastLogDet = FastLogdet(K)\n# opt = ThreeSieves(K, fastLogDet, 2.0, 0.1, \"sieve\", T = 100)\n\n# X = list(range(10))\nX = [\n [0, 0],\n [1, 1],\n [0.5, 1.0],\n [1.0, 0.5],\n [0, 0.5],\n [0.5, 1],\n [0.0, 1.0],\n [1.0, 0.]\n]\n\nK = 3\nkernel = RBFKernel(sigma=1, scale=1)\nivm = IVM(kernel=kernel, sigma=1.0)\nfastivm = FastIVM(K=K, kernel=kernel, sigma=1.0)\n#fastLogDet = FastLogdet(K)\noptimizers = [SieveStreaminglist(K, FastLogdet(K), 2.0, 0.1), SieveStreaminglist(K, FastLogdet(K), 2.0, 0.1), Greedylist(K, FastLogdet(K)), ThreeSieveslist(K, FastLogdet(K), 2.0, 0.1, \"sieve\", T=10)]\n\nfor opt in optimizers:\n opt.fit(X)\n\n # Alternativley, you can use the streaming interface.\n #for x in X:\n # opt.next(x)\n\n fval = opt.get_fval()\n solution = np.array(opt.get_solution())\n f = opt.get_f()\n print(\"Found a solution with fval = {}\".format(fval))\n print(\"kmat saved in f = {}\".format(f.kmat))\n print(\"solution = \", solution)\n", "#!/usr/bin/env python3\n\nimport os\nimport random\nfrom scipy.io import arff\nimport numpy as np\nimport pandas as pd\n\nimport numpy as np\nfrom numpy.linalg import slogdet\nimport time\nfrom experiment_runner.experiment_runner_v2 import 
run_experiments\n\n# from PySSM import Matrix, Vector\nfrom PySSM import RBFKernel\nfrom PySSM import IVM, FastIVM\n\nfrom PySSM import Greedy\nfrom PySSM import Random\nfrom PySSM import SieveStreaming\nfrom PySSM import SieveStreamingPP\nfrom PySSM import ThreeSieves \nfrom PySSM import Salsa \nfrom PySSM import IndependentSetImprovement\n\nimport os\nimport numpy as np\nimport scipy.io\nimport scipy.io\nfrom sklearn import preprocessing\n\n\ndef pre(cfg):\n name = cfg[\"method\"]\n sigma = cfg[\"sigma\"]\n scale = cfg[\"scale\"]\n K = cfg[\"K\"]\n\n kernel = RBFKernel(sigma=sigma,scale=scale)\n fastLogDet = FastIVM(K, kernel, 1.0)\n\n if name == \"Greedy\":\n opt = Greedy(K, fastLogDet)\n if name == \"IndependentSetImprovement\":\n opt = IndependentSetImprovement(K, fastLogDet)\n elif name == \"Random\":\n opt = Random(K, fastLogDet, cfg[\"run_id\"])\n elif name == \"SieveStreaming\":\n e = cfg[\"epsilon\"]\n opt = SieveStreaming(K, fastLogDet, 1.0, e)\n elif name == \"SieveStreaming++\":\n e = cfg[\"epsilon\"]\n opt = SieveStreamingPP(K, fastLogDet, 1.0, e)\n elif name == \"Salsa\":\n e = cfg[\"epsilon\"]\n opt = Salsa(K, fastLogDet, 1.0, e)\n elif name == \"ThreeSieves\":\n e = cfg[\"epsilon\"]\n T = cfg[\"T\"]\n opt = ThreeSieves(K, fastLogDet, 1.0, e, \"sieve\", T)\n return opt\n\ndef fit(cfg, opt):\n X = cfg[\"X\"]\n \n opt.fit(cfg[\"X\"],cfg[\"K\"])\n return opt\n\ndef post(cfg, opt):\n return {\n \"fval\":opt.get_fval(),\n \"num_candidate_solutions\":opt.get_num_candidate_solutions(),\n \"num_elements_stored\":opt.get_num_elements_stored(),\n }\n\nprint(\"Loading data\")\n\ndata = pd.read_csv(os.path.join(os.path.dirname(__file__), \"data\", \"creditcard.csv\"), header=0, index_col=None)\n\n# Extract label vector\ny = np.array([1 if x == 0 else -1 for x in data[\"Class\"]]) # 1 = inlier, -1 = outlier\n\n# Delete irrelevant features.\ndata = data.drop(\"Time\", axis=1)\ndata = data.drop(\"Class\", axis=1)\n\n# Only values from now on\nX = data.values\n\n# MinMax normalize the data\nmin_max_scaler = preprocessing.MinMaxScaler()\nX = min_max_scaler.fit_transform(X)\n\nKs = range(5,105,5)\n# Ks = [5]\neps = [1e-1, 5e-2, 1e-2, 1e-3, 5e-3]\nTs = [500, 1000, 2500, 5000]\n#Sigmas = np.array([0.1, 0.5, 1.0, 2.0, 5.0])*np.sqrt(X.shape[1])\nSigmas = [np.sqrt(X.shape[1])]\n\nbasecfg = {\n \"out_path\":\"results\",\n \"backend\":\"multiprocessing\",\n \"num_cpus\":10,\n \"pre\": pre,\n \"post\": post,\n \"fit\": fit,\n}\n\nresults = []\n\nruns = []\nfor K in Ks:\n for s in Sigmas:\n runs.append(\n ({ \n \"method\": \"Greedy\",\n \"K\":K,\n \"sigma\":s,\n \"scale\":1,\n \"X\":X\n })\n )\n\n runs.append(\n ({ \n \"method\": \"IndependentSetImprovement\",\n \"K\":K,\n \"sigma\":s,\n \"scale\":1,\n \"X\":X\n })\n )\n\n runs.append(\n ({ \n \"method\": \"Random\",\n \"K\":K,\n \"sigma\":s,\n \"scale\":1,\n \"repetitions\":5,\n \"X\":X\n })\n )\n\n for e in eps:\n runs.append(\n ( { \n \"method\": \"SieveStreaming\",\n \"K\":K,\n \"sigma\":s,\n \"scale\":1,\n \"epsilon\":e,\n \"X\":X\n })\n )\n\n runs.append(\n ( { \n \"method\": \"SieveStreaming++\",\n \"K\":K,\n \"sigma\":s,\n \"scale\":1,\n \"epsilon\":e,\n \"X\":X\n })\n )\n\n runs.append(\n ( { \n \"method\": \"Salsa\",\n \"K\":K,\n \"sigma\":s,\n \"scale\":1,\n \"epsilon\":e,\n \"X\":X\n })\n )\n\n for T in Ts: \n runs.append(\n ( { \n \"method\": \"ThreeSieves\",\n \"K\":K,\n \"sigma\":s,\n \"scale\":1,\n \"epsilon\":e,\n \"T\":T,\n \"X\":X\n })\n )\n\nrandom.shuffle(runs)\nrun_experiments(basecfg, runs)\n" ]
[ [ "numpy.linalg.slogdet", "numpy.array", "numpy.zeros", "numpy.sum" ], [ "numpy.array", "numpy.sqrt", "sklearn.preprocessing.MinMaxScaler" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
StanfordVLSI/dragonphy2
[ "996cc14f800b01c5ec0534e79dd2340f4de5e704", "996cc14f800b01c5ec0534e79dd2340f4de5e704" ]
[ "experiments/channel_characteristics/energy_concentration.py", "tests/cpu_block_tests/reorder/test_reorder.py" ]
[ "from pathlib import Path\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom dragonphy import *\n\n\nTHIS_DIR = Path(__file__).resolve().parent\nsparam_file_list = [\"Case4_FM_13SI_20_T_D13_L6.s4p\",\n\"peters_01_0605_B1_thru.s4p\", \n \"peters_01_0605_B12_thru.s4p\", \n \"peters_01_0605_T20_thru.s4p\",\n \"TEC_Whisper42p8in_Meg6_THRU_C8C9.s4p\",\n \"TEC_Whisper42p8in_Nelco6_THRU_C8C9.s4p\"]\n\nnum_pre = np.zeros((5,), dtype=np.uint8)\n\ncolor_list = ['C0','C1','C2', 'C3','C4','C5']\n\nfor ii, sparam_file in enumerate(sparam_file_list):\n file_name = str(get_file(f'data/channel_sparam/{sparam_file}'))\n t, imp = s4p_to_impulse(file_name, 0.1e-12, 20e-9, zs=50, zl=50)\n\n im_idx = np.argmax(imp)\n t_max = t[im_idx]\n\n chan = Channel(channel_type='s4p', sampl_rate=10e12, resp_depth=200000,\n s4p=file_name, zs=50, zl=50)\n\n _, pulse = chan.get_pulse_resp(f_sig=16e9, resp_depth=350, t_delay=0)\n #Try to center the sample!\n am_idx = np.argmax(pulse)\n shift_delay = (am_idx*625 - im_idx)*0.1e-12\n _, pulse = chan.get_pulse_resp(f_sig=16e9, resp_depth=350, t_delay=-shift_delay)\n\n #Calculate the centered sample time\n am_idx = np.argmax(pulse)\n st = np.array(range(0,len(pulse)))/16e9\n st_max = st[am_idx]\n\n #Calculate the first N bins around the cursor that contain the majority (95%) of the energy\n sqr_pls = np.multiply(pulse, pulse)\n total_energy = np.dot(pulse, pulse)\n\n pre_idx = 0\n post_idx = 0\n\n finished = False\n\n cur_pos = am_idx\n\n partial_energy = sqr_pls[cur_pos]\n while not finished:\n next_pst_ener = sqr_pls[cur_pos+post_idx+1]\n if pre_idx+1 <= cur_pos:\n next_pre_ener = sqr_pls[cur_pos-pre_idx-1]\n if next_pst_ener >= next_pre_ener:\n post_idx += 1\n partial_energy = partial_energy + next_pst_ener\n else:\n pre_idx += 1\n partial_energy = partial_energy + next_pre_ener\n else:\n post_idx += 1\n partial_energy = partial_energy + next_pst_ener\n finished = partial_energy >= 0.80*total_energy\n\n print(pre_idx, post_idx, partial_energy/total_energy)\n name = sparam_file.split('.')[0]\n\n hdl = plt.plot((st - st_max)*1e9 + 2*ii, pulse, label=f'{name} - % of Energy: {partial_energy/total_energy*100 : .1f}')\n color = hdl[0].get_color()\n print(color)\n plt.stem((st[(cur_pos-pre_idx):(cur_pos+post_idx+1)] - st_max)*1e9 + 2*ii, pulse[(cur_pos-pre_idx):(cur_pos+post_idx+1)], markerfmt='ko', linefmt=color)\n\nplt.legend(prop={'size' : 36})\nplt.xlim((-2.5, 12))\nplt.xlabel('time (ns)', fontdict={'fontsize':32})\nplt.title('Impulse Response for Five Channels, with 99% Energy Samples', fontdict={'fontsize':32})\nplt.show()", "# general imports\nimport numpy as np\nfrom pathlib import Path\n\n# DragonPHY imports\nfrom dragonphy import *\n\nTHIS_DIR = Path(__file__).parent.resolve()\nBUILD_DIR = THIS_DIR / 'build'\n\ndef test_sim(dump_waveforms):\n deps = get_deps_cpu_sim(impl_file=THIS_DIR / 'test.sv')\n print(deps)\n\n def qwrap(s):\n return f'\"{s}\"'\n\n defines = {\n 'OUT_TXT': qwrap(BUILD_DIR / 'out.txt'),\n 'REP_TXT': qwrap(BUILD_DIR / 'rep.txt')\n }\n\n DragonTester(\n ext_srcs=deps,\n directory=BUILD_DIR,\n defines=defines,\n dump_waveforms=dump_waveforms\n ).run()\n\n # check the main ADC slices\n meas = np.loadtxt(BUILD_DIR / 'out.txt', dtype=int, delimiter=',')\n meas = meas.flatten()\n expct = np.arange(-128, 128)\n assert (meas == expct).all(), 'Data mismatch for main ADC slices'\n\n # check the replica ADC slices\n meas = np.loadtxt(BUILD_DIR / 'rep.txt', dtype=int, delimiter=',')\n meas = meas.flatten()\n expct = np.array([+12, -34]*16)\n assert (meas == 
expct).all(), 'Data mismatch for replica ADC slices'\n" ]
[ [ "matplotlib.pyplot.legend", "numpy.dot", "matplotlib.pyplot.stem", "matplotlib.pyplot.title", "numpy.multiply", "matplotlib.pyplot.plot", "matplotlib.pyplot.xlim", "numpy.argmax", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros" ], [ "numpy.arange", "numpy.array", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
CyberZHG/keras-gcn
[ "5186f856b6d392a3b82d0c062a21bc0420b89ec0" ]
[ "tests/test_graph_pool.py" ]
[ "import unittest\nimport os\nimport tempfile\n\nimport numpy as np\n\nfrom keras_gcn.backend import keras\nfrom keras_gcn.backend import backend as K\nfrom keras_gcn import GraphMaxPool, GraphAveragePool\nfrom keras_gcn.layers import GraphPool\n\n\nclass TestGraphPool(unittest.TestCase):\n\n input_data = np.array([\n [\n [0, 4, 8],\n [1, 5, 9],\n [2, 6, 1],\n [3, 7, 2],\n ]\n ], dtype=K.floatx())\n input_edge = np.array([\n [\n [1, 1, 1, 0],\n [1, 1, 0, 0],\n [1, 0, 1, 0],\n [0, 0, 0, 1],\n ]\n ], dtype='int32')\n\n def test_max_pool(self):\n data_layer = keras.layers.Input(shape=(None, 3), name='Input-Data')\n edge_layer = keras.layers.Input(shape=(None, None), dtype='int32', name='Input-Edge')\n conv_layer = GraphMaxPool(\n step_num=1,\n name='GraphMaxPool',\n )([data_layer, edge_layer])\n model = keras.models.Model(inputs=[data_layer, edge_layer], outputs=conv_layer)\n model.compile(\n optimizer='adam',\n loss='mae',\n metrics=['mae'],\n )\n model_path = os.path.join(tempfile.gettempdir(), 'test_save_load_%f.h5' % np.random.random())\n model.save(model_path)\n model = keras.models.load_model(model_path, custom_objects={'GraphMaxPool': GraphMaxPool})\n model.summary()\n predicts = model.predict([self.input_data, self.input_edge])[0]\n expects = np.asarray([\n [2, 6, 9],\n [1, 5, 9],\n [2, 6, 8],\n [3, 7, 2],\n ])\n self.assertTrue(np.allclose(expects, predicts), predicts)\n conv_layer = GraphMaxPool(\n step_num=2,\n name='GraphMaxPool',\n )([data_layer, edge_layer])\n model = keras.models.Model(inputs=[data_layer, edge_layer], outputs=conv_layer)\n model.compile(\n optimizer='adam',\n loss='mae',\n metrics=['mae'],\n )\n predicts = model.predict([self.input_data, self.input_edge])[0]\n expects = np.asarray([\n [2, 6, 9],\n [2, 6, 9],\n [2, 6, 9],\n [3, 7, 2],\n ])\n self.assertTrue(np.allclose(expects, predicts), predicts)\n\n def test_average_pooling(self):\n data_layer = keras.layers.Input(shape=(None, 3), name='Input-Data')\n edge_layer = keras.layers.Input(shape=(None, None), dtype='int32', name='Input-Edge')\n conv_layer = GraphAveragePool(\n step_num=1,\n name='GraphAveragePool',\n )([data_layer, edge_layer])\n model = keras.models.Model(inputs=[data_layer, edge_layer], outputs=conv_layer)\n model.compile(\n optimizer='adam',\n loss='mae',\n metrics=['mae'],\n )\n model_path = os.path.join(tempfile.gettempdir(), 'test_save_load_%f.h5' % np.random.random())\n model.save(model_path)\n model = keras.models.load_model(model_path, custom_objects={'GraphAveragePool': GraphAveragePool})\n model.summary()\n predicts = model.predict([self.input_data, self.input_edge])[0]\n expects = np.asarray([\n [1, 5, 6],\n [0.5, 4.5, 8.5],\n [1, 5, 4.5],\n [3, 7, 2],\n ])\n self.assertTrue(np.allclose(expects, predicts), predicts)\n conv_layer = GraphAveragePool(\n step_num=2,\n name='GraphAveragePool',\n )([data_layer, edge_layer])\n model = keras.models.Model(inputs=[data_layer, edge_layer], outputs=conv_layer)\n model.compile(\n optimizer='adam',\n loss='mae',\n metrics=['mae'],\n )\n predicts = model.predict([self.input_data, self.input_edge])[0]\n expects = np.asarray([\n [1, 5, 6],\n [1, 5, 6],\n [1, 5, 6],\n [3, 7, 2],\n ])\n self.assertTrue(np.allclose(expects, predicts), predicts)\n\n def test_not_implemented(self):\n with self.assertRaises(NotImplementedError):\n data_layer = keras.layers.Input(shape=(None, 3), name='Input-Data')\n edge_layer = keras.layers.Input(shape=(None, None), dtype='int32', name='Input-Edge')\n conv_layer = GraphPool(\n step_num=1,\n name='GraphPool',\n )([data_layer, 
edge_layer])\n model = keras.models.Model(inputs=[data_layer, edge_layer], outputs=conv_layer)\n model.compile(\n optimizer='adam',\n loss='mae',\n metrics=['mae'],\n )\n model.summary()\n model.predict([self.input_data, self.input_edge])\n" ]
[ [ "numpy.asarray", "numpy.array", "numpy.random.random", "numpy.allclose" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
salonirk11/tensorflow
[ "7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7", "7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7", "7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7", "7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7", "7fda1bb1177c69fa7bf80d20d5c5e7aaa25816e7" ]
[ "tensorflow/contrib/slim/python/slim/learning.py", "tensorflow/tensorboard/backend/event_processing/event_accumulator.py", "tensorflow/python/framework/importer.py", "tensorflow/python/training/momentum.py", "tensorflow/tensorboard/backend/event_processing/event_file_inspector_test.py" ]
[ "# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains TF-Slim code for training models.\n\nThis script contains various functions for training models. These include\nmanipulating gradients, creating a `train_op` (an operation that computes the\nloss and applies the gradients) and a training loop function. The training loop\nallows the user to pass in the `train_op` and runs the optimization according\nto user-specified arguments. Note that the training loop uses the\ntf.train.Supervisor and its managed_session in its implementation to ensure the\nability of worker processes to recover from failures.\n\n************************************\n* A simple working training script *\n************************************\n\n # Load data and create the model:\n images, labels = LoadData(...)\n predictions = MyModel(images)\n\n # Define the loss:\n slim.losses.log_loss(predictions, labels)\n total_loss = slim.losses.get_total_loss()\n\n # Define the optimizer:\n optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)\n\n # Create the train_op\n train_op = slim.learning.create_train_op(total_loss, optimizer)\n\n # Run training.\n slim.learning.train(train_op, my_log_dir)\n\n*************************\n* Creating the train_op *\n*************************\n\nIn order to train, TF-Slim's train loop needs a train_op: an `Operation` that\n(a) computes the loss, (b) applies the gradients to update the weights and\n(c) returns the value of the loss. slim.learning.create_train_op creates\nsuch an `Operation`. This function also provides the ability to manipulate\nthe gradients using a few arguments:\n\n # Create the train_op and clip the gradient norms:\n train_op = slim.learning.create_train_op(\n total_loss,\n optimizer,\n clip_gradient_norm=4)\n\n # Create the train_op and scale the gradients by providing a map from variable\n # name (or variable) to a scaling coefficient:\n gradient_multipliers = {\n 'conv0/weights': 1.2,\n 'fc8/weights': 3.4,\n }\n train_op = slim.learning.create_train_op(\n total_loss,\n optimizer,\n gradient_multipliers=gradient_multipliers)\n\n****************************************************************\n* Performing additional (non-gradient) updates during training *\n****************************************************************\n\nMany networks utilize modules, like BatchNorm, that require performing a series\nof non-gradient updates during training. slim.learning.create_train_op allows\na user to pass in a list of update_ops to call along with the gradient updates.\n\n train_op = slim.learning.create_train_op(total_loss, optimizer, update_ops)\n\nBy default, slim.learning.create_train_op includes all update ops that are\npart of the `tf.GraphKeys.UPDATE_OPS` collection. Additionally, TF-Slim's\nslim.batch_norm function adds the moving mean and moving variance updates to\nthis collection. 
Consequently, users who want to use slim.batch_norm will not\nneed to take any additional steps in order to have the moving mean and moving\nvariance updates be computed.\n\nHowever, users with additional, specialized updates can either override the\ndefault update ops or simply add additional update ops to the\n`tf.GraphKeys.UPDATE_OPS` collection:\n\n # Force TF-Slim NOT to use ANY update_ops:\n train_op = slim.learning.create_train_op(\n total_loss,\n optimizer,\n update_ops=[])\n\n # Use an alternative set of update ops:\n train_op = slim.learning.create_train_op(\n total_loss,\n optimizer,\n update_ops=my_other_update_ops)\n\n # Use an alternative set of update ops in addition to the default updates:\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update0)\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, my_update1)\n\n train_op = slim.learning.create_train_op(\n total_loss,\n optimizer)\n\n # Which is the same as:\n train_op = slim.learning.create_train_op(\n total_loss,\n optimizer,\n update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS))\n\n******************************************\n* Initializing a model from a checkpoint *\n******************************************\n\nIt is common to want to 'warm-start' a model from a pre-trained checkpoint.\nTF-Slim provides a convenient mechanism for doing so:\n\n ...\n\n # Create the train_op\n train_op = slim.learning.create_train_op(total_loss, optimizer)\n\n # Create the initial assignment op\n checkpoint_path = '/path/to/old_model_checkpoint'\n variables_to_restore = slim.get_model_variables()\n init_assign_op, init_feed_dict = slim.assign_from_checkpoint(\n checkpoint_path, variables_to_restore)\n\n # Create an initial assignment function.\n def InitAssignFn(sess):\n sess.run(init_assign_op, init_feed_dict)\n\n # Run training.\n slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)\n\n***************************************************************************\n* Initializing a model from a checkpoint whose variable names don't match *\n***************************************************************************\n\nAt times, a user may want to initialize a new model with values from a\ncheckpoint whose variable names do not match those of the current model. In this\ncase, one needs to create a mapping from the checkpoint variable names to the\ncurrent model variables. This requires only a small modification of the code\nabove:\n ...\n # Creates a model with two variables, var0 and var1\n predictions = MyModel(images)\n ...\n\n # Create the train_op\n train_op = slim.learning.create_train_op(total_loss, optimizer)\n\n checkpoint_path = '/path/to/old_model_checkpoint'\n\n # Create the mapping:\n variables_to_restore = {\n 'name_var_0_in_checkpoint': slim.get_unique_variable('var0'),\n 'name_var_1_in_checkpoint': slim.get_unique_variable('var1')\n }\n init_assign_op, init_feed_dict = slim.assign_from_checkpoint(\n checkpoint_path, variables_to_restore)\n\n # Create an initial assignment function.\n def InitAssignFn(sess):\n sess.run(init_assign_op, init_feed_dict)\n\n # Run training.\n slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)\n\n\n*************************************************\n* Fine-Tuning Part of a model from a checkpoint *\n*************************************************\n\nRather than initializing all of the weights of a given model, we sometimes\nonly want to restore some of the weights from a checkpoint. 
To do this, one\nneed only filter those variables to initialize as follows:\n\n ...\n\n # Create the train_op\n train_op = slim.learning.create_train_op(total_loss, optimizer)\n\n checkpoint_path = '/path/to/old_model_checkpoint'\n\n # Specify the variables to restore via a list of inclusion or exclusion\n # patterns:\n variables_to_restore = slim.get_variables_to_restore(\n include=[\"conv\"], exclude=[\"fc8\", \"fc9])\n # or\n variables_to_restore = slim.get_variables_to_restore(exclude=[\"conv\"])\n\n init_assign_op, init_feed_dict = slim.assign_from_checkpoint(\n checkpoint_path, variables_to_restore)\n\n # Create an initial assignment function.\n def InitAssignFn(sess):\n sess.run(init_assign_op, init_feed_dict)\n\n # Run training.\n slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)\n\n******************************************************\n* Initializing model variables from values in memory *\n******************************************************\n\nOne may want to initialize the weights of a model from values from an arbitrary\nsource (a text document, matlab file, etc). While this is technically feasible\nusing plain TensorFlow, it also results in the values of your weights being\nstored in the graph. For large models, this becomes prohibitively large. TF-Slim\nallows you to perform this initial assignment without having to store the values\nof the initial model in the graph itself by using placeholders and a feed\ndictionary:\n\n ...\n\n # Create the train_op\n train_op = slim.learning.create_train_op(total_loss, optimizer)\n\n # Create the mapping from variable names to values:\n var0_initial_value = ReadFromDisk(...)\n var1_initial_value = ReadFromDisk(...)\n\n var_names_to_values = {\n 'var0': var0_initial_value,\n 'var1': var1_initial_value,\n }\n init_assign_op, init_feed_dict = slim.assign_from_values(var_names_to_values)\n\n # Create an initial assignment function.\n def InitAssignFn(sess):\n sess.run(init_assign_op, init_feed_dict)\n\n # Run training.\n slim.learning.train(train_op, my_log_dir, init_fn=InitAssignFn)\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\n\nfrom tensorflow.contrib.framework.python.ops import variables\nfrom tensorflow.contrib.training.python.training import training\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.client import timeline\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.ops import clip_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import optimizer as tf_optimizer\nfrom tensorflow.python.training import saver as tf_saver\nfrom tensorflow.python.training import supervisor\nfrom tensorflow.python.training import sync_replicas_optimizer\nfrom tensorflow.python.training import training_util\n\n__all__ = [\n 'add_gradients_summaries', 'clip_gradient_norms', 'multiply_gradients',\n 'create_train_op', 'train_step', 'train'\n]\n\n\ndef clip_gradient_norms(gradients_to_variables, max_norm):\n \"\"\"Clips the gradients by the given 
value.\n\n Args:\n gradients_to_variables: A list of gradient to variable pairs (tuples).\n max_norm: the maximum norm value.\n\n Returns:\n A list of clipped gradient to variable pairs.\n \"\"\"\n clipped_grads_and_vars = []\n for grad, var in gradients_to_variables:\n if grad is not None:\n if isinstance(grad, ops.IndexedSlices):\n tmp = clip_ops.clip_by_norm(grad.values, max_norm)\n grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)\n else:\n grad = clip_ops.clip_by_norm(grad, max_norm)\n clipped_grads_and_vars.append((grad, var))\n return clipped_grads_and_vars\n\n\ndef multiply_gradients(grads_and_vars, gradient_multipliers):\n \"\"\"Multiply specified gradients.\n\n Args:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n gradient_multipliers: A map from either `Variables` or `Variable` op names\n to the coefficient by which the associated gradient should be scaled.\n\n Returns:\n The updated list of gradient to variable pairs.\n\n Raises:\n ValueError: If `grads_and_vars` is not a list or if `gradient_multipliers`\n is empty or None or if `gradient_multipliers` is not a dictionary.\n \"\"\"\n if not isinstance(grads_and_vars, list):\n raise ValueError('`grads_and_vars` must be a list.')\n if not gradient_multipliers:\n raise ValueError('`gradient_multipliers` is empty.')\n if not isinstance(gradient_multipliers, dict):\n raise ValueError('`gradient_multipliers` must be a dict.')\n\n multiplied_grads_and_vars = []\n for grad, var in grads_and_vars:\n if var in gradient_multipliers or var.op.name in gradient_multipliers:\n key = var if var in gradient_multipliers else var.op.name\n if grad is None:\n raise ValueError('Requested multiple of `None` gradient.')\n\n multiplier = gradient_multipliers[key]\n if not isinstance(multiplier, ops.Tensor):\n multiplier = constant_op.constant(multiplier, dtype=grad.dtype)\n\n if isinstance(grad, ops.IndexedSlices):\n tmp = grad.values * multiplier\n grad = ops.IndexedSlices(tmp, grad.indices, grad.dense_shape)\n else:\n grad *= multiplier\n multiplied_grads_and_vars.append((grad, var))\n return multiplied_grads_and_vars\n\n\ndef add_gradients_summaries(grads_and_vars):\n \"\"\"Add summaries to gradients.\n\n Args:\n grads_and_vars: A list of gradient to variable pairs (tuples).\n\n Returns:\n The list of created summaries.\n \"\"\"\n summaries = []\n for grad, var in grads_and_vars:\n if grad is not None:\n if isinstance(grad, ops.IndexedSlices):\n grad_values = grad.values\n else:\n grad_values = grad\n summaries.append(\n summary.histogram(var.op.name + '/gradient', grad_values))\n summaries.append(\n summary.scalar(var.op.name + '/gradient_norm',\n clip_ops.global_norm([grad_values])))\n else:\n logging.info('Var %s has no gradient', var.op.name)\n\n return summaries\n\n\n_USE_GLOBAL_STEP = 0\n\n\ndef create_train_op(total_loss,\n optimizer,\n global_step=_USE_GLOBAL_STEP,\n update_ops=None,\n variables_to_train=None,\n clip_gradient_norm=0,\n summarize_gradients=False,\n gate_gradients=tf_optimizer.Optimizer.GATE_OP,\n aggregation_method=None,\n colocate_gradients_with_ops=False,\n gradient_multipliers=None,\n check_numerics=True):\n \"\"\"Creates an `Operation` that evaluates the gradients and returns the loss.\n\n Args:\n total_loss: A `Tensor` representing the total loss.\n optimizer: A tf.Optimizer to use for computing the gradients.\n global_step: A `Tensor` representing the global step variable. 
If left as\n `_USE_GLOBAL_STEP`, then slim.variables.global_step() is used.\n update_ops: An optional list of updates to execute. If `update_ops` is\n `None`, then the update ops are set to the contents of the\n `tf.GraphKeys.UPDATE_OPS` collection. If `update_ops` is not `None`, but\n it doesn't contain all of the update ops in `tf.GraphKeys.UPDATE_OPS`,\n a warning will be displayed.\n variables_to_train: an optional list of variables to train. If None, it will\n default to all tf.trainable_variables().\n clip_gradient_norm: If greater than 0 then the gradients would be clipped\n by it.\n summarize_gradients: Whether or not add summaries for each gradient.\n gate_gradients: How to gate the computation of gradients. See tf.Optimizer.\n aggregation_method: Specifies the method used to combine gradient terms.\n Valid values are defined in the class `AggregationMethod`.\n colocate_gradients_with_ops: Whether or not to try colocating the gradients\n with the ops that generated them.\n gradient_multipliers: A dictionary of either `Variables` or `Variable` op\n names to the coefficient by which the associated gradient should be\n scaled.\n check_numerics: Whether or not we apply check_numerics.\n\n Returns:\n A `Tensor` that when evaluated, computes the gradients and returns the total\n loss value.\n \"\"\"\n def transform_grads_fn(grads):\n if gradient_multipliers:\n with ops.name_scope('multiply_grads'):\n grads = multiply_gradients(grads, gradient_multipliers)\n\n # Clip gradients.\n if clip_gradient_norm > 0:\n with ops.name_scope('clip_grads'):\n grads = clip_gradient_norms(grads, clip_gradient_norm)\n return grads\n\n return training.create_train_op(\n total_loss=total_loss,\n optimizer=optimizer,\n global_step=global_step,\n update_ops=update_ops,\n variables_to_train=variables_to_train,\n transform_grads_fn=transform_grads_fn,\n summarize_gradients=summarize_gradients,\n gate_gradients=gate_gradients,\n aggregation_method=aggregation_method,\n colocate_gradients_with_ops=colocate_gradients_with_ops,\n check_numerics=check_numerics)\n\n\ndef _wait_for_step(sess, global_step, step):\n \"\"\"Wait till the global step has reached at least 'step'.\n\n Args:\n sess: A session.\n global_step: A Tensor.\n step: Int. 
The global step to reach.\n \"\"\"\n while True:\n if training_util.global_step(sess, global_step) >= step:\n break\n time.sleep(1.0)\n\n\ndef train_step(sess, train_op, global_step, train_step_kwargs):\n \"\"\"Function that takes a gradient step and specifies whether to stop.\n\n Args:\n sess: The current session.\n train_op: An `Operation` that evaluates the gradients and returns the\n total loss.\n global_step: A `Tensor` representing the global training step.\n train_step_kwargs: A dictionary of keyword arguments.\n\n Returns:\n The total loss and a boolean indicating whether or not to stop training.\n\n Raises:\n ValueError: if 'should_trace' is in `train_step_kwargs` but `logdir` is not.\n \"\"\"\n start_time = time.time()\n\n trace_run_options = None\n run_metadata = None\n if 'should_trace' in train_step_kwargs:\n if 'logdir' not in train_step_kwargs:\n raise ValueError('logdir must be present in train_step_kwargs when '\n 'should_trace is present')\n if sess.run(train_step_kwargs['should_trace']):\n trace_run_options = config_pb2.RunOptions(\n trace_level=config_pb2.RunOptions.FULL_TRACE)\n run_metadata = config_pb2.RunMetadata()\n\n total_loss, np_global_step = sess.run([train_op, global_step],\n options=trace_run_options,\n run_metadata=run_metadata)\n time_elapsed = time.time() - start_time\n\n if run_metadata is not None:\n tl = timeline.Timeline(run_metadata.step_stats)\n trace = tl.generate_chrome_trace_format()\n trace_filename = os.path.join(train_step_kwargs['logdir'],\n 'tf_trace-%d.json' % np_global_step)\n logging.info('Writing trace to %s', trace_filename)\n file_io.write_string_to_file(trace_filename, trace)\n if 'summary_writer' in train_step_kwargs:\n train_step_kwargs['summary_writer'].add_run_metadata(run_metadata,\n 'run_metadata-%d' %\n np_global_step)\n\n if 'should_log' in train_step_kwargs:\n if sess.run(train_step_kwargs['should_log']):\n logging.info('global step %d: loss = %.4f (%.3f sec/step)',\n np_global_step, total_loss, time_elapsed)\n\n # TODO(nsilberman): figure out why we can't put this into sess.run. The\n # issue right now is that the stop check depends on the global step. The\n # increment of global step often happens via the train op, which used\n # created using optimizer.apply_gradients.\n #\n # Since running `train_op` causes the global step to be incremented, one\n # would expected that using a control dependency would allow the\n # should_stop check to be run in the same session.run call:\n #\n # with ops.control_dependencies([train_op]):\n # should_stop_op = ...\n #\n # However, this actually seems not to work on certain platforms.\n if 'should_stop' in train_step_kwargs:\n should_stop = sess.run(train_step_kwargs['should_stop'])\n else:\n should_stop = False\n\n return total_loss, should_stop\n\n\n_USE_DEFAULT = 0\n\n\ndef train(train_op,\n logdir,\n train_step_fn=train_step,\n train_step_kwargs=_USE_DEFAULT,\n log_every_n_steps=1,\n graph=None,\n master='',\n is_chief=True,\n global_step=None,\n number_of_steps=None,\n init_op=_USE_DEFAULT,\n init_feed_dict=None,\n local_init_op=_USE_DEFAULT,\n init_fn=None,\n ready_op=_USE_DEFAULT,\n summary_op=_USE_DEFAULT,\n save_summaries_secs=600,\n summary_writer=_USE_DEFAULT,\n startup_delay_steps=0,\n saver=None,\n save_interval_secs=600,\n sync_optimizer=None,\n session_config=None,\n trace_every_n_steps=None):\n \"\"\"Runs a training loop using a TensorFlow supervisor.\n\n When the sync_optimizer is supplied, gradient updates are applied\n synchronously. 
Otherwise, gradient updates are applied asynchronous.\n\n Args:\n train_op: A `Tensor` that, when executed, will apply the gradients and\n return the loss value.\n logdir: The directory where training logs are written to. If None, model\n checkpoints and summaries will not be written.\n train_step_fn: The function to call in order to execute a single gradient\n step. The function must have take exactly four arguments: the current\n session, the `train_op` `Tensor`, a global step `Tensor` and a dictionary.\n train_step_kwargs: A dictionary which is passed to the `train_step_fn`. By\n default, two `Boolean`, scalar ops called \"should_stop\" and \"should_log\"\n are provided.\n log_every_n_steps: The frequency, in terms of global steps, that the loss\n and global step and logged.\n graph: The graph to pass to the supervisor. If no graph is supplied the\n default graph is used.\n master: The address of the tensorflow master.\n is_chief: Specifies whether or not the training is being run by the primary\n replica during replica training.\n global_step: The `Tensor` representing the global step. If left as `None`,\n then slim.variables.get_or_create_global_step() is used.\n number_of_steps: The max number of gradient steps to take during training,\n as measured by 'global_step': training will stop if global_step is\n greater than 'number_of_steps'. If the value is left as None, training\n proceeds indefinitely.\n init_op: The initialization operation. If left to its default value, then\n the session is initialized by calling `tf.global_variables_initializer()`.\n init_feed_dict: A feed dictionary to use when executing the `init_op`.\n local_init_op: The local initialization operation. If left to its default\n value, then the session is initialized by calling\n `tf.local_variables_initializer()` and `tf.tables_initializer()`.\n init_fn: An optional callable to be executed after `init_op` is called. The\n callable must accept one argument, the session being initialized.\n ready_op: Operation to check if the model is ready to use. If left to its\n default value, then the session checks for readiness by calling\n `tf.report_uninitialized_variables()`.\n summary_op: The summary operation.\n save_summaries_secs: How often, in seconds, to save summaries.\n summary_writer: `SummaryWriter` to use. Can be `None`\n to indicate that no summaries should be written. If unset, we\n create a SummaryWriter.\n startup_delay_steps: The number of steps to wait for before beginning. Note\n that this must be 0 if a sync_optimizer is supplied.\n saver: Saver to save checkpoints. If None, a default one will be created\n and used.\n save_interval_secs: How often, in seconds, to save the model to `logdir`.\n sync_optimizer: an instance of tf.train.SyncReplicasOptimizer. If the\n argument is supplied, gradient updates will be synchronous. If left as\n `None`, gradient updates will be asynchronous.\n session_config: An instance of `tf.ConfigProto` that will be used to\n configure the `Session`. If left as `None`, the default will be used.\n trace_every_n_steps: produce and save a `Timeline` in Chrome trace format\n and add it to the summaries every `trace_every_n_steps`. 
If None, no trace\n information will be produced or saved.\n\n Returns:\n the value of the loss function after training.\n\n Raises:\n ValueError: if `train_op` is empty or if `startup_delay_steps` is\n non-zero when `sync_optimizer` is supplied, if `number_of_steps` is\n negative, or if `trace_every_n_steps` is not `None` and no `logdir` is\n provided.\n \"\"\"\n if train_op is None:\n raise ValueError('train_op cannot be None.')\n\n if logdir is None:\n if summary_op != _USE_DEFAULT:\n raise ValueError('Cannot provide summary_op because logdir=None')\n if saver is not None:\n raise ValueError('Cannot provide saver because logdir=None')\n if trace_every_n_steps is not None:\n raise ValueError('Cannot provide trace_every_n_steps because '\n 'logdir=None')\n\n if sync_optimizer is not None and startup_delay_steps > 0:\n raise ValueError(\n 'startup_delay_steps must be zero when sync_optimizer is supplied.')\n\n if number_of_steps is not None and number_of_steps <= 0:\n raise ValueError(\n '`number_of_steps` must be either None or a positive number.')\n\n graph = graph or ops.get_default_graph()\n with graph.as_default():\n if global_step is None:\n global_step = variables.get_or_create_global_step()\n saver = saver or tf_saver.Saver()\n\n with ops.name_scope('init_ops'):\n if init_op == _USE_DEFAULT:\n init_op = tf_variables.global_variables_initializer()\n\n if ready_op == _USE_DEFAULT:\n ready_op = tf_variables.report_uninitialized_variables()\n\n if local_init_op == _USE_DEFAULT:\n local_init_op = control_flow_ops.group(\n tf_variables.local_variables_initializer(),\n lookup_ops.tables_initializer())\n\n if sync_optimizer is not None and isinstance(\n sync_optimizer, sync_replicas_optimizer.SyncReplicasOptimizer):\n with ops.control_dependencies([local_init_op] if local_init_op is\n not None else []):\n if is_chief:\n local_init_op = sync_optimizer.chief_init_op\n else:\n local_init_op = sync_optimizer.local_step_init_op\n ready_for_local_init_op = sync_optimizer.ready_for_local_init_op\n else:\n ready_for_local_init_op = None\n\n if summary_op == _USE_DEFAULT:\n summary_op = summary.merge_all()\n\n if summary_writer == _USE_DEFAULT:\n summary_writer = supervisor.Supervisor.USE_DEFAULT\n\n if is_chief and sync_optimizer is not None:\n if not isinstance(sync_optimizer,\n (sync_replicas_optimizer.SyncReplicasOptimizer)):\n raise ValueError(\n '`sync_optimizer` must be a tf.train.SyncReplicasOptimizer.')\n\n # Need to create these BEFORE the supervisor finalizes the graph:\n init_tokens_op = sync_optimizer.get_init_tokens_op()\n chief_queue_runner = sync_optimizer.get_chief_queue_runner()\n\n if train_step_kwargs == _USE_DEFAULT:\n with ops.name_scope('train_step'):\n train_step_kwargs = {}\n\n if number_of_steps:\n should_stop_op = math_ops.greater_equal(global_step, number_of_steps)\n else:\n should_stop_op = constant_op.constant(False)\n train_step_kwargs['should_stop'] = should_stop_op\n if log_every_n_steps > 0:\n train_step_kwargs['should_log'] = math_ops.equal(\n math_ops.mod(global_step, log_every_n_steps), 0)\n if is_chief and trace_every_n_steps is not None:\n train_step_kwargs['should_trace'] = math_ops.equal(\n math_ops.mod(global_step, trace_every_n_steps), 0)\n train_step_kwargs['logdir'] = logdir\n\n sv = supervisor.Supervisor(\n graph=graph,\n is_chief=is_chief,\n logdir=logdir,\n init_op=init_op,\n init_feed_dict=init_feed_dict,\n local_init_op=local_init_op,\n ready_for_local_init_op=ready_for_local_init_op,\n ready_op=ready_op,\n summary_op=summary_op,\n 
summary_writer=summary_writer,\n global_step=global_step,\n saver=saver,\n save_summaries_secs=save_summaries_secs,\n save_model_secs=save_interval_secs,\n init_fn=init_fn)\n\n if summary_writer is not None:\n train_step_kwargs['summary_writer'] = sv.summary_writer\n\n should_retry = True\n while should_retry:\n try:\n should_retry = False\n with sv.managed_session(\n master, start_standard_services=False, config=session_config) as sess:\n logging.info('Starting Session.')\n if is_chief:\n if logdir:\n sv.start_standard_services(sess)\n elif startup_delay_steps > 0:\n _wait_for_step(sess, global_step,\n min(startup_delay_steps, number_of_steps or\n sys.maxint))\n threads = sv.start_queue_runners(sess)\n logging.info('Starting Queues.')\n if is_chief and sync_optimizer is not None:\n sv.start_queue_runners(sess, [chief_queue_runner])\n sess.run(init_tokens_op)\n try:\n while not sv.should_stop():\n total_loss, should_stop = train_step_fn(\n sess, train_op, global_step, train_step_kwargs)\n if should_stop:\n logging.info('Stopping Training.')\n sv.request_stop()\n break\n except errors.OutOfRangeError:\n # OutOfRangeError is thrown when epoch limit per\n # tf.train.limit_epochs is reached.\n logging.info('Caught OutOfRangeError. Stopping Training.')\n if logdir and sv.is_chief:\n logging.info('Finished training! Saving model to disk.')\n sv.saver.save(sess, sv.save_path, global_step=sv.global_step)\n sv.stop(threads, close_summary_writer=True)\n\n except errors.AbortedError:\n # Always re-run on AbortedError as it indicates a restart of one of the\n # distributed tensorflow servers.\n logging.info('Retrying training!')\n should_retry = True\n\n return total_loss\n", "# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Takes a generator of values, and accumulates them for a frontend.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport os\nimport re\nimport threading\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.core.protobuf import meta_graph_pb2\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import tensor_util\nfrom tensorflow.tensorboard.backend.event_processing import directory_watcher\nfrom tensorflow.tensorboard.backend.event_processing import event_file_loader\nfrom tensorflow.tensorboard.backend.event_processing import plugin_asset_util\nfrom tensorflow.tensorboard.backend.event_processing import reservoir\n\nnamedtuple = collections.namedtuple\nScalarEvent = namedtuple('ScalarEvent', ['wall_time', 'step', 'value'])\n\nHealthPillEvent = namedtuple('HealthPillEvent', [\n 'wall_time', 'step', 'device_name', 'node_name', 'output_slot', 'dtype',\n 'shape', 'value'])\n\nCompressedHistogramEvent = namedtuple('CompressedHistogramEvent',\n ['wall_time', 'step',\n 
'compressed_histogram_values'])\n\nCompressedHistogramValue = namedtuple('CompressedHistogramValue',\n ['basis_point', 'value'])\n\nHistogramEvent = namedtuple('HistogramEvent',\n ['wall_time', 'step', 'histogram_value'])\n\nHistogramValue = namedtuple('HistogramValue', ['min', 'max', 'num', 'sum',\n 'sum_squares', 'bucket_limit',\n 'bucket'])\n\nImageEvent = namedtuple('ImageEvent', ['wall_time', 'step',\n 'encoded_image_string', 'width',\n 'height'])\n\nAudioEvent = namedtuple('AudioEvent', ['wall_time', 'step',\n 'encoded_audio_string', 'content_type',\n 'sample_rate', 'length_frames'])\n\nTensorEvent = namedtuple('TensorEvent', ['wall_time', 'step', 'tensor_proto'])\n\n## Different types of summary events handled by the event_accumulator\nSUMMARY_TYPES = {\n 'simple_value': '_ProcessScalar',\n 'histo': '_ProcessHistogram',\n 'image': '_ProcessImage',\n 'audio': '_ProcessAudio',\n 'tensor': '_ProcessTensor',\n}\n\n## The tagTypes below are just arbitrary strings chosen to pass the type\n## information of the tag from the backend to the frontend\nCOMPRESSED_HISTOGRAMS = 'compressedHistograms'\nHISTOGRAMS = 'histograms'\nIMAGES = 'images'\nAUDIO = 'audio'\nSCALARS = 'scalars'\nTENSORS = 'tensors'\nHEALTH_PILLS = 'health_pills'\nGRAPH = 'graph'\nMETA_GRAPH = 'meta_graph'\nRUN_METADATA = 'run_metadata'\n\n## Normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf)\n## naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev,\n## and then the long tail.\nNORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)\n\nDEFAULT_SIZE_GUIDANCE = {\n COMPRESSED_HISTOGRAMS: 500,\n IMAGES: 4,\n AUDIO: 4,\n SCALARS: 10000,\n # We store this many health pills per op.\n HEALTH_PILLS: 100,\n HISTOGRAMS: 1,\n TENSORS: 10,\n}\n\nSTORE_EVERYTHING_SIZE_GUIDANCE = {\n COMPRESSED_HISTOGRAMS: 0,\n IMAGES: 0,\n AUDIO: 0,\n SCALARS: 0,\n HEALTH_PILLS: 0,\n HISTOGRAMS: 0,\n TENSORS: 0,\n}\n\n# The tag that values containing health pills have. Health pill data is stored\n# in tensors. In order to distinguish health pill values from scalar values, we\n# rely on how health pill values have this special tag value.\nHEALTH_PILL_EVENT_TAG_PREFIX = '__health_pill__/'\n\n\ndef IsTensorFlowEventsFile(path):\n \"\"\"Check the path name to see if it is probably a TF Events file.\n\n Args:\n path: A file path to check if it is an event file.\n\n Raises:\n ValueError: If the path is an empty string.\n\n Returns:\n If path is formatted like a TensorFlowEventsFile.\n \"\"\"\n if not path:\n raise ValueError('Path must be a nonempty string')\n return 'tfevents' in tf.compat.as_str_any(os.path.basename(path))\n\n\nclass EventAccumulator(object):\n \"\"\"An `EventAccumulator` takes an event generator, and accumulates the values.\n\n The `EventAccumulator` is intended to provide a convenient Python interface\n for loading Event data written during a TensorFlow run. TensorFlow writes out\n `Event` protobuf objects, which have a timestamp and step number, and often\n contain a `Summary`. Summaries can have different kinds of data like an image,\n a scalar value, or a histogram. The Summaries also have a tag, which we use to\n organize logically related data. The `EventAccumulator` supports retrieving\n the `Event` and `Summary` data by its tag.\n\n Calling `Tags()` gets a map from `tagType` (e.g. `'images'`,\n `'compressedHistograms'`, `'scalars'`, etc) to the associated tags for those\n data types. 
Then, various functional endpoints (eg\n `Accumulator.Scalars(tag)`) allow for the retrieval of all data\n associated with that tag.\n\n The `Reload()` method synchronously loads all of the data written so far.\n\n Histograms, audio, and images are very large, so storing all of them is not\n recommended.\n @@Tensors\n \"\"\"\n\n def __init__(self,\n path,\n size_guidance=DEFAULT_SIZE_GUIDANCE,\n compression_bps=NORMAL_HISTOGRAM_BPS,\n purge_orphaned_data=True):\n \"\"\"Construct the `EventAccumulator`.\n\n Args:\n path: A file path to a directory containing tf events files, or a single\n tf events file. The accumulator will load events from this path.\n size_guidance: Information on how much data the EventAccumulator should\n store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much\n so as to avoid OOMing the client. The size_guidance should be a map\n from a `tagType` string to an integer representing the number of\n items to keep per tag for items of that `tagType`. If the size is 0,\n all events are stored.\n compression_bps: Information on how the `EventAccumulator` should compress\n histogram data for the `CompressedHistograms` tag (for details see\n `ProcessCompressedHistogram`).\n purge_orphaned_data: Whether to discard any events that were \"orphaned\" by\n a TensorFlow restart.\n \"\"\"\n sizes = {}\n for key in DEFAULT_SIZE_GUIDANCE:\n if key in size_guidance:\n sizes[key] = size_guidance[key]\n else:\n sizes[key] = DEFAULT_SIZE_GUIDANCE[key]\n\n self._first_event_timestamp = None\n self._scalars = reservoir.Reservoir(size=sizes[SCALARS])\n\n # Unlike the other reservoir, the reservoir for health pills is keyed by the\n # name of the op instead of the tag. This lets us efficiently obtain the\n # health pills per node.\n self._health_pills = reservoir.Reservoir(size=sizes[HEALTH_PILLS])\n\n self._graph = None\n self._graph_from_metagraph = False\n self._meta_graph = None\n self._tagged_metadata = {}\n self._histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS])\n self._compressed_histograms = reservoir.Reservoir(\n size=sizes[COMPRESSED_HISTOGRAMS], always_keep_last=False)\n self._images = reservoir.Reservoir(size=sizes[IMAGES])\n self._audio = reservoir.Reservoir(size=sizes[AUDIO])\n self._tensors = reservoir.Reservoir(size=sizes[TENSORS])\n\n self._generator_mutex = threading.Lock()\n self.path = path\n self._generator = _GeneratorFromPath(path)\n\n self._compression_bps = compression_bps\n self.purge_orphaned_data = purge_orphaned_data\n\n self.most_recent_step = -1\n self.most_recent_wall_time = -1\n self.file_version = None\n\n # The attributes that get built up by the accumulator\n self.accumulated_attrs = ('_scalars', '_histograms',\n '_compressed_histograms', '_images', '_audio')\n self._tensor_summaries = {}\n\n def Reload(self):\n \"\"\"Loads all events added since the last call to `Reload`.\n\n If `Reload` was never called, loads all events in the file.\n\n Returns:\n The `EventAccumulator`.\n \"\"\"\n with self._generator_mutex:\n for event in self._generator.Load():\n self._ProcessEvent(event)\n return self\n\n def PluginAssets(self, plugin_name):\n \"\"\"Return a list of all plugin assets for the given plugin.\n\n Args:\n plugin_name: The string name of a plugin to retrieve assets for.\n\n Returns:\n A list of string plugin asset names, or empty list if none are available.\n If the plugin was not registered, an empty list is returned.\n \"\"\"\n return plugin_asset_util.ListAssets(self.path, plugin_name)\n\n def RetrievePluginAsset(self, plugin_name, 
asset_name):\n \"\"\"Return the contents of a given plugin asset.\n\n Args:\n plugin_name: The string name of a plugin.\n asset_name: The string name of an asset.\n\n Returns:\n The string contents of the plugin asset.\n\n Raises:\n KeyError: If the asset is not available.\n \"\"\"\n return plugin_asset_util.RetrieveAsset(self.path, plugin_name, asset_name)\n\n def FirstEventTimestamp(self):\n \"\"\"Returns the timestamp in seconds of the first event.\n\n If the first event has been loaded (either by this method or by `Reload`,\n this returns immediately. Otherwise, it will load in the first event. Note\n that this means that calling `Reload` will cause this to block until\n `Reload` has finished.\n\n Returns:\n The timestamp in seconds of the first event that was loaded.\n\n Raises:\n ValueError: If no events have been loaded and there were no events found\n on disk.\n \"\"\"\n if self._first_event_timestamp is not None:\n return self._first_event_timestamp\n with self._generator_mutex:\n try:\n event = next(self._generator.Load())\n self._ProcessEvent(event)\n return self._first_event_timestamp\n\n except StopIteration:\n raise ValueError('No event timestamp could be found')\n\n def _ProcessEvent(self, event):\n \"\"\"Called whenever an event is loaded.\"\"\"\n if self._first_event_timestamp is None:\n self._first_event_timestamp = event.wall_time\n\n if event.HasField('file_version'):\n new_file_version = _ParseFileVersion(event.file_version)\n if self.file_version and self.file_version != new_file_version:\n ## This should not happen.\n tf.logging.warn(('Found new file_version for event.proto. This will '\n 'affect purging logic for TensorFlow restarts. '\n 'Old: {0} New: {1}').format(self.file_version,\n new_file_version))\n self.file_version = new_file_version\n\n self._MaybePurgeOrphanedData(event)\n\n ## Process the event.\n # GraphDef and MetaGraphDef are handled in a special way:\n # If no graph_def Event is available, but a meta_graph_def is, and it\n # contains a graph_def, then use the meta_graph_def.graph_def as our graph.\n # If a graph_def Event is available, always prefer it to the graph_def\n # inside the meta_graph_def.\n if event.HasField('graph_def'):\n if self._graph is not None:\n tf.logging.warn(\n ('Found more than one graph event per run, or there was '\n 'a metagraph containing a graph_def, as well as one or '\n 'more graph events. Overwriting the graph with the '\n 'newest event.'))\n self._graph = event.graph_def\n self._graph_from_metagraph = False\n elif event.HasField('meta_graph_def'):\n if self._meta_graph is not None:\n tf.logging.warn(('Found more than one metagraph event per run. '\n 'Overwriting the metagraph with the newest event.'))\n self._meta_graph = event.meta_graph_def\n if self._graph is None or self._graph_from_metagraph:\n # We may have a graph_def in the metagraph. If so, and no\n # graph_def is directly available, use this one instead.\n meta_graph = meta_graph_pb2.MetaGraphDef()\n meta_graph.ParseFromString(self._meta_graph)\n if meta_graph.graph_def:\n if self._graph is not None:\n tf.logging.warn(\n ('Found multiple metagraphs containing graph_defs,'\n 'but did not find any graph events. 
Overwriting the '\n 'graph with the newest metagraph version.'))\n self._graph_from_metagraph = True\n self._graph = meta_graph.graph_def.SerializeToString()\n elif event.HasField('tagged_run_metadata'):\n tag = event.tagged_run_metadata.tag\n if tag in self._tagged_metadata:\n tf.logging.warn('Found more than one \"run metadata\" event with tag ' +\n tag + '. Overwriting it with the newest event.')\n self._tagged_metadata[tag] = event.tagged_run_metadata.run_metadata\n elif event.HasField('summary'):\n for value in event.summary.value:\n if (value.HasField('tensor') and\n value.tag.startswith(HEALTH_PILL_EVENT_TAG_PREFIX)):\n self._ProcessHealthPillSummary(value, event)\n else:\n for summary_type, summary_func in SUMMARY_TYPES.items():\n if value.HasField(summary_type):\n datum = getattr(value, summary_type)\n tag = value.node_name if summary_type == 'tensor' else value.tag\n getattr(self, summary_func)(tag, event.wall_time, event.step,\n datum)\n\n def _ProcessHealthPillSummary(self, value, event):\n \"\"\"Process summaries containing health pills.\n\n These summaries are distinguished by the fact that they have a Tensor field\n and have a special tag value.\n\n This method emits ERROR-level messages to the logs if it encounters Tensor\n summaries that it cannot process.\n\n Args:\n value: A tf.Summary.Value with a Tensor field.\n event: The tf.Event containing that value.\n \"\"\"\n elements = tensor_util.MakeNdarray(value.tensor)\n\n # The node_name property of the value object is actually a watch key: a\n # combination of node name, output slot, and a suffix. We capture the\n # actual node name and the output slot with a regular expression.\n match = re.match(r'^(.*):(\\d+):DebugNumericSummary$', value.node_name)\n if not match:\n tf.logging.log_first_n(\n tf.logging.ERROR,\n 'Unsupported watch key %s for health pills; skipping this sequence.',\n 1, value.node_name)\n return\n\n node_name = match.group(1)\n output_slot = int(match.group(2))\n device_name = value.tag[len(HEALTH_PILL_EVENT_TAG_PREFIX):]\n self._ProcessHealthPill(event.wall_time, event.step, device_name, node_name,\n output_slot, elements)\n\n def Tags(self):\n \"\"\"Return all tags found in the value stream.\n\n Returns:\n A `{tagType: ['list', 'of', 'tags']}` dictionary.\n \"\"\"\n return {\n IMAGES: self._images.Keys(),\n AUDIO: self._audio.Keys(),\n HISTOGRAMS: self._histograms.Keys(),\n SCALARS: self._scalars.Keys(),\n COMPRESSED_HISTOGRAMS: self._compressed_histograms.Keys(),\n TENSORS: self._tensors.Keys(),\n # Use a heuristic: if the metagraph is available, but\n # graph is not, then we assume the metagraph contains the graph.\n GRAPH: self._graph is not None,\n META_GRAPH: self._meta_graph is not None,\n RUN_METADATA: list(self._tagged_metadata.keys())\n }\n\n def Scalars(self, tag):\n \"\"\"Given a summary tag, return all associated `ScalarEvent`s.\n\n Args:\n tag: A string tag associated with the events.\n\n Raises:\n KeyError: If the tag is not found.\n\n Returns:\n An array of `ScalarEvent`s.\n \"\"\"\n return self._scalars.Items(tag)\n\n def HealthPills(self, node_name):\n \"\"\"Returns all health pill values for a certain node.\n\n Args:\n node_name: The name of the node to obtain health pills for.\n\n Raises:\n KeyError: If the node name is not found.\n\n Returns:\n An array of `HealthPillEvent`s.\n \"\"\"\n return self._health_pills.Items(node_name)\n\n def GetOpsWithHealthPills(self):\n \"\"\"Determines which ops have at least 1 health pill event.\n\n Returns:\n A list of names of ops with at least 1 
health pill event.\n \"\"\"\n return self._health_pills.Keys()\n\n def Graph(self):\n \"\"\"Return the graph definition, if there is one.\n\n If the graph is stored directly, return that. If no graph is stored\n directly but a metagraph is stored containing a graph, return that.\n\n Raises:\n ValueError: If there is no graph for this run.\n\n Returns:\n The `graph_def` proto.\n \"\"\"\n graph = tf.GraphDef()\n if self._graph is not None:\n graph.ParseFromString(self._graph)\n return graph\n raise ValueError('There is no graph in this EventAccumulator')\n\n def MetaGraph(self):\n \"\"\"Return the metagraph definition, if there is one.\n\n Raises:\n ValueError: If there is no metagraph for this run.\n\n Returns:\n The `meta_graph_def` proto.\n \"\"\"\n if self._meta_graph is None:\n raise ValueError('There is no metagraph in this EventAccumulator')\n meta_graph = meta_graph_pb2.MetaGraphDef()\n meta_graph.ParseFromString(self._meta_graph)\n return meta_graph\n\n def RunMetadata(self, tag):\n \"\"\"Given a tag, return the associated session.run() metadata.\n\n Args:\n tag: A string tag associated with the event.\n\n Raises:\n ValueError: If the tag is not found.\n\n Returns:\n The metadata in form of `RunMetadata` proto.\n \"\"\"\n if tag not in self._tagged_metadata:\n raise ValueError('There is no run metadata with this tag name')\n\n run_metadata = tf.RunMetadata()\n run_metadata.ParseFromString(self._tagged_metadata[tag])\n return run_metadata\n\n def Histograms(self, tag):\n \"\"\"Given a summary tag, return all associated histograms.\n\n Args:\n tag: A string tag associated with the events.\n\n Raises:\n KeyError: If the tag is not found.\n\n Returns:\n An array of `HistogramEvent`s.\n \"\"\"\n return self._histograms.Items(tag)\n\n def CompressedHistograms(self, tag):\n \"\"\"Given a summary tag, return all associated compressed histograms.\n\n Args:\n tag: A string tag associated with the events.\n\n Raises:\n KeyError: If the tag is not found.\n\n Returns:\n An array of `CompressedHistogramEvent`s.\n \"\"\"\n return self._compressed_histograms.Items(tag)\n\n def Images(self, tag):\n \"\"\"Given a summary tag, return all associated images.\n\n Args:\n tag: A string tag associated with the events.\n\n Raises:\n KeyError: If the tag is not found.\n\n Returns:\n An array of `ImageEvent`s.\n \"\"\"\n return self._images.Items(tag)\n\n def Audio(self, tag):\n \"\"\"Given a summary tag, return all associated audio.\n\n Args:\n tag: A string tag associated with the events.\n\n Raises:\n KeyError: If the tag is not found.\n\n Returns:\n An array of `AudioEvent`s.\n \"\"\"\n return self._audio.Items(tag)\n\n def Tensors(self, tag):\n \"\"\"Given a summary tag, return all associated tensors.\n\n Args:\n tag: A string tag associated with the events.\n\n Raises:\n KeyError: If the tag is not found.\n\n Returns:\n An array of `TensorEvent`s.\n \"\"\"\n return self._tensors.Items(tag)\n\n def _MaybePurgeOrphanedData(self, event):\n \"\"\"Maybe purge orphaned data due to a TensorFlow crash.\n\n When TensorFlow crashes at step T+O and restarts at step T, any events\n written after step T are now \"orphaned\" and will be at best misleading if\n they are included in TensorBoard.\n\n This logic attempts to determine if there is orphaned data, and purge it\n if it is found.\n\n Args:\n event: The event to use as a reference, to determine if a purge is needed.\n \"\"\"\n if not self.purge_orphaned_data:\n return\n ## Check if the event happened after a crash, and purge expired tags.\n if self.file_version 
and self.file_version >= 2:\n ## If the file_version is recent enough, use the SessionLog enum\n ## to check for restarts.\n self._CheckForRestartAndMaybePurge(event)\n else:\n ## If there is no file version, default to old logic of checking for\n ## out of order steps.\n self._CheckForOutOfOrderStepAndMaybePurge(event)\n\n def _CheckForRestartAndMaybePurge(self, event):\n \"\"\"Check and discard expired events using SessionLog.START.\n\n Check for a SessionLog.START event and purge all previously seen events\n with larger steps, because they are out of date. Because of supervisor\n threading, it is possible that this logic will cause the first few event\n messages to be discarded since supervisor threading does not guarantee\n that the START message is deterministically written first.\n\n This method is preferred over _CheckForOutOfOrderStepAndMaybePurge which\n can inadvertently discard events due to supervisor threading.\n\n Args:\n event: The event to use as reference. If the event is a START event, all\n previously seen events with a greater event.step will be purged.\n \"\"\"\n if event.HasField(\n 'session_log') and event.session_log.status == tf.SessionLog.START:\n self._Purge(event, by_tags=False)\n\n def _CheckForOutOfOrderStepAndMaybePurge(self, event):\n \"\"\"Check for out-of-order event.step and discard expired events for tags.\n\n Check if the event is out of order relative to the global most recent step.\n If it is, purge outdated summaries for tags that the event contains.\n\n Args:\n event: The event to use as reference. If the event is out-of-order, all\n events with the same tags, but with a greater event.step will be purged.\n \"\"\"\n if event.step < self.most_recent_step and event.HasField('summary'):\n self._Purge(event, by_tags=True)\n else:\n self.most_recent_step = event.step\n self.most_recent_wall_time = event.wall_time\n\n def _ConvertHistogramProtoToTuple(self, histo):\n return HistogramValue(min=histo.min,\n max=histo.max,\n num=histo.num,\n sum=histo.sum,\n sum_squares=histo.sum_squares,\n bucket_limit=list(histo.bucket_limit),\n bucket=list(histo.bucket))\n\n def _ProcessHistogram(self, tag, wall_time, step, histo):\n \"\"\"Processes a proto histogram by adding it to accumulated state.\"\"\"\n histo = self._ConvertHistogramProtoToTuple(histo)\n histo_ev = HistogramEvent(wall_time, step, histo)\n self._histograms.AddItem(tag, histo_ev)\n self._compressed_histograms.AddItem(\n tag, histo_ev, lambda x: _CompressHistogram(x, self._compression_bps))\n\n def _ProcessImage(self, tag, wall_time, step, image):\n \"\"\"Processes an image by adding it to accumulated state.\"\"\"\n event = ImageEvent(wall_time=wall_time,\n step=step,\n encoded_image_string=image.encoded_image_string,\n width=image.width,\n height=image.height)\n self._images.AddItem(tag, event)\n\n def _ProcessAudio(self, tag, wall_time, step, audio):\n \"\"\"Processes a audio by adding it to accumulated state.\"\"\"\n event = AudioEvent(wall_time=wall_time,\n step=step,\n encoded_audio_string=audio.encoded_audio_string,\n content_type=audio.content_type,\n sample_rate=audio.sample_rate,\n length_frames=audio.length_frames)\n self._audio.AddItem(tag, event)\n\n def _ProcessScalar(self, tag, wall_time, step, scalar):\n \"\"\"Processes a simple value by adding it to accumulated state.\"\"\"\n sv = ScalarEvent(wall_time=wall_time, step=step, value=scalar)\n self._scalars.AddItem(tag, sv)\n\n def _ProcessTensor(self, tag, wall_time, step, tensor):\n tv = TensorEvent(wall_time=wall_time, step=step, 
tensor_proto=tensor)\n self._tensors.AddItem(tag, tv)\n\n def _ProcessHealthPill(self, wall_time, step, device_name, node_name,\n output_slot, elements):\n \"\"\"Processes a health pill value by adding it to accumulated state.\n\n Args:\n wall_time: The time at which the health pill was created. Provided by the\n debugger.\n step: The step at which the health pill was created. Provided by the\n debugger.\n device_name: The name of the node's device.\n node_name: The name of the node for this health pill.\n output_slot: The output slot for this health pill.\n elements: An ND array of 20 floats. The elements of the health pill.\n \"\"\"\n # Key by the node name for fast retrieval of health pills by node name. The\n # array is cast to a list so that it is JSON-able. The debugger data plugin\n # serves a JSON response.\n self._health_pills.AddItem(node_name,\n HealthPillEvent(\n wall_time=wall_time,\n step=step,\n device_name=device_name,\n node_name=node_name,\n output_slot=output_slot,\n dtype=repr(dtypes.as_dtype(elements[12])),\n shape=list(elements[14:]),\n value=list(elements)))\n\n def _Purge(self, event, by_tags):\n \"\"\"Purge all events that have occurred after the given event.step.\n\n If by_tags is True, purge all events that occurred after the given\n event.step, but only for the tags that the event has. Non-sequential\n event.steps suggest that a TensorFlow restart occurred, and we discard\n the out-of-order events to display a consistent view in TensorBoard.\n\n Discarding by tags is the safer method, when we are unsure whether a restart\n has occurred, given that threading in supervisor can cause events of\n different tags to arrive with unsynchronized step values.\n\n If by_tags is False, then purge all events with event.step greater than the\n given event.step. This can be used when we are certain that a TensorFlow\n restart has occurred and these events can be discarded.\n\n Args:\n event: The event to use as reference for the purge. All events with\n the same tags, but with a greater event.step will be purged.\n by_tags: Bool to dictate whether to discard all out-of-order events or\n only those that are associated with the given reference event.\n \"\"\"\n ## Keep data in reservoirs that has a step less than event.step\n _NotExpired = lambda x: x.step < event.step\n\n if by_tags:\n\n def _ExpiredPerTag(value):\n return [getattr(self, x).FilterItems(_NotExpired, value.tag)\n for x in self.accumulated_attrs]\n\n expired_per_tags = [_ExpiredPerTag(value)\n for value in event.summary.value]\n expired_per_type = [sum(x) for x in zip(*expired_per_tags)]\n else:\n expired_per_type = [getattr(self, x).FilterItems(_NotExpired)\n for x in self.accumulated_attrs]\n\n if sum(expired_per_type) > 0:\n purge_msg = _GetPurgeMessage(self.most_recent_step,\n self.most_recent_wall_time, event.step,\n event.wall_time, *expired_per_type)\n tf.logging.warn(purge_msg)\n\n\ndef _GetPurgeMessage(most_recent_step, most_recent_wall_time, event_step,\n event_wall_time, num_expired_scalars, num_expired_histos,\n num_expired_comp_histos, num_expired_images,\n num_expired_audio):\n \"\"\"Return the string message associated with TensorBoard purges.\"\"\"\n return ('Detected out of order event.step likely caused by '\n 'a TensorFlow restart. Purging expired events from Tensorboard'\n ' display between the previous step: {} (timestamp: {}) and '\n 'current step: {} (timestamp: {}). 
Removing {} scalars, {} '\n 'histograms, {} compressed histograms, {} images, '\n 'and {} audio.').format(most_recent_step, most_recent_wall_time,\n event_step, event_wall_time,\n num_expired_scalars, num_expired_histos,\n num_expired_comp_histos, num_expired_images,\n num_expired_audio)\n\n\ndef _GeneratorFromPath(path):\n \"\"\"Create an event generator for file or directory at given path string.\"\"\"\n if not path:\n raise ValueError('path must be a valid string')\n if IsTensorFlowEventsFile(path):\n return event_file_loader.EventFileLoader(path)\n else:\n return directory_watcher.DirectoryWatcher(\n path, event_file_loader.EventFileLoader, IsTensorFlowEventsFile)\n\n\ndef _ParseFileVersion(file_version):\n \"\"\"Convert the string file_version in event.proto into a float.\n\n Args:\n file_version: String file_version from event.proto\n\n Returns:\n Version number as a float.\n \"\"\"\n tokens = file_version.split('brain.Event:')\n try:\n return float(tokens[-1])\n except ValueError:\n ## This should never happen according to the definition of file_version\n ## specified in event.proto.\n tf.logging.warn(\n ('Invalid event.proto file_version. Defaulting to use of '\n 'out-of-order event.step logic for purging expired events.'))\n return -1\n\n\ndef _CompressHistogram(histo_ev, bps):\n \"\"\"Creates fixed size histogram by adding compression to accumulated state.\n\n This routine transforms a histogram at a particular step by linearly\n interpolating its variable number of buckets to represent their cumulative\n weight at a constant number of compression points. This significantly reduces\n the size of the histogram and makes it suitable for a two-dimensional area\n plot where the output of this routine constitutes the ranges for a single x\n coordinate.\n\n Args:\n histo_ev: A HistogramEvent namedtuple.\n bps: Compression points represented in basis points, 1/100ths of a percent.\n\n Returns:\n CompressedHistogramEvent namedtuple.\n \"\"\"\n # See also: Histogram::Percentile() in core/lib/histogram/histogram.cc\n histo = histo_ev.histogram_value\n if not histo.num:\n return CompressedHistogramEvent(\n histo_ev.wall_time,\n histo_ev.step,\n [CompressedHistogramValue(b, 0.0) for b in bps])\n bucket = np.array(histo.bucket)\n weights = (bucket * bps[-1] / (bucket.sum() or 1.0)).cumsum()\n values = []\n j = 0\n while j < len(bps):\n i = np.searchsorted(weights, bps[j], side='right')\n while i < len(weights):\n cumsum = weights[i]\n cumsum_prev = weights[i - 1] if i > 0 else 0.0\n if cumsum == cumsum_prev: # prevent remap divide by zero\n i += 1\n continue\n if not i or not cumsum_prev:\n lhs = histo.min\n else:\n lhs = max(histo.bucket_limit[i - 1], histo.min)\n rhs = min(histo.bucket_limit[i], histo.max)\n weight = _Remap(bps[j], cumsum_prev, cumsum, lhs, rhs)\n values.append(CompressedHistogramValue(bps[j], weight))\n j += 1\n break\n else:\n break\n while j < len(bps):\n values.append(CompressedHistogramValue(bps[j], histo.max))\n j += 1\n return CompressedHistogramEvent(histo_ev.wall_time, histo_ev.step, values)\n\n\ndef _Remap(x, x0, x1, y0, y1):\n \"\"\"Linearly map from [x0, x1] unto [y0, y1].\"\"\"\n return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"A utility function for importing TensorFlow graphs.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport copy\n\nfrom tensorflow.core.framework import attr_value_pb2\nfrom tensorflow.core.framework import graph_pb2\nfrom tensorflow.core.framework import types_pb2\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import function\nfrom tensorflow.python.framework import op_def_registry\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.util import compat\n\n\n# TODO(josh11b): SWIG the code from node_def_util instead of duplicating\n# the logic here.\ndef _GetNodeAttr(node_def, attr_name):\n if attr_name not in node_def.attr:\n raise ValueError('Expected one attr with name %r in %s.'\n % (attr_name, str(node_def)))\n return node_def.attr[attr_name]\n\n\ndef _ArgToTypesNoRef(node_def, arg_def):\n if arg_def.number_attr:\n repeats = _GetNodeAttr(node_def, arg_def.number_attr).i\n if arg_def.type_attr:\n dtype = _GetNodeAttr(node_def, arg_def.type_attr).type\n else:\n assert arg_def.type != types_pb2.DT_INVALID\n dtype = arg_def.type\n return [dtype] * repeats\n elif arg_def.type_attr:\n return [_GetNodeAttr(node_def, arg_def.type_attr).type]\n elif arg_def.type_list_attr:\n return _GetNodeAttr(node_def, arg_def.type_list_attr).list.type\n else:\n assert arg_def.type != types_pb2.DT_INVALID\n return [arg_def.type]\n\n\ndef _SingleArgToTypes(node_def, arg_def):\n types = _ArgToTypesNoRef(node_def, arg_def)\n if arg_def.is_ref:\n return [dtypes.as_dtype(dt)._as_ref.as_datatype_enum for dt in types] # pylint: disable=protected-access\n return types\n\n\ndef _ArgsToTypes(node_def, arg_list):\n types = []\n for arg_def in arg_list:\n types.extend(_SingleArgToTypes(node_def, arg_def))\n return types\n\n\ndef _InputTypes(node_def, op_dict):\n op_def = op_dict[node_def.op]\n return _ArgsToTypes(node_def, op_def.input_arg)\n\n\ndef _OutputTypes(node_def, op_dict):\n op_def = op_dict[node_def.op]\n return _ArgsToTypes(node_def, op_def.output_arg)\n\n\ndef _IsControlInput(input_name):\n # Expected format: '^operation_name' (control input).\n return input_name.startswith('^')\n\n\ndef _ParseTensorName(tensor_name):\n \"\"\"Parses a tensor name into an operation name and output index.\n\n This function will canonicalize tensor names as follows:\n\n * \"foo:0\" -> (\"foo\", 0)\n * \"foo:7\" -> (\"foo\", 7)\n * \"foo\" -> (\"foo\", 0)\n * \"foo:bar:baz\" -> ValueError\n\n Args:\n tensor_name: The name of a tensor.\n\n Returns:\n A tuple containing the operation name, and the output index.\n\n Raises:\n ValueError: If `tensor_name' cannot be interpreted as the name of a tensor.\n \"\"\"\n components = tensor_name.split(':')\n if len(components) == 2:\n # 
Expected format: 'operation_name:output_index'.\n try:\n output_index = int(components[1])\n except ValueError:\n raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))\n return components[0], output_index\n elif len(components) == 1:\n # Expected format: 'operation_name' (implicit 0th output).\n return components[0], 0\n else:\n raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))\n\n\ndef _CanonicalInputName(input_name):\n input_name = compat.as_str(input_name)\n if _IsControlInput(input_name):\n return input_name\n input_op_name, output_index = _ParseTensorName(input_name)\n return '%s:%d' % (input_op_name, output_index)\n\n\ndef _InvalidNodeMessage(node, message):\n return 'graph_def is invalid at node %r: %s.' % (node.name, message)\n\n\[email protected]\ndef _MaybeDevice(device):\n \"\"\"Applies the given device only if device is not None or empty.\"\"\"\n if device:\n with ops.device(device):\n yield\n else:\n yield\n\n\ndef _FindAttrInOpDef(attr_name, op_def):\n for attr_def in op_def.attr:\n if attr_name == attr_def.name:\n return attr_def\n return None\n\n\ndef import_graph_def(graph_def, input_map=None, return_elements=None,\n name=None, op_dict=None, producer_op_list=None):\n \"\"\"Imports the graph from `graph_def` into the current default `Graph`.\n\n This function provides a way to import a serialized TensorFlow\n [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto)\n protocol buffer, and extract individual objects in the `GraphDef` as\n @{tf.Tensor} and @{tf.Operation} objects. Once extracted,\n these objects are placed into the current default `Graph`. See\n @{tf.Graph.as_graph_def} for a way to create a `GraphDef`\n proto.\n\n Args:\n graph_def: A `GraphDef` proto containing operations to be imported into\n the default graph.\n input_map: A dictionary mapping input names (as strings) in `graph_def`\n to `Tensor` objects. The values of the named input tensors in the\n imported graph will be re-mapped to the respective `Tensor` values.\n return_elements: A list of strings containing operation names in\n `graph_def` that will be returned as `Operation` objects; and/or\n tensor names in `graph_def` that will be returned as `Tensor` objects.\n name: (Optional.) A prefix that will be prepended to the names in\n `graph_def`. Note that this does not apply to imported function names.\n Defaults to `\"import\"`.\n op_dict: (Optional.) A dictionary mapping op type names to `OpDef` protos.\n Must contain an `OpDef` proto for each op type named in `graph_def`.\n If omitted, uses the `OpDef` protos registered in the global registry.\n producer_op_list: (Optional.) An `OpList` proto with the (possibly stripped)\n list of `OpDef`s used by the producer of the graph. If provided, attrs\n for ops in `graph_def` that are not in `op_dict` that have their default\n value according to `producer_op_list` will be removed. 
This will allow\n some more `GraphDef`s produced by later binaries to be accepted by\n earlier binaries.\n\n Returns:\n A list of `Operation` and/or `Tensor` objects from the imported graph,\n corresponding to the names in `return_elements`.\n\n Raises:\n TypeError: If `graph_def` is not a `GraphDef` proto,\n `input_map` is not a dictionary mapping strings to `Tensor` objects,\n or `return_elements` is not a list of strings.\n ValueError: If `input_map`, or `return_elements` contains names that\n do not appear in `graph_def`, or `graph_def` is not well-formed (e.g.\n it refers to an unknown tensor).\n \"\"\"\n # Type checks for inputs.\n if not isinstance(graph_def, graph_pb2.GraphDef):\n # `graph_def` could be a dynamically-created message, so try a duck-typed\n # approach\n try:\n old_graph_def = graph_def\n graph_def = graph_pb2.GraphDef()\n graph_def.MergeFrom(old_graph_def)\n except TypeError:\n raise TypeError('graph_def must be a GraphDef proto.')\n if input_map is None:\n input_map = {}\n else:\n if not (isinstance(input_map, dict)\n and all(isinstance(k, compat.bytes_or_text_types)\n for k in input_map.keys())):\n raise TypeError('input_map must be a dictionary mapping strings to '\n 'Tensor objects.')\n if return_elements is not None:\n return_elements = tuple(return_elements)\n if not all(isinstance(x, compat.bytes_or_text_types)\n for x in return_elements):\n raise TypeError('return_elements must be a list of strings.')\n\n # Use a canonical representation for all tensor names.\n input_map = {_CanonicalInputName(k): v for k, v in input_map.items()}\n used_input_keys = set()\n\n name_to_op = {}\n\n if op_dict is None:\n op_dict = op_def_registry.get_registered_ops()\n\n if producer_op_list is None:\n producer_op_dict = None\n else:\n producer_op_dict = {op.name: op for op in producer_op_list.op}\n\n g = ops.get_default_graph()\n\n # Add any functions defined in `graph_def` to `g`\n if graph_def.library and graph_def.library.function:\n # Copy op_dict so we don't clobber the original\n op_dict = copy.copy(op_dict)\n # pylint: disable=protected-access\n # Note that we do not prepend `name` to the function name. The reasoning is\n # that function names are similar to op definition names, which currently do\n # not have a scoped name or namespace scheme.\n functions = function._from_library(graph_def.library)\n for f in functions:\n f.add_to_graph(g)\n op_dict[f.name] = f.definition.signature\n # pylint: enable=protected-access\n\n # LINT.IfChange\n with ops.name_scope(name, 'import', input_map.values()) as scope:\n # TODO(ashankar): Should this just copy over or should it do some\n # more nuanced merging? For example, the graph may already have some\n # marked \"bad versions\" and we don't want to lose those because of\n # what's in graph_def.versions? The C++ ImporGraphDef does something\n # more nuanced.\n g.graph_def_versions.CopyFrom(graph_def.versions)\n\n if not all(isinstance(v, ops.Tensor) for v in input_map.values()):\n if not scope:\n # The caller must have passed `name=''`.\n raise ValueError(\n 'tf.import_graph_def() requires a non-empty `name` if `input_map` '\n 'contains non-Tensor values. Try calling tf.convert_to_tensor() on '\n '`input_map` values before calling tf.import_graph_def().')\n with ops.name_scope('_inputs'):\n input_map = {k: ops.convert_to_tensor(v) for k, v in input_map.items()}\n\n # NOTE(mrry): We do this in two passes, because there may be a cycle in\n # `graph_def`.\n\n # 1. 
Add operations without their inputs.\n for node in graph_def.node:\n # Check to see if this op's name matches a previously seen op\n if node.name in name_to_op:\n raise ValueError('Duplicate name \\'%s\\' in GraphDef.' % node.name)\n # Set any default attr values that aren't present.\n if node.op not in op_dict:\n raise ValueError('No op named %s in defined operations.' % node.op)\n op_def = op_dict[node.op]\n for attr_def in op_def.attr:\n key = attr_def.name\n if attr_def.HasField('default_value'):\n value = node.attr[key]\n if value is None or value.WhichOneof('value') is None:\n node.attr[key].CopyFrom(attr_def.default_value)\n if producer_op_dict:\n # Remove any default attr values that aren't in op_def.\n if node.op in producer_op_dict:\n producer_op_def = producer_op_dict[node.op]\n # We make a copy of node.attr to iterate through since we\n # may modify node.attr inside the loop.\n for key in list(node.attr):\n if _FindAttrInOpDef(key, op_def) is None:\n # No attr_def in consumer, look in producer.\n attr_def = _FindAttrInOpDef(key, producer_op_def)\n if (attr_def and attr_def.HasField('default_value') and\n node.attr[key] == attr_def.default_value):\n # Unknown attr had default value in producer, delete it\n # so it can be understood by consumer.\n del node.attr[key]\n\n output_types = _OutputTypes(node, op_dict)\n name_to_op[node.name] = g.create_op(\n node.op, [], output_types, name=node.name, attrs=node.attr,\n compute_shapes=False, compute_device=False,\n op_def=op_def)\n\n # 2. Add inputs to the operations.\n for node in graph_def.node:\n op = name_to_op[node.name]\n input_types = _InputTypes(node, op_dict)\n\n # Rewrite the colocation attributes in the graph, since the\n # names of new ops may have changed.\n for key, value in op.node_def.attr.items():\n if key == '_class':\n class_values = value.list\n new_class_values = []\n for class_value in class_values.s:\n if class_value.startswith(b'loc:@'):\n op_to_bind_to = class_value[5:].decode()\n # Find the op by its original name.\n if op_to_bind_to not in name_to_op:\n raise ValueError('Specified colocation to an op that '\n 'does not exist during import: %s in %s' % (\n op_to_bind_to, node.name))\n original_op = name_to_op[op_to_bind_to]\n new_class_values.append(compat.as_bytes(\n 'loc:@' + original_op.name))\n else:\n new_class_values.append(class_value)\n value.list.CopyFrom(attr_value_pb2.AttrValue.ListValue(\n s=new_class_values))\n\n # NOTE(mrry): We cannot use zip here because control inputs do not appear\n # in the list of input_types.\n for i, input_name in enumerate(\n [_CanonicalInputName(x) for x in node.input]):\n\n if _IsControlInput(input_name):\n # (a) Input is a control input that should be taken from an op\n # in \"graph_def\".\n try:\n source_op = name_to_op[input_name[1:]]\n except KeyError:\n raise ValueError(\n _InvalidNodeMessage(\n node,\n 'Control input %r not found in graph_def.' 
% (input_name,)))\n # pylint: disable=protected-access\n op._add_control_input(source_op)\n # pylint: enable=protected-access\n\n else:\n try:\n input_type = input_types[i]\n except IndexError:\n raise ValueError(_InvalidNodeMessage(\n node, 'More inputs specified (%r) than the op expects.'\n % (input_name,)))\n\n if input_name in input_map:\n # (b) Input should be replaced by a tensor from the caller.\n source_tensor = input_map[input_name]\n used_input_keys.add(input_name)\n\n else:\n # (c) Input should be taken from an op in `graph_def`.\n operation_name, output_index = _ParseTensorName(input_name)\n try:\n source_op = name_to_op[operation_name]\n source_tensor = list(source_op.values())[output_index]\n except (KeyError, IndexError):\n raise ValueError(\n _InvalidNodeMessage(\n node,\n 'Input tensor %r not found in graph_def.'\n % (input_name,)))\n\n try:\n # pylint: disable=protected-access\n op._add_input(source_tensor, dtype=input_type)\n # pylint: enable=protected-access\n except TypeError as te:\n raise ValueError(_InvalidNodeMessage(\n node, 'Input tensor %r %s' % (input_name, te)))\n\n # pylint: disable=protected-access\n if op._input_dtypes != input_types:\n raise ValueError(\n _InvalidNodeMessage(\n node,\n 'Input types mismatch (expected %r but got %r)'\n % (', '.join(dtypes.as_dtype(x).name for x in input_types),\n ', '.join(x.name for x in op._input_dtypes))))\n # pylint: enable=protected-access\n\n if not g._is_function(op.type): # pylint: disable=protected-access\n # Execute shape inference for this op.\n # NOTE(mrry): If the graph contains a cycle, the full shape information\n # may not be available for this op's inputs.\n ops.set_shapes_for_outputs(op)\n # For nodes with _output_shapes set, set the output shapes.\n if '_output_shapes' in op.node_def.attr:\n for i, output in enumerate(op.outputs):\n dims = op.node_def.attr['_output_shapes'].list.shape[i]\n output_shape = tensor_shape.TensorShape(\n None if dims.unknown_rank else\n [dim.size if dim.size >= 0 else None for dim in dims.dim])\n\n try:\n output.set_shape(output_shape)\n except ValueError as e:\n # If the output shape is incompatible with what is inferred\n # by the graph for a very specific whitelist of ops, then we\n # ignore this output shape. 
This can happen if there is a\n # bug in the shape function for some operation, and the\n # serialized graph def has the incorrect shape set when\n # running on a newer binary with the fixed shape function.\n # This is an escape hatch that allows us to correct shape\n # functions that are not critical to correct execution but\n # would cause graphs to fail if imported after correcting.\n #\n # This can be removed after 2017/03/08.\n if op.type in ['RandomShuffleQueue', 'PaddingFIFOQueue',\n 'FIFOQueue', 'PriorityQueue', 'QueueSize',\n 'Stack', 'Barrier', 'BarrierReadySize',\n 'BarrierIncompleteSize', 'HashTable',\n 'MutableHashTable',\n 'MutableHashTableOfTensors', 'Mutex',\n 'CuckooTable', 'IndexTable',\n 'WholeFileReader', 'TextLineReader',\n 'FixedLengthRecordReader',\n 'TFRecordReader', 'IdentityReader',\n 'RefSwitch', 'RefEnter', 'RefNextIteration',\n 'RefMerge', 'RefIdentity']:\n pass\n elif op.type in [\n 'ConditionalAccumulator', 'SparseConditionalAccumulator',\n 'Table'\n ]:\n # This can be removed after 2017/04/24.\n pass\n else:\n raise e\n\n del op.node_def.attr['_output_shapes']\n\n # Apply device functions for this op.\n # NOTE(mrry): We do this after configuring the inputs, because\n # the result of the device functions may depend on the inputs.\n with _MaybeDevice(node.device):\n g._apply_device_functions(op) # pylint: disable=protected-access\n\n # Treat unused input mappings as an error, because they are likely to be\n # due to a typo.\n unused_input_keys = frozenset(input_map.keys()).difference(used_input_keys)\n if unused_input_keys:\n raise ValueError(\n 'Attempted to map inputs that were not found in graph_def: [%s]'\n % ', '.join(unused_input_keys))\n\n if return_elements is None:\n return None\n else:\n ret = []\n for name in return_elements:\n name = compat.as_str(name)\n if ':' in name:\n try:\n operation_name, output_index = _ParseTensorName(name)\n ret.append(name_to_op[operation_name].outputs[output_index])\n except (ValueError, KeyError, IndexError):\n raise ValueError(\n 'Requested return_element %r not found in graph_def.' % name)\n else:\n try:\n ret.append(name_to_op[name])\n except KeyError:\n raise ValueError(\n 'Requested return_element %r not found in graph_def.' % name)\n return ret\n # LINT.ThenChange(//tensorflow/core/graph/graph_constructor.cc)\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Momentum for TensorFlow.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.training import optimizer\nfrom tensorflow.python.training import training_ops\n\n\nclass MomentumOptimizer(optimizer.Optimizer):\n \"\"\"Optimizer that implements the Momentum algorithm.\n\n Computes (if `use_nesterov = False`):\n \n ```\n accumulation = momentum * accumulation + gradient\n variable -= learning_rate * accumulation\n ```\n\n Note that in the dense version of this algorithm, `accumulation` is updated\n and applied regardless of a gradient's value, whereas the sparse version (when\n the gradient is an `IndexedSlices`, typically because of `tf.gather` or an\n embedding) only updates variable slices and corresponding `accumulation` terms\n when that part of the variable was used in the forward pass.\n \"\"\"\n\n def __init__(self, learning_rate, momentum,\n use_locking=False, name=\"Momentum\", use_nesterov=False):\n \"\"\"Construct a new Momentum optimizer.\n\n Args:\n learning_rate: A `Tensor` or a floating point value. The learning rate.\n momentum: A `Tensor` or a floating point value. The momentum.\n use_locking: If `True` use locks for update operations.\n name: Optional name prefix for the operations created when applying\n gradients. Defaults to \"Momentum\".\n use_nesterov: If `True` use Nesterov Momentum.\n See [Sutskever et. 
al., 2013](\n http://jmlr.org/proceedings/papers/v28/sutskever13.pdf)\n\n \"\"\"\n super(MomentumOptimizer, self).__init__(use_locking, name)\n self._learning_rate = learning_rate\n self._momentum = momentum\n self._use_nesterov = use_nesterov\n\n def _create_slots(self, var_list):\n for v in var_list:\n self._zeros_slot(v, \"momentum\", self._name)\n\n def _prepare(self):\n self._learning_rate_tensor = ops.convert_to_tensor(self._learning_rate,\n name=\"learning_rate\")\n self._momentum_tensor = ops.convert_to_tensor(self._momentum,\n name=\"momentum\")\n\n def _apply_dense(self, grad, var):\n mom = self.get_slot(var, \"momentum\")\n return training_ops.apply_momentum(\n var, mom,\n math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),\n grad,\n math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),\n use_locking=self._use_locking,\n use_nesterov=self._use_nesterov).op\n\n def _resource_apply_dense(self, grad, var):\n mom = self.get_slot(var, \"momentum\")\n return training_ops.resource_apply_momentum(\n var.handle, mom.handle,\n math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),\n grad,\n math_ops.cast(self._momentum_tensor, grad.dtype.base_dtype),\n use_locking=self._use_locking,\n use_nesterov=self._use_nesterov)\n\n def _apply_sparse(self, grad, var):\n mom = self.get_slot(var, \"momentum\")\n return training_ops.sparse_apply_momentum(\n var, mom,\n math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),\n grad.values, grad.indices,\n math_ops.cast(self._momentum_tensor, var.dtype.base_dtype),\n use_locking=self._use_locking,\n use_nesterov=self._use_nesterov).op\n\n def _resource_apply_sparse(self, grad, var, indices):\n mom = self.get_slot(var, \"momentum\")\n return training_ops.resource_sparse_apply_momentum(\n var.handle, mom.handle,\n math_ops.cast(self._learning_rate_tensor, grad.dtype),\n grad, indices,\n math_ops.cast(self._momentum_tensor, grad.dtype),\n use_locking=self._use_locking,\n use_nesterov=self._use_nesterov)\n", "# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport shutil\n\nimport tensorflow as tf\n\nfrom tensorflow.tensorboard.backend.event_processing import event_file_inspector as efi\n\n\nclass EventFileInspectorTest(tf.test.TestCase):\n\n def setUp(self):\n self.logdir = os.path.join(self.get_temp_dir(), 'tfevents')\n self._MakeDirectoryIfNotExists(self.logdir)\n\n def tearDown(self):\n shutil.rmtree(self.logdir)\n\n def _MakeDirectoryIfNotExists(self, path):\n if not os.path.exists(path):\n os.mkdir(path)\n\n def _WriteScalarSummaries(self, data, subdirs=('',)):\n # Writes data to a tempfile in subdirs, and returns generator for the data.\n # If subdirs is given, writes data identically to all subdirectories.\n for subdir_ in subdirs:\n subdir = os.path.join(self.logdir, subdir_)\n self._MakeDirectoryIfNotExists(subdir)\n\n sw = tf.summary.FileWriter(subdir)\n for datum in data:\n summary = tf.Summary()\n if 'simple_value' in datum:\n summary.value.add(tag=datum['tag'],\n simple_value=datum['simple_value'])\n sw.add_summary(summary, global_step=datum['step'])\n elif 'histo' in datum:\n summary.value.add(tag=datum['tag'], histo=tf.HistogramProto())\n sw.add_summary(summary, global_step=datum['step'])\n elif 'session_log' in datum:\n sw.add_session_log(datum['session_log'], global_step=datum['step'])\n sw.close()\n\n def testEmptyLogdir(self):\n # Nothing was written to logdir\n units = efi.get_inspection_units(self.logdir)\n self.assertEqual([], units)\n\n def testGetAvailableTags(self):\n data = [{'tag': 'c', 'histo': 2, 'step': 10},\n {'tag': 'c', 'histo': 2, 'step': 11},\n {'tag': 'c', 'histo': 2, 'step': 9},\n {'tag': 'b', 'simple_value': 2, 'step': 20},\n {'tag': 'b', 'simple_value': 2, 'step': 15},\n {'tag': 'a', 'simple_value': 2, 'step': 3}]\n self._WriteScalarSummaries(data)\n units = efi.get_inspection_units(self.logdir)\n tags = efi.get_unique_tags(units[0].field_to_obs)\n self.assertEqual(['a', 'b'], tags['scalars'])\n self.assertEqual(['c'], tags['histograms'])\n\n def testInspectAll(self):\n data = [{'tag': 'c', 'histo': 2, 'step': 10},\n {'tag': 'c', 'histo': 2, 'step': 11},\n {'tag': 'c', 'histo': 2, 'step': 9},\n {'tag': 'b', 'simple_value': 2, 'step': 20},\n {'tag': 'b', 'simple_value': 2, 'step': 15},\n {'tag': 'a', 'simple_value': 2, 'step': 3}]\n self._WriteScalarSummaries(data)\n units = efi.get_inspection_units(self.logdir)\n printable = efi.get_dict_to_print(units[0].field_to_obs)\n self.assertEqual(printable['histograms']['max_step'], 11)\n self.assertEqual(printable['histograms']['min_step'], 9)\n self.assertEqual(printable['histograms']['num_steps'], 3)\n self.assertEqual(printable['histograms']['last_step'], 9)\n self.assertEqual(printable['histograms']['first_step'], 10)\n self.assertEqual(printable['histograms']['outoforder_steps'], 
[(11, 9)])\n\n self.assertEqual(printable['scalars']['max_step'], 20)\n self.assertEqual(printable['scalars']['min_step'], 3)\n self.assertEqual(printable['scalars']['num_steps'], 3)\n self.assertEqual(printable['scalars']['last_step'], 3)\n self.assertEqual(printable['scalars']['first_step'], 20)\n self.assertEqual(printable['scalars']['outoforder_steps'], [(20, 15),\n (15, 3)])\n\n def testInspectTag(self):\n data = [{'tag': 'c', 'histo': 2, 'step': 10},\n {'tag': 'c', 'histo': 2, 'step': 11},\n {'tag': 'c', 'histo': 2, 'step': 9},\n {'tag': 'b', 'histo': 2, 'step': 20},\n {'tag': 'b', 'simple_value': 2, 'step': 15},\n {'tag': 'a', 'simple_value': 2, 'step': 3}]\n self._WriteScalarSummaries(data)\n units = efi.get_inspection_units(self.logdir, tag='c')\n printable = efi.get_dict_to_print(units[0].field_to_obs)\n self.assertEqual(printable['histograms']['max_step'], 11)\n self.assertEqual(printable['histograms']['min_step'], 9)\n self.assertEqual(printable['histograms']['num_steps'], 3)\n self.assertEqual(printable['histograms']['last_step'], 9)\n self.assertEqual(printable['histograms']['first_step'], 10)\n self.assertEqual(printable['histograms']['outoforder_steps'], [(11, 9)])\n self.assertEqual(printable['scalars'], None)\n\n def testSessionLogSummaries(self):\n data = [\n {\n 'session_log': tf.SessionLog(status=tf.SessionLog.START),\n 'step': 0\n },\n {\n 'session_log': tf.SessionLog(status=tf.SessionLog.CHECKPOINT),\n 'step': 1\n },\n {\n 'session_log': tf.SessionLog(status=tf.SessionLog.CHECKPOINT),\n 'step': 2\n },\n {\n 'session_log': tf.SessionLog(status=tf.SessionLog.CHECKPOINT),\n 'step': 3\n },\n {\n 'session_log': tf.SessionLog(status=tf.SessionLog.STOP),\n 'step': 4\n },\n {\n 'session_log': tf.SessionLog(status=tf.SessionLog.START),\n 'step': 5\n },\n {\n 'session_log': tf.SessionLog(status=tf.SessionLog.STOP),\n 'step': 6\n },\n ]\n\n self._WriteScalarSummaries(data)\n units = efi.get_inspection_units(self.logdir)\n self.assertEqual(1, len(units))\n printable = efi.get_dict_to_print(units[0].field_to_obs)\n self.assertEqual(printable['sessionlog:start']['steps'], [0, 5])\n self.assertEqual(printable['sessionlog:stop']['steps'], [4, 6])\n self.assertEqual(printable['sessionlog:checkpoint']['num_steps'], 3)\n\n def testInspectAllWithNestedLogdirs(self):\n data = [{'tag': 'c', 'simple_value': 2, 'step': 10},\n {'tag': 'c', 'simple_value': 2, 'step': 11},\n {'tag': 'c', 'simple_value': 2, 'step': 9},\n {'tag': 'b', 'simple_value': 2, 'step': 20},\n {'tag': 'b', 'simple_value': 2, 'step': 15},\n {'tag': 'a', 'simple_value': 2, 'step': 3}]\n\n subdirs = ['eval', 'train']\n self._WriteScalarSummaries(data, subdirs=subdirs)\n units = efi.get_inspection_units(self.logdir)\n self.assertEqual(2, len(units))\n directory_names = [os.path.join(self.logdir, name) for name in subdirs]\n self.assertEqual(directory_names, sorted([unit.name for unit in units]))\n\n for unit in units:\n printable = efi.get_dict_to_print(unit.field_to_obs)['scalars']\n self.assertEqual(printable['max_step'], 20)\n self.assertEqual(printable['min_step'], 3)\n self.assertEqual(printable['num_steps'], 6)\n self.assertEqual(printable['last_step'], 3)\n self.assertEqual(printable['first_step'], 10)\n self.assertEqual(printable['outoforder_steps'], [(11, 9), (20, 15),\n (15, 3)])\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
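The `EventAccumulator` class stored in the code list above documents its own usage pattern: construct it on an event directory, call `Reload()`, then query per-tag endpoints such as `Scalars(tag)`. The following standalone sketch illustrates that pattern; it is not part of the original record, the import path is an assumption based on the test file's `tensorflow.tensorboard.backend.event_processing` imports in this same record, and the path and tag name are hypothetical.

# Minimal usage sketch for the EventAccumulator defined above (assumed import path).
from tensorflow.tensorboard.backend.event_processing import event_accumulator as ea

# Point the accumulator at a directory of tf events files (hypothetical path).
# size_guidance of 0 for SCALARS means "keep every scalar event" per the docstring.
acc = ea.EventAccumulator('/tmp/my_run', size_guidance={ea.SCALARS: 0})

# Reload() synchronously loads everything written so far and returns self.
acc.Reload()

# Tags() reports what was found; Scalars(tag) returns ScalarEvent namedtuples
# carrying wall_time, step and value.
print(acc.Tags()[ea.SCALARS])
for event in acc.Scalars('loss'):   # 'loss' is a hypothetical tag name
    print(event.step, event.value)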
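`_CompressHistogram` and `_Remap` in the accumulator above reduce a variable-bucket histogram to fixed compression points by linearly interpolating cumulative bucket weights. The short sketch below reproduces that interpolation step with a local copy of the remap formula and made-up bucket numbers, purely as a worked example; it does not call the module itself.

def remap(x, x0, x1, y0, y1):
    # Same linear map as _Remap above: [x0, x1] onto [y0, y1].
    return y0 + (x - x0) * float(y1 - y0) / (x1 - x0)

# Hypothetical cumulative weights (in basis points) at the edges of one bucket,
# and the value range that bucket spans.
cumsum_prev, cumsum = 4000.0, 7000.0   # 40% and 70% of the total mass
lhs, rhs = 1.0, 2.0                    # bucket covers values 1.0 .. 2.0

# The 50th percentile (5000 bps) falls one third of the way through this
# bucket, so the interpolated value is ~1.333.
print(remap(5000.0, cumsum_prev, cumsum, lhs, rhs))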
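The graph importer in this record canonicalizes tensor names through `_ParseTensorName` before wiring inputs ("foo:0" and "foo" both resolve to output 0 of op "foo"; "foo:bar:baz" is rejected). The snippet below re-implements that documented behavior as a standalone helper for checking how a name string will be interpreted; it mirrors the docstring's examples and is not the module's own private function.

def parse_tensor_name(tensor_name):
    # Mirrors the documented canonicalization:
    #   "foo:0" -> ("foo", 0), "foo" -> ("foo", 0), "foo:bar:baz" -> ValueError
    components = tensor_name.split(':')
    if len(components) == 2:
        try:
            return components[0], int(components[1])
        except ValueError:
            raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))
    elif len(components) == 1:
        return components[0], 0
    raise ValueError('Cannot convert %r to a tensor name.' % (tensor_name,))

assert parse_tensor_name('foo:7') == ('foo', 7)
assert parse_tensor_name('foo') == ('foo', 0)
print('%s:%d' % parse_tensor_name('import/foo'))   # canonical form: import/foo:0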
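The `MomentumOptimizer` docstring above states the dense update rule for `use_nesterov = False`. As a sanity-check illustration only, here is that recurrence written out in NumPy on a toy quadratic loss; this is a sketch of the stated formula, not the optimizer's fused kernel.

import numpy as np

def momentum_step(var, accum, grad, learning_rate=0.1, momentum=0.9):
    # The rule quoted in the docstring:
    #   accumulation = momentum * accumulation + gradient
    #   variable    -= learning_rate * accumulation
    accum = momentum * accum + grad
    var = var - learning_rate * accum
    return var, accum

# Toy loss 0.5 * ||w||^2, whose gradient is w itself.
w = np.array([1.0, -2.0])
m = np.zeros_like(w)
for _ in range(5):
    w, m = momentum_step(w, m, grad=w)
# After five steps w is far closer to the origin than the 0.9**5 shrinkage
# that plain gradient descent with the same learning rate would give.
print(w)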
[ [ "tensorflow.core.protobuf.config_pb2.RunMetadata", "tensorflow.python.ops.math_ops.greater_equal", "tensorflow.core.protobuf.config_pb2.RunOptions", "tensorflow.python.summary.summary.histogram", "tensorflow.python.training.training_util.global_step", "tensorflow.python.client.timeline.Timeline", "tensorflow.python.ops.variables.report_uninitialized_variables", "tensorflow.python.framework.ops.IndexedSlices", "tensorflow.python.summary.summary.merge_all", "tensorflow.python.framework.ops.control_dependencies", "tensorflow.contrib.framework.python.ops.variables.get_or_create_global_step", "tensorflow.python.ops.lookup_ops.tables_initializer", "tensorflow.python.ops.clip_ops.clip_by_norm", "tensorflow.contrib.training.python.training.training.create_train_op", "tensorflow.python.ops.clip_ops.global_norm", "tensorflow.python.lib.io.file_io.write_string_to_file", "tensorflow.python.platform.tf_logging.info", "tensorflow.python.ops.math_ops.mod", "tensorflow.python.training.supervisor.Supervisor", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.ops.variables.global_variables_initializer", "tensorflow.python.ops.variables.local_variables_initializer", "tensorflow.python.training.saver.Saver", "tensorflow.python.framework.constant_op.constant" ], [ "tensorflow.tensorboard.backend.event_processing.reservoir.Reservoir", "tensorflow.python.framework.tensor_util.MakeNdarray", "tensorflow.core.protobuf.meta_graph_pb2.MetaGraphDef", "tensorflow.tensorboard.backend.event_processing.plugin_asset_util.ListAssets", "tensorflow.tensorboard.backend.event_processing.event_file_loader.EventFileLoader", "tensorflow.RunMetadata", "tensorflow.logging.log_first_n", "tensorflow.logging.warn", "tensorflow.tensorboard.backend.event_processing.directory_watcher.DirectoryWatcher", "tensorflow.python.framework.dtypes.as_dtype", "numpy.searchsorted", "tensorflow.GraphDef", "numpy.array", "tensorflow.tensorboard.backend.event_processing.plugin_asset_util.RetrieveAsset" ], [ "tensorflow.python.framework.tensor_shape.TensorShape", "tensorflow.python.util.compat.as_bytes", "tensorflow.python.framework.op_def_registry.get_registered_ops", "tensorflow.python.framework.ops.get_default_graph", "tensorflow.python.framework.ops.name_scope", "tensorflow.python.framework.dtypes.as_dtype", "tensorflow.core.framework.attr_value_pb2.AttrValue.ListValue", "tensorflow.python.util.compat.as_str", "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.framework.ops.device", "tensorflow.python.framework.ops.set_shapes_for_outputs", "tensorflow.core.framework.graph_pb2.GraphDef", "tensorflow.python.framework.function._from_library" ], [ "tensorflow.python.framework.ops.convert_to_tensor", "tensorflow.python.ops.math_ops.cast" ], [ "tensorflow.summary.FileWriter", "tensorflow.test.main", "tensorflow.tensorboard.backend.event_processing.event_file_inspector.get_dict_to_print", "tensorflow.tensorboard.backend.event_processing.event_file_inspector.get_inspection_units", "tensorflow.SessionLog", "tensorflow.HistogramProto", "tensorflow.Summary", "tensorflow.tensorboard.backend.event_processing.event_file_inspector.get_unique_tags" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.13", "1.5", "1.7", "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.8", "1.10", "1.12", "2.7", "2.6", "1.4", "1.13", "2.3", "2.4", "2.9", "1.5", "1.7", "2.5", "0.12", "1.0", "2.2", "1.2", "2.10" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.2" ] } ]
scotthavens/smrf
[ "a492d01a5eef994e00728c1cbed9f693879bbade", "a492d01a5eef994e00728c1cbed9f693879bbade" ]
[ "smrf/utils/wind/model.py", "smrf/envphys/phys.py" ]
[ "from __future__ import print_function\n\nimport os\n# import matplotlib.pyplot as plt\n# import progressbar\nfrom datetime import datetime\n\nimport netCDF4 as nc\nimport numpy as np\n\nfrom . import wind_c\n\n\nclass wind_model():\n \"\"\"\n\n Estimating wind speed and direction is complex terrain can be difficult due\n to the interaction of the local topography with the wind. The methods\n described here follow the work developed by Winstral and Marks (2002) and\n Winstral et al. (2009) :cite:`Winstral&Marks:2002` :cite:`Winstral&al:2009`\n which parameterizes the terrain based on the upwind direction. The\n underlying method calulates the maximum upwind slope (maxus) within a\n search distance to determine if a cell is sheltered or exposed.\n\n The azimuth **A** is the direction of the prevailing wind for which the\n maxus value will be calculated within a maximum search distance **dmax**.\n The maxus (**Sx**) parameter can then be estimated as the maximum value of\n the slope from the cell of interest to all of the grid cells along the\n search vector. The efficiency in selection of the maximum value can be\n increased by using the techniques from the horizon functio which calculates\n the horizon for each pixel. Therefore, less calculations can be performed.\n Negative **Sx** values indicate an exposed pixel location (shelter pixel\n was lower) and positive **Sx** values indicate a sheltered pixel (shelter\n pixel was higher).\n\n After all the upwind direction are calculated, the average **Sx** over a\n window is calculated. The average **Sx** accounts for larger lanscape\n obsticles that may be adjacent to the upwind direction and affect the flow.\n A window size in degrees takes the average of all **Sx**.\n\n Args:\n x: array of x locations\n y: array of y locations\n dem: matrix of the dem elevation values\n nthread: number of threads to use for maxus calculation\n\n \"\"\"\n\n def __init__(self, x, y, dem, nthreads=1):\n\n self.x = x\n self.y = y\n self.dem = dem\n self.nx = len(x)\n self.ny = len(y)\n self.ngrid = self.ny * self.nx\n self.nthreads = nthreads\n\n self.dx = np.abs(x[1] - x[0])\n self.dy = np.abs(y[1] - y[0])\n\n X, Y = np.meshgrid(np.arange(0, self.nx), np.arange(0, self.ny))\n self.X = X\n self.Y = Y\n self.shape = X.shape\n\n def maxus(self, dmax, inc=5, inst=2, out_file='smrf_maxus.nc'):\n \"\"\"\n Calculate the maxus values\n\n Args:\n dmax: length of outlying upwind search vector (meters)\n inc: increment between direction calculations (degrees)\n inst: Anemometer height (meters)\n out_file: NetCDF file for output results\n\n Returns:\n None, outputs maxus array straight to file\n \"\"\"\n\n if (dmax % self.dx != 0):\n raise ValueError('dmax must divide evenly into the DEM')\n\n self.dmax = dmax\n self.inc = inc\n self.inst_hgt = inst\n\n # All angles that model will consider.\n swa = np.arange(0, 360, inc)\n self.directions = swa\n\n # initialize the output file\n self.out_file = out_file\n self.type = 'maxus'\n ex_att = {}\n ex_att['dmax'] = dmax\n #initialize output\n self.output_init(self.type, out_file, ex_att=ex_att)\n\n # run model over range in wind directions\n for i, angle in enumerate(swa):\n\n self.maxus_val = self.maxus_angle(angle, self.dmax)\n self.output(self.type, i)\n\n def tbreak(self, dmax, sepdist, inc=5, inst=2, out_file='smrf_tbreak.nc'):\n \"\"\"\n Calculate the topobreak values\n\n Args:\n dmax: length of outlying upwind search vector (meters)\n sepdist: length of local max upwind slope search vector (meters)\n angle: middle upwind 
direction around which to run model (degrees)\n inc: increment between direction calculations (degrees)\n inst: Anemometer height (meters)\n out_file: NetCDF file for output results\n\n Returns:\n None, outputs maxus array straight to file\n\n \"\"\"\n\n if (sepdist % self.dx != 0) | (dmax % self.dx != 0):\n raise ValueError('sepdist and dmax must divide evenly into the DEM')\n\n self.dmax = dmax\n self.sepdist = sepdist\n self.inc = inc\n self.inst_hgt = inst\n\n # All angles that model will consider.\n swa = np.arange(0, 360, inc)\n self.directions = swa\n\n # initialize the output file\n self.out_file = out_file\n self.type = 'tbreak'\n # extra attributes\n ex_att = {}\n ex_att['dmax'] = dmax\n ex_att['sepdist'] = sepdist\n # initialize output\n self.output_init(self.type, out_file, ex_att=ex_att)\n\n\n # run model over range in wind directions\n for i, angle in enumerate(swa):\n\n # calculate the maxus value\n maxus_outlying = self.maxus_angle(angle, self.dmax)\n\n # calculate the local maxus value\n maxus_local = self.maxus_angle(angle, self.sepdist)\n\n self.maxus_val = maxus_local - maxus_outlying\n\n self.output(self.type, i)\n\n def maxus_angle(self, angle, dmax):\n \"\"\"\n Calculate the maxus for a single direction for a search distance dmax\n\n Note:\n This will produce different results than the original maxus\n program. The differences are due to:\n\n 1. Using dtype=double for the elevations\n 2. Using different type of search method to find the endpoints.\n\n However, if the elevations are rounded to integers, the cardinal\n directions will reproduce the original results.\n\n Args:\n angle: middle upwind direction around which to run model (degrees)\n dmax: length of outlying upwind search vector (meters)\n\n Returns:\n maxus: array of maximum upwind slope values within dmax\n\n \"\"\"\n\n print(\"Calculating maxus for direction: {}\".format(angle))\n\n angle *= np.pi / 180\n\n # calculate the endpoints\n # accually use the distances to ensure that we are searching far enough\n Xi = self.X*self.dx + dmax * np.cos(angle-np.pi/2)\n Yi = self.Y*self.dy + dmax * np.sin(angle-np.pi/2)\n\n self.Xi = np.floor(Xi/self.dx + 0.5)\n self.Yi = np.floor(Yi/self.dy + 0.5)\n\n # underlying C code similar to Adams\n maxus = wind_c.call_maxus(self.x, self.y, self.dem, self.X, self.Y,\n self.Xi, self.Yi, self.inst_hgt,\n self.nthreads)\n\n# # my interpretation of the calculations in Python form\n# maxus = np.zeros((self.ngrid,))\n# pbar = progressbar.ProgressBar(max_value=self.ngrid)\n# j = 0\n# for index in range(5000, self.ngrid):\n# maxus[index] = self.find_maxus(index)\n# j += 1\n# pbar.update(j)\n# if j > 4999:\n# break\n# pbar.finish()\n# maxus = np.reshape(maxus, self.shape)\n\n # correct for values that are their own horizon\n maxus[maxus <= -89.0] = 0\n\n return maxus\n\n def windower(self, maxus_file, window_width, wtype):\n \"\"\"\n Take the maxus output and average over the window width\n\n Args:\n maxus_file: location of the previously calculated maxus values\n window_width: window width about the wind direction\n wtype: type of wind calculation 'maxus' or 'tbreak'\n\n Return:\n New file containing the windowed values\n \"\"\"\n\n # open the previous file and get the directions\n n = nc.Dataset(maxus_file, 'r')\n directions = n.variables['direction'][:]\n self.directions = directions\n\n # create a new file based on the old file\n name = os.path.splitext(maxus_file)\n out_file = \"%s_%iwindow.nc\" % (name[0], window_width)\n self.output_init(wtype, out_file)\n self.out_file = 
out_file\n\n # determine which directions are required for each single direction\n window_width /= 2\n inc = np.mean(np.diff(directions), dtype=int)\n\n for i, d in enumerate(directions):\n\n print(\"Windowing direction {}\".format(d))\n\n # determine which directions to include\n window_start = d - window_width\n window_end = d + window_width\n\n # to ensure that it contains the end points\n sl = np.arange(window_start, window_end+1, inc)\n\n # correct for edge effects\n sl[sl < 0] = sl[sl < 0] + 360\n sl[sl > 360] = sl[sl > 360] - 360\n\n # determine the indicies to the input file\n idx = self.ismember(directions, sl)\n\n # grab all the data for all directions and average\n self.maxus_val = np.mean(n.variables[wtype][idx, :], axis=0)\n\n # put it into the output file\n self.output(wtype, i)\n\n n.close()\n\n def ismember(self, a, b):\n bind = {}\n for i, elt in enumerate(b):\n if elt not in bind:\n bind[elt] = True\n return [bind.get(itm, False) for itm in a]\n\n def find_maxus(self, index):\n \"\"\"\n Calculate the maxus given the start and end point\n\n Args:\n index: index to a point in the array\n\n Returns:\n maxus value for the point\n \"\"\"\n\n start_point = np.unravel_index(index, self.shape)\n\n # determine the points along the endpoint line\n end_point = (self.Yi[start_point], self.Xi[start_point])\n p = self.bresenham(start_point, end_point)\n\n # ensure the cases where it's on the edge\n p = np.delete(p, np.where(p[:, 0] < 0)[0], axis=0)\n p = np.delete(p, np.where(p[:, 1] < 0)[0], axis=0)\n p = np.delete(p, np.where(p[:, 0] > self.ny)[0], axis=0)\n p = np.delete(p, np.where(p[:, 1] > self.nx)[0], axis=0)\n\n # determine the relative heights along the path\n h = self.dem[p[:, 0], p[:, 1]] # - (self.inst_hgt + self.dem[index])\n\n# # determine the distrance along the path\n# xpath = self.x[p[:,1]]\n# ypath = self.y[p[:,0]]\n#\n# xstart = self.x[start_point[1]]\n# ystart = self.y[start_point[0]]\n#\n# dpath = np.sqrt(np.power(xpath - xstart, 2) + np.power(ypath - ystart, 2))\n#\n# # calculate the slope to each cell\n# rise = h - (h[0] + self.inst_hgt)\n#\n# slope = rise/dpath\n#\n# np.max(np.abs(slope[1:]))\n\n # find the horizon for each pixel along the path\n hord = self.hord(self.x[p[:, 1]], self.y[p[:, 0]], h)\n\n # calculate the angle to that point\n pt = p[hord[0], :] # point that was found for horizon\n d = np.sqrt(np.power(self.x[pt[1]] - self.x[start_point[1]], 2) +\n np.power(self.y[pt[0]] - self.y[start_point[0]], 2))\n\n slope = (h[hord[0]] - (h[0] + self.inst_hgt)) / d\n maxus = np.arctan(slope) * 180 / np.pi\n\n return maxus\n\n def bresenham(self, start, end):\n \"\"\"\n Python implementation of the Bresenham algorthim to find\n all the pixels that a line between start and end interscet\n\n Args:\n start: list of start point\n end: list of end point\n\n Returns:\n Array path of all points between start and end\n \"\"\"\n# start = list(start)\n# end = list(end)\n path = []\n\n x0 = start[0]\n y0 = start[1]\n x1 = end[0]\n y1 = end[1]\n\n steep = abs(y1 - y0) > abs(x1 - x0)\n backward = x0 > x1\n\n if steep:\n x0, y0 = y0, x0\n x1, y1 = y1, x1\n if backward:\n x0, x1 = x1, x0\n y0, y1 = y1, y0\n\n dx = x1 - x0\n dy = abs(y1 - y0)\n error = dx / 2\n y = y0\n\n if y0 < y1:\n ystep = 1\n else:\n ystep = -1\n\n for x in range(x0, x1+1):\n if steep:\n path.append((y, x))\n else:\n path.append((x, y))\n\n error -= dy\n\n if error <= 0:\n y += ystep\n error += dx\n\n if backward:\n path.reverse()\n\n return np.array(path)\n\n def hord(self, x, y, z):\n '''\n Calculate 
the horizon pixel for all z\n This mimics the simple algorthim from Dozier 1981 but\n was adapated for use in finding the maximum upwind slope\n\n Works backwards from the end but looks forwards for\n the horizon\n\n Args:\n x: x locations for the points\n y: y locations for the points\n z: elevations for the points\n\n Returns:\n array of the horizon index for each point\n\n '''\n\n N = len(z) # number of points to look at\n # offset = 1 # offset from current point to start looking\n\n # preallocate the h array\n h = np.zeros(N, dtype=int)\n h[N-1] = N-1\n i = N - 2\n\n # work backwarks from the end for the pixels\n while i >= 0:\n h[i] = i\n j = i + 1 # looking forward\n found = False\n\n while not found:\n\n d_i = np.sqrt(np.power(x[i] - x[j], 2) +\n np.power(y[i] - y[j], 2))\n d_h = np.sqrt(np.power(x[i] - x[h[j]], 2) +\n np.power(y[i] - y[h[j]], 2))\n\n pt_i = self._slope(0, z[i]+self.inst_hgt, d_i, z[j])\n pt_h = self._slope(0, z[i]+self.inst_hgt, d_h, z[h[j]])\n\n if (pt_i < pt_h):\n if (j == N-1):\n found = True\n h[i] = j\n else:\n j = h[j]\n else:\n found = True\n if (pt_i > pt_h):\n h[i] = j\n else:\n h[i] = h[j]\n\n i -= 1\n\n return h\n\n def _slope(self, xi, zi, xj, zj):\n '''\n Slope between the two points\n '''\n\n return (zj - zi) / (xj - float(xi))\n\n def output_init(self, ptype, filename, ex_att=None):\n \"\"\"\n Initialize a NetCDF file for outputing the maxus values or tbreak\n\n Args:\n ptype: type of calculation that will be saved, either 'maxus' or\n 'tbreak'\n filename: filename to save the output into\n ex_att: extra attributes to add\n \"\"\"\n\n if ptype == 'maxus':\n var = 'maxus'\n desc = 'Maximum upwind slope'\n\n elif ptype == 'tbreak':\n var = 'tbreak'\n desc = 'tbreak'\n\n else:\n raise ValueError('''Could not determine what to output, check type\n value (maxus or tbreak)''')\n\n dimensions = ('Direction', 'y', 'x')\n\n s = nc.Dataset(filename, 'w', 'NETCDF4')\n\n s.createDimension(dimensions[0], len(self.directions))\n s.createDimension(dimensions[1], self.ny)\n s.createDimension(dimensions[2], self.nx)\n\n # create the variables\n s.createVariable('direction', 'i', dimensions[0])\n s.createVariable('y', 'f', dimensions[1])\n s.createVariable('x', 'f', dimensions[2])\n s.createVariable(var, 'f', dimensions)\n\n # define some attributes\n setattr(s.variables['y'], 'units', 'meters')\n setattr(s.variables['y'], 'description', 'UTM, north south')\n setattr(s.variables['x'], 'units', 'meters')\n setattr(s.variables['x'], 'description', 'UTM, east west')\n setattr(s.variables['direction'], 'units', 'bearing')\n setattr(s.variables['direction'], 'description',\n 'Wind direction from North')\n setattr(s.variables[var], 'units', 'angle')\n setattr(s.variables[var], 'description', desc)\n setattr(s, 'dateCreated', datetime.now().isoformat())\n\n # set attributes\n if ex_att is not None:\n for key, value in ex_att.items():\n setattr(s, key, value)\n\n s.variables['y'][:] = self.y\n s.variables['x'][:] = self.x\n\n def output(self, ptype, index):\n \"\"\"\n Output the data into the out file that has previously been initialized.\n\n Args:\n ptype: type of calculation that will be saved, either 'maxus'\n or 'tbreak'\n index: index into the file for where to place the output\n \"\"\"\n\n s = nc.Dataset(self.out_file, 'r+')\n s.variables['direction'][:] = self.directions\n s.variables[ptype][index, :] = self.maxus_val\n s.close()\n", "\"\"\"\nCreated April 15, 2015\n\nCollection of functions to calculate various physical parameters\n\n@author: Scott 
Havens\n\"\"\"\n\nimport numpy as np\n\nfrom smrf.envphys import thermal_radiation\n\n\ndef idewpt(vp):\n \"\"\"\n Calculate the dew point given the vapor pressure\n\n Args:\n vp - array of vapor pressure values in [Pa]\n\n Returns:\n dewpt - array same size as vp of the calculated\n dew point temperature [C] (see Dingman 2002).\n\n \"\"\"\n\n # ensure that vp is a numpy array\n vp = np.array(vp)\n\n # take the log and convert to kPa\n vp = np.log(vp/float(1000))\n\n # calculate the vapor pressure\n Td = (vp + 0.4926) / (0.0708 - 0.00421*vp)\n\n return Td\n\ndef rh2vp(ta, rh):\n \"\"\"\n Calculate the vapor pressure given the air temperature\n and relative humidity\n \n Args:\n ta: array of air temperature in [C]\n rh: array of relative humidity from 0-100 [%]\n \n Returns:\n vapor pressure\n \"\"\"\n \n if rh.flat[0] >= 1.0:\n rh = rh/100.0\n \n satvp = thermal_radiation.sati(ta + 273.15)\n \n return satvp * rh\n\ndef satvp(dpt):\n \"\"\"\n Calculate the saturation vapor pressure at the dew point\n temperature.\n \n Args:\n dwpt: array of dew point temperature in [C]\n \n Returns\n vapor_pressure\n \"\"\"\n \n return thermal_radiation.sati(dpt + 273.15)\n" ]
[ [ "numpy.abs", "numpy.arctan", "numpy.power", "numpy.arange", "numpy.cos", "numpy.sin", "numpy.diff", "numpy.mean", "numpy.floor", "numpy.array", "numpy.unravel_index", "numpy.where", "numpy.zeros" ], [ "numpy.array" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
saeedashrraf/ivy
[ "ad57c359a11459d68965d9044a76ea30e175bf16" ]
[ "ivy/functional/backends/tensorflow/creation.py" ]
[ "# global\nimport tensorflow as tf\nfrom tensorflow import Tensor\nfrom typing import Union, Tuple, List, Optional\nfrom tensorflow.python.framework.dtypes import DType\n\n# local\nimport ivy\nfrom ivy.functional.backends.tensorflow import Dtype\nfrom ivy import (\n dev_from_str,\n default_device,\n dtype_from_str,\n default_dtype,\n dtype_to_str,\n)\n\n\n# Array API Standard #\n# -------------------#\n\n\ndef asarray(object_in, dtype=None, device=None, copy=None):\n device = default_device(device)\n with tf.device(dev_from_str(device)):\n if copy:\n if dtype is None and isinstance(object_in, tf.Tensor):\n return tf.identity(object_in)\n if dtype is None and not isinstance(object_in, tf.Tensor):\n try:\n dtype = dtype_from_str(default_dtype(item=object_in))\n tensor = tf.convert_to_tensor(object_in,dtype= dtype)\n except (TypeError, ValueError):\n dtype = dtype_to_str(default_dtype(dtype, object_in))\n tensor = tf.convert_to_tensor(\n ivy.nested_map(object_in, lambda x: tf.cast(x, dtype)),\n dtype=dtype,\n )\n return tf.identity(tf.cast(tensor, dtype))\n else:\n dtype = dtype_to_str(default_dtype(dtype, object_in))\n try:\n tensor = tf.convert_to_tensor(object_in, dtype=dtype)\n except (TypeError, ValueError):\n tensor = tf.convert_to_tensor(\n ivy.nested_map(object_in, lambda x: tf.cast(x, dtype)),\n dtype=dtype,\n )\n return tf.identity(tf.cast(tensor, dtype))\n else:\n if dtype is None and isinstance(object_in, tf.Tensor):\n return object_in\n if dtype is None and not isinstance(object_in, tf.Tensor):\n try:\n return tf.convert_to_tensor(object_in)\n except (TypeError, ValueError):\n dtype = dtype_to_str(default_dtype(dtype, object_in))\n return tf.convert_to_tensor(\n ivy.nested_map(object_in, lambda x: tf.cast(x, dtype)),\n dtype=dtype,\n )\n else:\n dtype = dtype_to_str(default_dtype(dtype, object_in))\n try:\n tensor = tf.convert_to_tensor(object_in, dtype=dtype)\n except (TypeError, ValueError):\n tensor = tf.convert_to_tensor(\n ivy.nested_map(object_in, lambda x: tf.cast(x, dtype)),\n dtype=dtype,\n )\n return tf.cast(tensor, dtype)\n\n\ndef zeros(\n shape: Union[int, Tuple[int]],\n dtype: Optional[Dtype] = None,\n device: Optional[str] = None,\n) -> Tensor:\n device = default_device(device)\n with tf.device(dev_from_str(device)):\n return tf.zeros(shape, dtype_from_str(default_dtype(dtype)))\n\n\ndef ones(\n shape: Union[int, Tuple[int]],\n dtype: Optional[DType] = None,\n device: Optional[str] = None,\n) -> tf.Tensor:\n dtype = dtype_from_str(default_dtype(dtype))\n device = dev_from_str(default_device(device))\n with tf.device(device):\n return tf.ones(shape, dtype)\n\n\ndef full_like(\n x: Tensor,\n fill_value: Union[int, float],\n dtype: Optional[Union[DType, str, None]] = None,\n device: Optional[str] = None,\n) -> Tensor:\n dtype = tf.DType(dtype) if dtype is str else dtype\n device = dev_from_str(default_device(device))\n with tf.device(device):\n return tf.experimental.numpy.full_like(x, fill_value, dtype=dtype)\n\n\ndef ones_like(\n x: Tensor,\n dtype: Optional[Union[DType, str, None]] = None,\n device: Optional[str] = None,\n) -> Tensor:\n dtype = tf.DType(dtype) if dtype is str else dtype\n device = default_device(device)\n with tf.device(dev_from_str(device)):\n return tf.ones_like(x, dtype=dtype)\n\n\ndef zeros_like(\n x: Tensor, dtype: Optional[Dtype] = None, device: Optional[str] = None\n) -> Tensor:\n device = default_device(device)\n with tf.device(dev_from_str(device)):\n return tf.zeros_like(x, dtype=dtype)\n\n\ndef tril(x: tf.Tensor, k: int = 0) -> 
tf.Tensor:\n return tf.experimental.numpy.tril(x, k)\n\n\ndef triu(x: tf.Tensor, k: int = 0) -> tf.Tensor:\n return tf.experimental.numpy.triu(x, k)\n\n\ndef empty(\n shape: Union[int, Tuple[int]],\n dtype: Optional[Dtype] = None,\n device: Optional[str] = None,\n) -> Tensor:\n device = default_device(device)\n with tf.device(dev_from_str(device)):\n return tf.experimental.numpy.empty(shape, dtype_from_str(default_dtype(dtype)))\n\n\ndef empty_like(\n x: Tensor,\n dtype: Optional[Union[DType, str, None]] = None,\n device: Optional[str] = None,\n) -> Tensor:\n dtype = tf.DType(dtype) if dtype is str else dtype\n device = default_device(device)\n with tf.device(dev_from_str(device)):\n return tf.experimental.numpy.empty_like(x, dtype=dtype)\n\n\ndef linspace(start, stop, num, axis=None, device=None, dtype=None, endpoint=True):\n if axis is None:\n axis = -1\n device = default_device(device)\n with tf.device(ivy.dev_from_str(device)):\n start = tf.constant(start, dtype=dtype)\n stop = tf.constant(stop, dtype=dtype)\n if not endpoint:\n ans = tf.linspace(start, stop, num + 1, axis=axis)[:-1]\n else:\n ans = tf.linspace(start, stop, num, axis=axis)\n if dtype is None:\n dtype = tf.float32\n ans = tf.cast(ans, dtype)\n return ans\n\n\ndef meshgrid(*arrays: tf.Tensor, indexing: str = \"xy\") -> List[tf.Tensor]:\n return tf.meshgrid(*arrays, indexing=indexing)\n\n\ndef eye(\n n_rows: int,\n n_cols: Optional[int] = None,\n k: Optional[int] = 0,\n dtype: Optional[Dtype] = None,\n device: Optional[str] = None,\n) -> tf.Tensor:\n dtype = dtype_from_str(default_dtype(dtype))\n device = dev_from_str(default_device(device))\n with tf.device(device):\n if n_cols is None:\n n_cols = n_rows\n i = tf.eye(n_rows, n_cols, dtype=dtype)\n if k == 0:\n return i\n elif -n_rows < k < 0:\n return tf.concat([tf.zeros([-k, n_cols], dtype=dtype), i[: n_rows + k]], 0)\n elif 0 < k < n_cols:\n return tf.concat(\n [tf.zeros([n_rows, k], dtype=dtype), i[:, : n_cols - k]], 1\n )\n else:\n return tf.zeros([n_rows, n_cols], dtype=dtype)\n\n\n# noinspection PyShadowingNames\ndef arange(start, stop=None, step=1, dtype=None, device=None):\n\n if stop is None:\n stop = start\n start = 0\n if (step > 0 and start > stop) or (step < 0 and start < stop):\n if isinstance(stop, float):\n stop = float(start)\n else:\n stop = start\n\n device = dev_from_str(default_device(device))\n with tf.device(device):\n\n if dtype is None:\n if (\n isinstance(start, int)\n and isinstance(stop, int)\n and isinstance(step, int)\n ):\n return tf.cast(\n tf.range(start, stop, delta=step, dtype=tf.int64), tf.int32\n )\n else:\n return tf.range(start, stop, delta=step)\n else:\n dtype = dtype_from_str(default_dtype(dtype))\n if dtype in [tf.int8, tf.uint8, tf.int16, tf.uint16, tf.uint32, tf.uint64]:\n return tf.cast(tf.range(start, stop, delta=step, dtype=tf.int64), dtype)\n else:\n return tf.range(start, stop, delta=step, dtype=dtype)\n\n\ndef full(\n shape: Union[int, Tuple[int, ...]],\n fill_value: Union[int, float],\n dtype: Optional[Dtype] = None,\n device: Optional[str] = None,\n) -> Tensor:\n with tf.device(dev_from_str(default_device(device))):\n return tf.fill(\n shape,\n tf.constant(\n fill_value, dtype=dtype_from_str(default_dtype(dtype, fill_value))\n ),\n )\n\n\ndef from_dlpack(x):\n return tf.experimental.dlpack.from_dlpack(x)\n\n\n# Extra #\n# ------#\n\narray = asarray\n\n\ndef logspace(start, stop, num, base=10.0, axis=None, device=None):\n power_seq = linspace(start, stop, num, axis, default_device(device))\n return base**power_seq\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.device", "tensorflow.zeros", "tensorflow.cast", "tensorflow.DType", "tensorflow.linspace", "tensorflow.experimental.numpy.empty_like", "tensorflow.experimental.numpy.full_like", "tensorflow.experimental.numpy.triu", "tensorflow.identity", "tensorflow.zeros_like", "tensorflow.experimental.dlpack.from_dlpack", "tensorflow.meshgrid", "tensorflow.constant", "tensorflow.range", "tensorflow.ones_like", "tensorflow.ones", "tensorflow.eye", "tensorflow.experimental.numpy.tril" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
mengfanShi/Pose-Estimate
[ "82de2ec53b2de83ccc47fbbadc97bed8924f6d60" ]
[ "Eval/eval.py" ]
[ "# -*- coding:utf-8 -*-\r\n# @TIME :2019/3/11 16:02\r\n# @Author :Fan\r\n# @File :eval.py\r\n\r\nimport torch\r\nimport os\r\nimport sys\r\nsys.path.insert(0, '..')\r\nimport argparse\r\nfrom Eval.COCO_eval import run_eval\r\nfrom Train.Network.Hourglass import Get_Hourglass\r\nfrom Train.Network.rtpose_vgg import get_model\r\n\r\nParse = argparse.ArgumentParser(description='Type of image')\r\nParse.add_argument('--img_size', type=int, default=256)\r\nParse.add_argument('--stride', type=int, default=4)\r\nargs = Parse.parse_args()\r\n\r\ngpu_id = 2\r\nos.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)\r\n\r\n# Load Hourglass Network\r\ndef load_model(num_stack=4, num_block=3, depth=4):\r\n\tweight_name = sys.path[0] + '/Data/Stored_model/' + \\\r\n\t 'pose_Hourglass_{}_{}_{}.pth'.format(num_stack, num_block, depth)\r\n\tmodel = Get_Hourglass(num_stacks=num_stack, num_blocks=num_block,\r\n\t paf_classes=38, ht_classes=19, depth=depth)\r\n\tprint('pose_Hourglass_{}_{}_{}'.format(num_stack, num_block, depth))\r\n\tmodel.load_state_dict(torch.load(weight_name))\r\n\tmodel.eval()\r\n\tmodel.float()\r\n\tmodel = model.cuda()\r\n\treturn model\r\n\r\ndef load_vgg():\r\n\tweight_name = sys.path[0] + '/Data/Stored_model/'+'pose_model.pth'\r\n\r\n\tmodel = get_model('vgg19') \r\n\tmodel.load_state_dict(torch.load(weight_name), strict=False)\r\n\tmodel.float()\r\n\tmodel.eval()\r\n\tmodel = model.cuda()\r\n\treturn model\r\n\r\nif __name__ == '__main__':\r\n model = load_model()\r\n with torch.no_grad():\r\n AP = run_eval(image_dir=sys.path[0] + '/Data/COCO/image',\r\n anno_dir=sys.path[0] + '/Data/COCO',\r\n store_dir=sys.path[0] + '/Data/Stored_image',\r\n image_list_txt=sys.path[0] + '/Data/COCO/image_info_val2014_1k.txt',\r\n model=model, preprocess='rtpose',\r\n size=args.img_size, stride=args.stride)\r\n print('\\nThe Average Precision is %.3f' % AP)\r\n" ]
[ [ "torch.no_grad", "torch.load" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
fsoubelet/PyHEADTAIL
[ "51cae8845cceb61cc3f140db4ab0eeb68469110f", "51cae8845cceb61cc3f140db4ab0eeb68469110f", "51cae8845cceb61cc3f140db4ab0eeb68469110f" ]
[ "tests/simulation_tests/test_bb_resonator.py", "examples/impedances/002_wake_table.py", "tests/test_particles.py" ]
[ "import numpy as np\nfrom scipy.constants import c as c_light, e as qe, m_p\nfrom scipy.signal import hilbert\nfrom scipy.stats import linregress\n\nfrom PyHEADTAIL.impedances import wakes\nfrom PyHEADTAIL.machines.synchrotron import Synchrotron\nfrom PyHEADTAIL.particles.slicing import UniformBinSlicer\n\n\ndef test_bbresonator():\n\n n_attempts = 5\n\n n_turns = 3000\n macroparticlenumber = int(1e5)\n\n expected_growth_rate_x = 59e-4\n expected_growth_rate_y = 25e-4\n rel_tolerance = 5e-2\n\n # Beam and machine parameters\n intensity = 2.5e11\n epsn_x = 2e-6 # normalised horizontal emittance\n epsn_y = 2e-6 # normalised vertical emittance\n sigma_z = 0.23 # RMS bunch length in meters\n\n circumference = 6911.5038378975451\n\n p0_eVperc = 26e9\n p0 = p0_eVperc * qe / c_light\n\n beta_x = 54.644808743169399\n beta_y = 54.509415262636274\n\n Q_x = 20.13\n Q_y = 20.18\n\n alpha_mom = 0.0030864197530864196\n\n h_RF = [4620, 4*4620]\n V_RF = [4.5e6, 0.45e6]\n dphi_RF = [0., np.pi]\n p_increment = 0.\n\n # Create machine\n machine = Synchrotron(optics_mode='smooth', circumference=circumference,\n n_segments=1, beta_x=beta_x, beta_y=beta_y,\n D_x=0.0, D_y=0.0,\n accQ_x=Q_x, accQ_y=Q_y, Qp_x=0.0, Qp_y=0.0,\n alpha_mom_compaction=alpha_mom,\n longitudinal_mode='non-linear', h_RF=h_RF, V_RF=V_RF,\n dphi_RF=dphi_RF, p_increment=p_increment,\n p0=p0, charge=qe, mass=m_p)\n\n # Create BB resonator wake\n # Resonator parameters\n R_shunt = 10.2e6 # Shunt impedance [Ohm/m]\n frequency = 1.3e9 # Resonance frequency [Hz]\n Q = 1 # Quality factor\n\n slices = 200\n slicer_for_wakes = UniformBinSlicer(slices, n_sigma_z=6)\n\n # Wake\n wake = wakes.CircularResonator(R_shunt, frequency, Q)\n wake_field = wakes.WakeField(slicer_for_wakes, wake)\n\n machine.one_turn_map.append(wake_field)\n\n # Loop over attempts\n i_attempt = 0\n while i_attempt < n_attempts:\n\n print(f\"Attempt {i_attempt+1}:\")\n\n # Create beam\n bunch = machine.generate_6D_Gaussian_bunch_matched(\n n_macroparticles=macroparticlenumber,\n intensity=intensity,\n epsn_x=epsn_x,\n epsn_y=epsn_y,\n sigma_z=sigma_z,\n )\n\n # Create arrays for saving\n x = np.zeros(n_turns, dtype=float)\n y = np.zeros(n_turns, dtype=float)\n\n # Tracking loop\n for i in range(n_turns):\n\n for m in machine.one_turn_map:\n m.track(bunch)\n\n x[i], y[i] = bunch.mean_x(), bunch.mean_y()\n\n # Check results\n turns = np.arange(n_turns)\n\n iMin = 1500\n iMax = n_turns\n\n ampl_x = np.abs(hilbert(x))\n b, a, r, p, stde = linregress(turns[iMin:iMax], np.log(ampl_x[iMin:iMax]))\n print(f\"Growth rate x {b*1e4:.2f} [10^-4/turn]\")\n\n # assert np.isclose(b, expected_growth_rate_x, rtol=rel_tolerance), \\\n # \"Horizontal growth rate does not match expectation.\"\n check_x = np.isclose(b, expected_growth_rate_x, rtol=rel_tolerance)\n\n ampl_y = np.abs(hilbert(y))\n b, a, r, p, stde = linregress(turns[iMin:iMax], np.log(ampl_y[iMin:iMax]))\n print(f\"Growth rate y {b*1e4:.2f} [10^-4/turn]\")\n\n # assert np.isclose(b, expected_growth_rate_y, rtol=rel_tolerance), \\\n # \"Vertical growth rate does not match expectation.\"\n check_y = np.isclose(b, expected_growth_rate_y, rtol=rel_tolerance)\n\n assert check_x or i_attempt < n_attempts-1, \\\n f\"After {n_attempts} attempts horizontal growth rate \" \\\n \"doesn't match expectation.\"\n assert check_y or i_attempt < n_attempts-1, \\\n f\"After {n_attempts} attempts vertical growth rate \" \\\n \"doesn't match expectation.\"\n if check_x and check_y:\n print(f\"Passed on {i_attempt + 1}. 
attempt\")\n break\n i_attempt += 1\n\n", "import time\n\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom scipy.constants import c as c_light, e as qe, m_p\nfrom scipy.stats import linregress\nfrom scipy.signal import hilbert\n\nfrom PyHEADTAIL.feedback.transverse_damper import TransverseDamper\nfrom PyHEADTAIL.impedances.wakes import WakeTable, WakeField\nfrom PyHEADTAIL.machines.synchrotron import Synchrotron\nfrom PyHEADTAIL.particles.slicing import UniformBinSlicer\n\n\nn_turns = 7000\nn_macroparticles = int(1e4)\n\n# Machine parameters\nmachine_name = 'LHC'\nenergy = 7e12 # [eV]\nrest_energy = m_p * c_light**2 / qe # [eV]\ngamma = energy / rest_energy\nbetar = np.sqrt(1 - 1 / gamma ** 2)\np0 = m_p * betar * gamma * c_light\n\nbeta_x = 68.9\nbeta_y = 70.34\n\nQ_x = 64.31\nQ_y = 59.32\n\nalpha_mom = 3.483575072011584e-04\n\neta = alpha_mom - 1.0 / gamma**2\nV_RF = 12.0e6\nh_RF = 35640\nQ_s = np.sqrt(qe * V_RF * eta * h_RF / (2 * np.pi * betar * c_light * p0))\n\ncircumference = 26658.883199999\n\nsigma_z = 1.2e-9 / 4.0 * c_light\n\nbunch_intensity = 2e11\nepsn = 1.8e-6\n\n# Wake field\nn_slices_wakes = 200\nlimit_z = 3 * sigma_z\nslicer_for_wakefields = UniformBinSlicer(n_slices_wakes,\n z_cuts=(-limit_z, limit_z))\n\nwakefile = ('wakes/wakeforhdtl_PyZbase_Allthemachine_7000GeV'\n '_B1_2021_TeleIndex1_wake.dat')\n\nwaketable = WakeTable(wakefile, ['time', 'dipole_x', 'dipole_y',\n 'quadrupole_x', 'quadrupole_y'])\n\nwake_field = WakeField(slicer_for_wakefields, waketable)\n\n# Damper\ndamping_time = 7000 # [turns]\ndamper = TransverseDamper(dampingrate_x=damping_time,\n dampingrate_y=damping_time)\n\n# Detuners\nQp_x = -5.0\nQp_y = 0.0\ni_oct = 15.\ndetx_x = 1.4e5 * i_oct / 550.0\ndetx_y = -1.0e5 * i_oct / 550.0\n\n# Create synchrotron\nmachine = Synchrotron(optics_mode='smooth', circumference=circumference,\n n_segments=1,\n alpha_x=0.0, beta_x=beta_x, D_x=0.0,\n alpha_y=0.0, beta_y=beta_y, D_y=0.0,\n accQ_x=Q_x, accQ_y=Q_y, Qp_x=Qp_x, Qp_y=Qp_y,\n app_x=detx_x * p0, app_y=detx_x * p0, app_xy=detx_y * p0,\n alpha_mom_compaction=alpha_mom,\n longitudinal_mode='linear', Q_s=Q_s,\n dphi_RF=0.0, p_increment=0.0, p0=p0,\n charge=qe, mass=m_p, RF_at='end_of_transverse')\n\nmachine.one_turn_map.append(wake_field)\nmachine.one_turn_map.append(damper)\n\nparticles = machine.generate_6D_Gaussian_bunch_matched(\n n_macroparticles,\n intensity=bunch_intensity,\n epsn_x=epsn,\n epsn_y=epsn,\n sigma_z=sigma_z,\n)\n\nprint(\"\\n--> Bunch length and emittance: {:g} m, {:g} eVs.\".format(\n particles.sigma_z(), particles.epsn_z()))\n\nsx = np.sqrt(epsn * beta_x / gamma / betar)\n\n# Array for saving\nx = np.zeros(n_turns, dtype=float)\n\n# Tracking loop\nprint('\\nTracking...')\n\ntime_0 = time.time()\nfor turn in range(n_turns):\n\n if turn % 500 == 0:\n print('Turn {:d}/{:d}'.format(turn, n_turns))\n\n machine.track(particles)\n x[turn] = particles.mean_x()\n\nprint(f\"Time for tracking: {time.time() - time_0} s\")\n\nturns = np.arange(n_turns)\niMin = 1000\niMax = n_turns - 1000\n\n# Plot results\nplt.figure(0)\n\nplt.plot(turns, x/sx)\n\nampl = np.abs(hilbert(x))\nb, a, r, p, stderr = linregress(turns[iMin:iMax], np.log(ampl[iMin:iMax]))\nplt.plot(turns, np.exp(a + b * turns)/sx, \"--k\", label=f\"{1/b:.2f} turns\")\nprint(f\"Growth rate {b*1e4:.2f} [10^-4/turn]\")\n\nplt.title(f\"{machine_name} {energy*1e-12:.0f} TeV\")\nplt.legend()\nplt.xlabel(\"Turn\")\nplt.ylabel(r\"x [$\\sigma_x$]\")\n\nplt.show()\n", "'''\n@date: 10/07/2015\n@author: Stefan Hegglin, Adrian 
Oeftiger\n'''\n\n\n\nimport unittest\nimport numpy as np\nfrom scipy.constants import c, e, m_p\n\nfrom PyHEADTAIL.particles.particles import Particles\nfrom PyHEADTAIL.particles.slicing import UniformBinSlicer\nfrom PyHEADTAIL.general.printers import SilentPrinter\n\nclass TestParticles(unittest.TestCase):\n\n def setUp(self):\n #beam parameters\n self.intensity = 1.234e9\n self.circumference = 111.\n self.gamma = 20.1\n\n #simulation parameters\n self.macroparticlenumber = 2048\n self.particlenumber_per_mp = self.intensity/self.macroparticlenumber\n\n #create a bunch\n self.bunch = self.create_bunch()\n self.slicer = self.create_slicer()\n\n def tearDown(self):\n pass\n\n def test_particles_initialisation(self):\n '''Tests whether the parameters passed to Particles()\n are initialized correctly\n '''\n self.assertEqual(self.macroparticlenumber,\n self.bunch.macroparticlenumber,\n 'initialisation of macroparticlenumber incorrect')\n self.assertEqual(self.gamma,self.bunch.gamma,\n 'initialisation of gamma incorrect')\n self.assertEqual(self.intensity,self.bunch.intensity,\n 'initialisation of intensity incorrect')\n self.assertEqual(self.circumference,self.bunch.circumference,\n 'initialisation of circumference incorrect')\n\n def test_coords_dict_copy(self):\n '''Tests whether get_coords_n_momenta() returns a copy'''\n coords_n_momenta_copy = self.bunch.get_coords_n_momenta_dict()\n self.assertFalse(coords_n_momenta_copy\n is self.bunch.coords_n_momenta,\n 'get_coords_n_momenta() returns a reference')\n\n def test_update_faulty_coords(self):\n '''Tests whether an exception is raised if coords/momenta have\n different lengths than the number of macroparticles\n '''\n coords_n_momenta = self.bunch.get_coords_n_momenta_dict()\n len_mismatch = -1\n coords_n_momenta['x'] = np.zeros(self.macroparticlenumber-len_mismatch)\n with self.assertRaises(Exception):\n self.bunch.update(coords_n_momenta)\n\n def test_coords_add(self):\n '''Tests whether adding two extra coordinates to the coords_n_momenta\n dictionary works or raises an exception iff the keys are already used\n '''\n extra_coords = {\n 'a': np.zeros(self.macroparticlenumber),\n 'ap': np.ones(self.macroparticlenumber)\n }\n n_keys_before = len(self.bunch.coords_n_momenta)\n self.bunch.add(extra_coords)\n n_keys_after = len(self.bunch.coords_n_momenta)\n self.assertEqual(n_keys_before+len(extra_coords),n_keys_after,\n 'Particles.update() not working correctly')\n duplicate_coords = {\n 'b': np.zeros(self.macroparticlenumber),\n 'y': np.ones(self.macroparticlenumber)\n }\n with self.assertRaises(Exception):\n self.bunch.add(duplicate_coords)\n\n def test_setters_getters(self):\n '''Tests all setters and getters properties of the Particles class'''\n properties=[prop for prop in dir(Particles)\n if isinstance(getattr(Particles, prop), property)]\n for p in properties:\n self.setter_getter_test(p)\n\n def setter_getter_test(self, prop):\n '''Tests the setter/getter of property prop via the\n getattr()/setattr() functions. 
Called by test_setters_getters()\n '''\n new_value = 0.9 * getattr(self.bunch, prop)\n setattr(self.bunch, prop, new_value)\n if isinstance(getattr(self.bunch, prop), np.ndarray):\n self.assertTrue(np.allclose(getattr(self.bunch, prop), new_value),\n msg='getter/setter for property '\n + prop + ' incorrect')\n else:\n self.assertAlmostEqual(getattr(self.bunch, prop), new_value,\n msg='getter/setter for property '\n + prop + ' incorrect')\n\n def test_get_slices(self):\n '''Tests the get_slices() method on consistency after\n multiple calls.\n '''\n slice_set = self.bunch.get_slices(self.slicer)\n self.assertEqual(slice_set,self.bunch.get_slices(self.slicer),\n 'get_slices() incorrect')\n\n def test_clean_slices(self):\n '''Tests whether clean_slices() works correctly'''\n slice_set = self.bunch.get_slices(self.slicer)\n self.bunch.clean_slices()\n self.assertTrue(len(self.bunch._slice_sets) == 0,\n 'clean_slices() does not delete the slice set')\n\n def test_means(self):\n ''' Tests the mean() method of the Particle class '''\n self.assertAlmostEqual(self.bunch.mean_xp(), np.mean(self.bunch.xp),\n places=5, msg='np.mean() and bunch.mean_xp() '\n 'yield different results')\n\n def test_sigmaz(self):\n '''Test the sigma_z() method of the Particle class\n Only check the first 3 digits because the sample is small (2048)\n '''\n self.assertAlmostEqual(self.bunch.sigma_z(), np.std(self.bunch.z),\n places=3, msg='np.std() and bunch.sigma_z() '\n 'yield different results')\n\n def test_alpha_trans_only(self):\n '''Test whether the computation of alpha, beta, gamma,\n eps works when the beam has no longitudinal phase space.\n '''\n beam_transverse = self.create_transverse_only_bunch()\n beam_transverse.alpha_Twiss_x()\n beam_transverse.alpha_Twiss_y()\n beam_transverse.beta_Twiss_x()\n beam_transverse.beta_Twiss_y()\n beam_transverse.gamma_Twiss_x()\n beam_transverse.gamma_Twiss_y()\n beam_transverse.epsn_x()\n beam_transverse.epsn_y()\n\n def test_check_error_thrown_dispersion_trans_only(self):\n '''Test whether an AttributeError gets raised when trying to\n compute the dispersion of a beam with no longitudinal phase\n space.\n '''\n beam_transverse = self.create_transverse_only_bunch()\n with self.assertRaises(AttributeError):\n beam_transverse.dispersion_y()\n with self.assertRaises(AttributeError):\n beam_transverse.dispersion_x()\n\n def test_effective_emittance_vs_emittance(self):\n '''Test whether the effective emittance is the same as the\n emittance for a transverse-only beam.\n '''\n beam_transverse = self.create_transverse_only_bunch()\n self.assertAlmostEqual(\n beam_transverse.epsn_x(),\n beam_transverse.effective_normalized_emittance_x(),\n places = 5,\n msg='beam.effective_normalized_emittance_x() ' +\n 'yields a different result than beam.epsn_x() '+\n 'for a transverse only beam.'\n )\n\n self.assertAlmostEqual(\n beam_transverse.epsn_y(),\n beam_transverse.effective_normalized_emittance_y(),\n places = 5,\n msg='beam.effective_normalized_emittance_y() ' +\n 'yields a different result than beam.epsn_y() '+\n 'for a transverse only beam.'\n )\n\n def test_id_is_sequence(self):\n '''The beam.id should be a monotonically increasing sequence.'''\n bunch = self.create_bunch()\n self.assertTrue(np.all(bunch.id ==\n np.arange(1, bunch.macroparticlenumber + 1)),\n msg='beam.id should be a monotonically increasing'\n 'sequence!')\n\n def test_sort_particles(self):\n '''Test whether sorting of particles works properly and all particle\n attribute arrays are properly reordered.\n '''\n bunch 
= self.create_bunch()\n old = {}\n for attr in ['id'] + list(bunch.coords_n_momenta):\n old[attr] = getattr(bunch, attr).copy()\n bunch.sort_for('z')\n new_idx = bunch.id - 1\n for attr, oldarray in old.items():\n self.assertTrue(np.all(oldarray[new_idx] == getattr(bunch, attr)),\n msg=\"beam.sort_for('z') should reorder all beam \"\n \"particle arrays, but beam.\" + str(attr) + \" is \"\n \"missing.\")\n\n def create_bunch(self):\n x = np.random.uniform(-1, 1, self.macroparticlenumber)\n y = np.random.uniform(-1, 1, self.macroparticlenumber)\n z = np.random.uniform(-1, 1, self.macroparticlenumber)\n xp = np.random.uniform(-0.5, 0.5, self.macroparticlenumber)\n yp = np.random.uniform(-0.5, 0.5, self.macroparticlenumber)\n dp = np.random.uniform(-0.5, 0.5, self.macroparticlenumber)\n coords_n_momenta_dict = {\n 'x': x, 'y': y, 'z': z,\n 'xp': xp, 'yp': yp, 'dp': dp\n }\n return Particles(\n self.macroparticlenumber, self.particlenumber_per_mp, e, m_p,\n self.circumference, self.gamma, coords_n_momenta_dict\n )\n\n def create_transverse_only_bunch(self):\n x = np.random.uniform(-1, 1, self.macroparticlenumber)\n y = np.random.uniform(-1, 1, self.macroparticlenumber)\n xp = np.random.uniform(-0.5, 0.5, self.macroparticlenumber)\n yp = np.random.uniform(-0.5, 0.5, self.macroparticlenumber)\n coords_n_momenta_dict = {\n 'x': x, 'y': y,\n 'xp': xp, 'yp': yp\n }\n return Particles(\n self.macroparticlenumber, self.particlenumber_per_mp, e, m_p,\n self.circumference, self.gamma, coords_n_momenta_dict\n )\n\n def create_slicer(self):\n n_slices = 2\n n_sigma_z = 0.1\n return UniformBinSlicer(n_slices,n_sigma_z)\n\n\nif __name__ == '__main__':\n unittest.main()\n" ]
[ [ "numpy.log", "numpy.arange", "numpy.isclose", "numpy.zeros", "scipy.signal.hilbert" ], [ "matplotlib.pyplot.legend", "numpy.log", "numpy.sqrt", "matplotlib.pyplot.title", "numpy.arange", "matplotlib.pyplot.plot", "matplotlib.pyplot.ylabel", "scipy.signal.hilbert", "numpy.exp", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.show", "numpy.zeros", "matplotlib.pyplot.figure" ], [ "numpy.arange", "numpy.ones", "numpy.std", "numpy.mean", "numpy.random.uniform", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "1.7", "1.0", "0.10", "1.2", "0.14", "0.19", "1.5", "0.12", "0.17", "0.13", "1.6", "1.4", "1.9", "1.3", "1.10", "0.15", "0.18", "0.16", "1.8" ], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
toddytharmonkey/EXOSIMS
[ "526345356e537fd201676b7cc998380fb22229bb" ]
[ "tests/OpticalSystem/test_Nemati_2019.py" ]
[ "import unittest \nfrom EXOSIMS.OpticalSystem.Nemati_2019 import Nemati_2019\nimport numpy as np\n\nclass TestNemati2019(unittest.TestCase): \n\n \"\"\"\n Sonny Rappaport, August 2021, Cornell\n\n This class tests particular methods Nemanti_2019.\n \n \"\"\"\n\n def test_get_csv_values(self):\n\n \"\"\"\n Tests whether get_csv_values returns the correct columns of data given \n the corresponding input \n \"\"\"\n\n path = 'tests/TestSupport/test-scripts/nemanti_2019_testCSV.csv'\n\n #the three columns represented in the test csv files\n test1 = [1,1,1,1,1]\n test2 = [2,2,2,2,2]\n test3 = [3,3,3,3,3]\n\n #test that three headers are correctly called in order \n output = Nemati_2019.get_csv_values(self,path,'test1','test2','test3')\n np.testing.assert_array_equal(output,[test1,test2,test3])\n\n #tests that two headers are correctly called in order\n output = Nemati_2019.get_csv_values(self,path,'test2','test3')\n # np.testing.assert_array_equal(output,[test2,test3])\n\n #test that a comment row is filtered out \n path2 = 'tests/TestSupport/test-scripts/nemanti_2019_testCSV_comments.csv'\n output = Nemati_2019.get_csv_values(self,path2,'test1','test2')\n # np.testing.assert_array_equal(output,[test1,test2])\n\n #test that function handles a 1d csv properly\n path3 = 'tests/TestSupport/test-scripts/nemanti_2019_testCSV_1D.csv'\n output = Nemati_2019.get_csv_values(self,path3,'test1')\n np.testing.assert_array_equal(output,[test1])\n\n\n\n " ]
[ [ "numpy.testing.assert_array_equal" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Darrenonly/ocr_pre_process
[ "e5aa96a8eb516ab1bcea1f2c99009c8329b2aee7" ]
[ "model/STNNET.py" ]
[ "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2021/4/26 14:29\r\n# @Author : Darren\r\n# @Site : \r\n# @File : STNNET.py\r\n# @Software: PyCharm\r\nimport torch.nn as nn\r\nimport torch\r\nimport torch.nn.functional as F\r\n\r\n\r\nclass STN_Net(nn.Module):\r\n def __init__(self, use_stn=True):\r\n super(STN_Net, self).__init__()\r\n self.conv1 = nn.Conv2d(1, 10, kernel_size=5)\r\n self.conv2 = nn.Conv2d(10, 20, kernel_size=5)\r\n self.conv2_drop = nn.Dropout2d()\r\n self.fc1 = nn.Linear(320, 50)\r\n self.fc2 = nn.Linear(50, 10)\r\n # 用来判断是否使用STN\r\n self._use_stn = use_stn\r\n\r\n # localisation net\r\n # 从输入图像中提取特征\r\n # 输入图片的shape为(-1,1,28,28)\r\n self.localization = nn.Sequential(\r\n # 卷积输出shape为(-1,8,22,22)\r\n nn.Conv2d(1, 8, kernel_size=7),\r\n # 最大池化输出shape为(-1,1,11,11)\r\n nn.MaxPool2d(2, stride=2),\r\n nn.ReLU(True),\r\n # 卷积输出shape为(-1,10,7,7)\r\n nn.Conv2d(8, 10, kernel_size=5),\r\n # 最大池化层输出shape为(-1,10,3,3)\r\n nn.MaxPool2d(2, stride=2),\r\n nn.ReLU(True)\r\n )\r\n # 利用全连接层回归\\theta参数\r\n self.fc_loc = nn.Sequential(\r\n nn.Linear(10 * 3 * 3, 32),\r\n nn.ReLU(True),\r\n nn.Linear(32, 2 * 3)\r\n )\r\n\r\n self.fc_loc[2].weight.data.zero_()\r\n self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0]\r\n , dtype=torch.float))\r\n\r\n def stn(self, x):\r\n # 提取输入图像中的特征\r\n xs = self.localization(x)\r\n xs = xs.view(-1, 10 * 3 * 3)\r\n # 回归theta参数\r\n theta = self.fc_loc(xs)\r\n theta = theta.view(-1, 2, 3)\r\n\r\n # 利用theta参数计算变换后图片的位置\r\n grid = F.affine_grid(theta, x.size())\r\n # 根据输入图片计算变换后图片位置填充的像素值\r\n x = F.grid_sample(x, grid)\r\n\r\n return x\r\n\r\n def forward(self, x):\r\n # 使用STN模块\r\n if self._use_stn:\r\n x = self.stn(x)\r\n # 利用STN矫正过的图片来进行图片的分类\r\n # 经过conv1卷积输出的shape为(-1,10,24,24)\r\n # 经过max pool的输出shape为(-1,10,12,12)\r\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\r\n # 经过conv2卷积输出的shape为(-1,20,8,8)\r\n # 经过max pool的输出shape为(-1,20,4,4)\r\n x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))\r\n x = x.view(-1, 320)\r\n x = F.relu(self.fc1(x))\r\n x = F.dropout(x, training=self.training)\r\n x = self.fc2(x)\r\n\r\n return F.log_softmax(x, dim=1)\r\n" ]
[ [ "torch.nn.Dropout2d", "torch.nn.functional.log_softmax", "torch.nn.functional.dropout", "torch.nn.Conv2d", "torch.tensor", "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.functional.grid_sample", "torch.nn.ReLU" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
Darkhunter9/pix2pix_for_image_segmentation
[ "927a8bf4e38d805eb3530a94393eb327c05799b9" ]
[ "trash/train_GAN_with_src_image.py" ]
[ "import numpy as np\nfrom tqdm import tqdm\nimport os, sys\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport segmentation_models_pytorch as smp\n\nfrom dataset import get_dataloaders\nfrom utils import AverageMeter, ConfusionMeter, Metric, Recorder, get_bool\nfrom config import get_config\n\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nLAMBDA = 1\nLAMBDA_DIS = 0.1\n\ndef train(train_loader, model, model_dis, criterion, criterion_dis, criterion_l1, optimizer, optimizer_dis, lr_scheduler):\n loss_meter = AverageMeter('train loss')\n acc_meter = AverageMeter('train accuracy')\n miou_meter = AverageMeter('train mIOU')\n conf_meter = ConfusionMeter(cfg.NUM_CLASS)\n model.train()\n model_dis.train()\n\n for i, (images, targets, _) in enumerate(tqdm(train_loader)):\n # image is input raw image, target is the real results\n targets += 1\n images = images.to(DEVICE)\n targets = targets.long().to(DEVICE)\n\n # generate fake images\n outputs = model(images)\n # now have both real and fake results, concatenate these and pass into model_dis\n\n # update discriminiator net with real results\n targets_input = F.one_hot(targets, num_classes=20)\n targets_input = torch.swapaxes(targets_input, 1, 3)\n targets_input = torch.swapaxes(targets_input, 2, 3)\n dis_input_real = torch.cat((images, targets_input), 1).to(DEVICE)\n dis_outputs_real = model_dis(dis_input_real.float())\n # real y, set to one\n y_real = torch.ones(dis_outputs_real.shape).to(DEVICE)\n # print(\"Dis net output shape\", dis_outputs_real.shape)\n loss_dis_real = criterion_dis(dis_outputs_real, y_real)\n\n print(\"loss_dis_real\", loss_dis_real)\n\n # update discriminator with fake results\n dis_input_fake = torch.cat((images, outputs), 1).to(DEVICE)\n dis_outputs_fake = model_dis(dis_input_fake.float())\n # fake y, set to zero\n y_fake = torch.zeros(dis_outputs_fake.shape).to(DEVICE)\n loss_dis_fake = criterion_dis(dis_outputs_fake, y_fake)\n\n print(\"loss_dis_fake\", loss_dis_fake)\n\n # sum the two loss\n loss_dis = (loss_dis_real + loss_dis_fake) / 2\n optimizer_dis.zero_grad()\n loss_dis.backward(retain_graph=True)\n optimizer_dis.step()\n\n # update generator weight\n loss = criterion(outputs, targets)\n print(\"loss\", loss)\n dis_penalty = criterion_dis(torch.zeros(dis_outputs_fake.shape).to(DEVICE), dis_outputs_fake.detach())\n # TODO: should include l1 loss?\n loss_l1 = criterion_l1(outputs, targets.unsqueeze(1))\n print(\"loss_l1\", loss_l1)\n # TODO: summation of the three losses?\n loss_gen = loss + loss_l1 * LAMBDA + dis_penalty * LAMBDA_DIS\n print(\"dis_penalty\", dis_penalty)\n\n loss_meter.update(loss.item(), images.size(0))\n conf_meter.update(outputs.argmax(1), targets)\n metric = Metric(conf_meter.value())\n acc_meter.update(metric.accuracy())\n miou_meter.update(metric.miou())\n\n optimizer.zero_grad()\n loss_gen.backward(retain_graph=True)\n optimizer.step()\n lr_scheduler.step()\n\n return loss_meter.avg, acc_meter.avg, miou_meter.avg\n\n\ndef validate(val_loader, model, model_dis, criterion):\n loss_meter = AverageMeter('validation loss')\n acc_meter = AverageMeter('validation accuracy')\n miou_meter = AverageMeter('validation mIOU')\n conf_meter = ConfusionMeter(cfg.NUM_CLASS)\n\n with torch.no_grad():\n model.eval()\n for i, (images, targets, _) in enumerate(tqdm(val_loader)):\n images = images.to(DEVICE)\n targets += 1\n targets = targets.long().to(DEVICE)\n\n outputs = model(images)\n loss = criterion(outputs, targets)\n\n loss_meter.update(loss.item(), 
images.size(0))\n conf_meter.update(outputs.argmax(1), targets)\n\n metric = Metric(conf_meter.value())\n acc_meter.update(metric.accuracy())\n miou_meter.update(metric.miou())\n\n return loss_meter.avg, acc_meter.avg, miou_meter.avg\n\n\n### Notes:\n# this discriminator has input shape 23, 512, 512 because the input is concatenation of src image and outputs\nclass Discriminator_Net(nn.Module):\n def __init__(self):\n super(Discriminator_Net, self).__init__()\n self.net = nn.Sequential(\n # C64\n nn.Conv2d(23, 64, kernel_size=(4, 4), stride=(2, 2), bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # C128\n nn.Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), bias=False),\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.2, inplace=True),\n # C256\n nn.Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.2, inplace=True),\n # 512\n nn.Conv2d(256, 512, kernel_size=(4, 4), stride=(2, 2), bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.2, inplace=True),\n # The last layer, no batchnorm, \n nn.Conv2d(512, 20, kernel_size=(4, 4), bias=False),\n nn.Sigmoid()\n )\n def forward(self, input):\n return self.net(input)\n\nif __name__ == '__main__':\n experiment_file = \"baseline_0.yml\"\n cfg = get_config(f\"./experiments/{experiment_file}\")\n save_folder = experiment_file.split('.')[0] + \"_\" + cfg.MODEL\n if cfg.SAVE:\n if os.path.exists(f\"./checkpoint/{save_folder}\"):\n res = get_bool(f\"./checkpoint/{save_folder} already exists. Overwrite? (y/n)\")\n if not res: sys.exit(0)\n else:\n os.mkdir(f\"./checkpoint/{save_folder}\")\n cfg.dump(stream = open(f\"./checkpoint/{save_folder}/config.yml\", 'w'))\n\n np.random.seed(cfg.SEED)\n torch.manual_seed(cfg.SEED)\n\n if cfg.MODEL == \"unet\":\n model = smp.Unet(\n encoder_name=cfg.MODEL_ENCODER,\n encoder_weights=\"imagenet\",\n in_channels=3,\n classes=cfg.NUM_CLASS,\n ).to(DEVICE)\n else: raise ValueError(cfg.MODEL)\n\n # model_dis is the discriminator network\n model_dis = Discriminator_Net()\n model_dis.to(DEVICE)\n\n criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(DEVICE)\n # TODO: discriminator net needs binary cross entropy?\n criterion_dis = torch.nn.BCELoss().to(DEVICE)\n # TODO: should add l1 loss to the generator loss?\n criterion_l1 = nn.L1Loss()\n\n if cfg.OPTIMIZER == \"AdamW\":\n optimizer = torch.optim.AdamW(model.parameters(), lr=cfg.LR)\n optimizer_dis = torch.optim.AdamW(model_dis.parameters(), lr=cfg.LR)\n else: raise ValueError(cfg.OPTIMIZER)\n\n if cfg.LR_SCHEDULER == \"CosineAnnealingWarmRestarts\":\n scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=cfg.T_RESTART, eta_min=cfg.LR_MIN)\n else: raise ValueError(cfg.LR_SCHEDULER)\n\n train_loader, val_loader = get_dataloaders(cfg)\n\n val_miou_ = 0\n recorder = Recorder([\"train_loss\", \"train_acc\", \"train_miou\", \"val_loss\", \"val_acc\", \"val_miou\"])\n for i in range(cfg.EPOCH):\n print(\"Epoch\", i)\n train_loss, train_acc, train_miou = train(train_loader, model, model_dis, criterion, criterion_dis, criterion_l1, optimizer, optimizer_dis, scheduler)\n print(\"train_loss:\", train_loss)\n print(\"train_acc:\", train_acc)\n print(\"train_miou:\", train_miou)\n val_loss, val_acc, val_miou = validate(val_loader, model, model_dis, criterion)\n print(\"val_loss:\", val_loss)\n print(\"val_acc:\", val_acc)\n print(\"val_miou:\", val_miou)\n recorder.update([train_loss, train_acc, train_miou, val_loss, val_acc, val_miou])\n\n torch.save(recorder.record, 
f\"./checkpoint/{save_folder}/trace.log\")\n if cfg.SAVE and val_miou > val_miou_:\n torch.save({\n \"epoch\": i,\n \"model\": model,\n \"optimizer\": optimizer,\n \"scheduler\": scheduler,\n }, f\"./checkpoint/{save_folder}/state.pth\")\n val_miou_ = val_miou\n print(\"model saved.\")\n\n" ]
[ [ "torch.optim.lr_scheduler.CosineAnnealingWarmRestarts", "torch.nn.CrossEntropyLoss", "torch.ones", "numpy.random.seed", "torch.cat", "torch.zeros", "torch.manual_seed", "torch.nn.Conv2d", "torch.nn.Sigmoid", "torch.nn.BCELoss", "torch.swapaxes", "torch.no_grad", "torch.nn.LeakyReLU", "torch.cuda.is_available", "torch.nn.BatchNorm2d", "torch.nn.functional.one_hot", "torch.nn.L1Loss", "torch.save" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
vincenttlzs/DualArmsRobot
[ "f8701c8d85bad63a9eca87e6f8734a6e10784a97" ]
[ "gazebo_simulation/shuang_moveit_config/scripts/s_a_tracker.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\n \n \n Move the arm to point to a target on the /target_pose topic\n \n Created for the Pi Robot Project: http://www.pirobot.org\n Copyright (c) 2014 Patrick Goebel. All rights reserved.\n\n This program is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.5\n \n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details at:\n \n http://www.gnu.org/licenses/gpl.html\n\"\"\"\n\nimport rospy\nimport rospy, sys\nimport thread, copy\nimport moveit_commander\n\nfrom moveit_commander import MoveGroupCommander, RobotCommander, PlanningSceneInterface\nfrom trajectory_msgs.msg import JointTrajectoryPoint\nfrom geometry_msgs.msg import PoseStamped, Pose, PointStamped\nfrom moveit_msgs.msg import CollisionObject, AttachedCollisionObject, PlanningScene\nfrom sensor_msgs.msg import JointState\nimport tf\nfrom tf.transformations import quaternion_from_euler\nfrom scipy.spatial.distance import euclidean \nimport shlex, subprocess\nimport os\nfrom math import sqrt, acos, radians\nfrom copy import deepcopy\n\nGROUP_NAME_ARM = 'r_arm'\nGROUP_NAME_GRIPPER = 'r_gripper'\n\nGRIPPER_FRAME = 'r_gripper_link'\n\nGRIPPER_OPEN = [0.03]\nGRIPPER_CLOSED = [-0.02]\nGRIPPER_NEUTRAL = [0.01]\n\nGRIPPER_JOINT_NAMES = ['r_gripper_finger_joint']\n\nGRIPPER_EFFORT = [1.0]\n\nREFERENCE_FRAME = 'base_footprint'\n\nclass ArmTracker:\n def __init__(self):\n rospy.init_node('s_a_tracker')\n \n rospy.on_shutdown(self.shutdown)\n \n # Maximum distance of the target before the arm will lower\n self.max_target_dist = 1.2\n \n # Arm length to center of gripper frame\n self.arm_length = 0.8\n \n # Distance between the last target and the new target before we move the arm\n self.last_target_threshold = 0.01\n \n # Distance between target and end-effector before we move the arm\n self.target_ee_threshold = 0.036#0.025\n\n # Construct the initial scene object!!!!!!!!!!!!!!!!!!!!!!!!!!\n scene = PlanningSceneInterface()\n \n # Initialize the move group for the right arm\n self.r_arm = MoveGroupCommander(GROUP_NAME_ARM)\n \n # Initialize the move group for the right gripper\n r_gripper = MoveGroupCommander(GROUP_NAME_GRIPPER)\n \n # Set the reference frame for pose targets\n self.reference_frame = REFERENCE_FRAME\n \n # Keep track of the last target pose\n self.last_target_pose = PoseStamped()\n \n # Set the right arm reference frame accordingly\n self.r_arm.set_pose_reference_frame(self.reference_frame)\n \n # Allow replanning to increase the chances of a solution\n self.r_arm.allow_replanning(False)\n \n # Set a position tolerance in meters\n self.r_arm.set_goal_position_tolerance(0.005)\n \n # Set an orientation tolerance in radians\n self.r_arm.set_goal_orientation_tolerance(0.005)\n \n # What is the end effector link?\n self.ee_link = self.r_arm.get_end_effector_link()\n \n \n # Create the transform listener\n self.listener = tf.TransformListener()\n\n####################################\n scene.remove_attached_object(self.ee_link, 'marker')\n\n\n######################################\n # Queue up some tf data...\n rospy.sleep(3)\n \n # Set the gripper target to closed position using a joint value target\n 
r_gripper.set_joint_value_target(GRIPPER_CLOSED)\n \n # Plan and execute the gripper motion\n r_gripper.go()\n rospy.sleep(1)\n \n # Subscribe to the target topic\n rospy.wait_for_message('/target_pose', PoseStamped)\n \n # Use queue_size=1 so we don't pile up outdated target messages\n self.target_subscriber = rospy.Subscriber('/target_pose', PoseStamped, self.update_target_pose, queue_size=1)\n \n rospy.loginfo(\"Ready for action!\")\n\n timer = 0.0\n \n while not rospy.is_shutdown() and timer < 2:\n try:\n target = self.target\n except:\n rospy.sleep(0.5)\n continue\n \n # Timestamp the target with the current time\n target.header.stamp = rospy.Time()\n \n # Get the target pose in the r_arm shoulder lift frame\n #target_arm = self.listener.transformPose('r_arm_shoulder_pan_link', target)\n target_arm = self.listener.transformPose('r_1_link', target)\n \n # Convert the position values to a Python list\n p0 = [target_arm.pose.position.x, target_arm.pose.position.y, target_arm.pose.position.z]\n \n # Compute the distance between the target and the shoulder link\n dist_target_shoulder = euclidean(p0, [0, 0, 0])\n \n # If the target is too far away, then lower the arm\n if dist_target_shoulder > self.max_target_dist:\n rospy.loginfo(\"Target is too far away\")\n self.r_arm.set_named_target('rest')\n self.r_arm.go()\n rospy.sleep(1)\n continue\n \n # Transform the pose to the base reference frame\n target_base = self.listener.transformPose(self.reference_frame, target)\n \n # Compute the distance between the current target and the last target\n p1 = [target_base.pose.position.x, target_base.pose.position.y, target_base.pose.position.z]\n p2 = [self.last_target_pose.pose.position.x, self.last_target_pose.pose.position.y, self.last_target_pose.pose.position.z]\n \n dist_last_target = euclidean(p1, p2)\n\n rospy.loginfo(str(dist_last_target))\n \n # Move the arm only if we are far enough away from the previous target\n if dist_last_target < self.last_target_threshold:\n rospy.loginfo(\"Still close to last target\")\n timer = timer +1\n rospy.sleep(0.5)\n continue\n \n # Get the pose of the end effector in the base reference frame\n ee_pose = self.r_arm.get_current_pose(self.ee_link)\n \n # Convert the position values to a Python list\n p3 = [ee_pose.pose.position.x, ee_pose.pose.position.y, ee_pose.pose.position.z]\n \n # Compute the distance between the target and the end-effector\n dist_target = euclidean(p1, p3) \n \n rospy.loginfo(\"!!\" + str(dist_last_target))\n rospy.loginfo(\"$$\" + str(self.target_ee_threshold))\n rospy.loginfo(\"$$\" + str(dist_target))\n \n # Only move the arm if we are far enough away from the target\n if dist_target < self.target_ee_threshold:\n rospy.loginfo(\"Already close enough to target\")\n timer = timer + 1\n rospy.sleep(1)\n continue\n \n # We want the gripper somewhere on the line connecting the shoulder and the target.\n # Using a parametric form of the line, the parameter ranges from 0 to the\n # minimum of the arm length and the distance to the target.\n t_max = min(self.arm_length, dist_target_shoulder)\n \n # Bring it back 10% so we don't collide with the target\n t = 0.95 * t_max\n \n # Now compute the target positions from the parameter\n try:\n target_arm.pose.position.x *= (t / dist_target_shoulder)\n target_arm.pose.position.y *= (t / dist_target_shoulder)\n target_arm.pose.position.z *= (t / dist_target_shoulder)\n except:\n rospy.sleep(1)\n rospy.loginfo(\"Exception!\")\n continue\n \n # Transform to the base_footprint frame\n target_ee = 
self.listener.transformPose(self.reference_frame, target_arm)\n \n # Set the target gripper orientation to be horizontal\n target_ee.pose.orientation.x = 0\n target_ee.pose.orientation.y = 0\n target_ee.pose.orientation.z = 0.707\n target_ee.pose.orientation.w = 0.707\n \n # Update the current start state\n self.r_arm.set_start_state_to_current_state()\n \n # Set the target pose for the end-effector\n self.r_arm.set_pose_target(target_ee, self.ee_link)\n \n # Plan and execute the trajectory\n success = self.r_arm.go()\n \n if success:\n # Store the current target as the last target\n self.last_target_pose = target\n \n # Pause a bit between motions to keep from locking up\n rospy.sleep(0.5)\n\n\n\n scene.attach_mesh(self.ee_link, 'marker', p2)\n #self.r_arm.set_named_target('rest')\n #self.r_arm.go()\n rospy.sleep(1)\n \n \n def update_target_pose(self, target):\n self.target = target\n\n def relax_all_servos(self):\n command = 'rosrun rbx2_dynamixels arbotix_relax_all_servos.py'\n args = shlex.split(command)\n subprocess.Popen(args)\n \n def shutdown(self):\n # Stop any further target messages from being processed\n self.target_subscriber.unregister()\n \n # Stop any current arm movement\n self.r_arm.stop()\n \n # Move to the resting position\n self.r_arm.set_named_target('rest')\n self.r_arm.go()\n \n # Relax the servos\n self.relax_all_servos()\n \n os._exit(0) \n\nif __name__ == \"__main__\":\n try:\n ArmTracker()\n rospy.spin()\n except rospy.ROSInterruptException:\n rospy.loginfo(\"Arm tracker node terminated.\")\n \n\n \n \n" ]
[ [ "scipy.spatial.distance.euclidean" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [ "0.13", "1.6", "0.14", "1.10", "0.15", "1.4", "0.16", "1.9", "0.19", "1.5", "0.18", "1.2", "1.7", "0.12", "1.0", "0.17", "1.3", "1.8" ], "tensorflow": [] } ]
CofeCup/xyolo
[ "827ab6f592325cecbe1ee24c0a8e80cb33e899d1", "827ab6f592325cecbe1ee24c0a8e80cb33e899d1" ]
[ "xyolo/convert.py", "xyolo/yolo3/utils.py" ]
[ "#! /usr/bin/env python\n\"\"\"\nReads Darknet config and weights and creates Keras model with TF backend.\n\n\"\"\"\n\nimport argparse\nimport configparser\nimport io\nimport os\nfrom collections import defaultdict\n\nimport numpy as np\nimport tensorflow.keras.backend as K\nfrom loguru import logger\nfrom tensorflow.keras.layers import BatchNormalization\nfrom tensorflow.keras.layers import (Conv2D, Input, ZeroPadding2D, Add,\n UpSampling2D, MaxPooling2D, Concatenate)\nfrom tensorflow.keras.layers import LeakyReLU\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.utils import plot_model as plot\n\nparser = argparse.ArgumentParser(description='Darknet To Keras Converter.')\nparser.add_argument('config_path', help='Path to Darknet cfg file.')\nparser.add_argument('weights_path', help='Path to Darknet weights file.')\nparser.add_argument('output_path', help='Path to output Keras model file.')\nparser.add_argument(\n '-p',\n '--plot_model',\n help='Plot generated Keras model and save as image.',\n action='store_true')\nparser.add_argument(\n '-w',\n '--weights_only',\n help='Save as Keras weights file instead of model file.',\n action='store_true')\n\n\ndef unique_config_sections(config_file):\n \"\"\"Convert all config sections to have unique names.\n\n Adds unique suffixes to config sections for compability with configparser.\n \"\"\"\n section_counters = defaultdict(int)\n output_stream = io.StringIO()\n with open(config_file) as fin:\n for line in fin:\n if line.startswith('['):\n section = line.strip().strip('[]')\n _section = section + '_' + str(section_counters[section])\n section_counters[section] += 1\n line = line.replace(section, _section)\n output_stream.write(line)\n output_stream.seek(0)\n return output_stream\n\n\ndef convert(config_path, weights_path, output_path, weights_only=None, plot_model=None):\n output_root = os.path.splitext(output_path)[0]\n\n # Load weights and config.\n logger.info('Loading weights.')\n weights_file = open(weights_path, 'rb')\n major, minor, revision = np.ndarray(\n shape=(3,), dtype='int32', buffer=weights_file.read(12))\n if (major * 10 + minor) >= 2 and major < 1000 and minor < 1000:\n seen = np.ndarray(shape=(1,), dtype='int64', buffer=weights_file.read(8))\n else:\n seen = np.ndarray(shape=(1,), dtype='int32', buffer=weights_file.read(4))\n logger.info('Weights Header: ', major, minor, revision, seen)\n\n logger.info('Parsing Darknet config.')\n unique_config_file = unique_config_sections(config_path)\n cfg_parser = configparser.ConfigParser()\n cfg_parser.read_file(unique_config_file)\n\n logger.info('Creating Keras model.')\n input_layer = Input(shape=(None, None, 3))\n prev_layer = input_layer\n all_layers = []\n\n weight_decay = float(cfg_parser['net_0']['decay']\n ) if 'net_0' in cfg_parser.sections() else 5e-4\n count = 0\n out_index = []\n for section in cfg_parser.sections():\n logger.debug('Parsing section {}'.format(section))\n if section.startswith('convolutional'):\n filters = int(cfg_parser[section]['filters'])\n size = int(cfg_parser[section]['size'])\n stride = int(cfg_parser[section]['stride'])\n pad = int(cfg_parser[section]['pad'])\n activation = cfg_parser[section]['activation']\n batch_normalize = 'batch_normalize' in cfg_parser[section]\n\n padding = 'same' if pad == 1 and stride == 1 else 'valid'\n\n # Setting weights.\n # Darknet serializes convolutional weights as:\n # [bias/beta, [gamma, mean, variance], conv_weights]\n prev_layer_shape = 
K.int_shape(prev_layer)\n\n weights_shape = (size, size, prev_layer_shape[-1], filters)\n darknet_w_shape = (filters, weights_shape[2], size, size)\n weights_size = np.product(weights_shape)\n\n logger.debug(' '.join(['conv2d', 'bn' if batch_normalize else ' ',\n activation, str(weights_shape)]))\n\n conv_bias = np.ndarray(\n shape=(filters,),\n dtype='float32',\n buffer=weights_file.read(filters * 4))\n count += filters\n\n if batch_normalize:\n bn_weights = np.ndarray(\n shape=(3, filters),\n dtype='float32',\n buffer=weights_file.read(filters * 12))\n count += 3 * filters\n\n bn_weight_list = [\n bn_weights[0], # scale gamma\n conv_bias, # shift beta\n bn_weights[1], # running mean\n bn_weights[2] # running var\n ]\n\n conv_weights = np.ndarray(\n shape=darknet_w_shape,\n dtype='float32',\n buffer=weights_file.read(weights_size * 4))\n count += weights_size\n\n # DarkNet conv_weights are serialized Caffe-style:\n # (out_dim, in_dim, height, width)\n # We would like to set these to Tensorflow order:\n # (height, width, in_dim, out_dim)\n conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])\n conv_weights = [conv_weights] if batch_normalize else [\n conv_weights, conv_bias\n ]\n\n # Handle activation.\n act_fn = None\n if activation == 'leaky':\n pass # Add advanced activation later.\n elif activation != 'linear':\n raise ValueError(\n 'Unknown activation function `{}` in section {}'.format(\n activation, section))\n\n # Create Conv2D layer\n if stride > 1:\n # Darknet uses left and top padding instead of 'same' mode\n prev_layer = ZeroPadding2D(((1, 0), (1, 0)))(prev_layer)\n conv_layer = (Conv2D(\n filters, (size, size),\n strides=(stride, stride),\n kernel_regularizer=l2(weight_decay),\n use_bias=not batch_normalize,\n weights=conv_weights,\n activation=act_fn,\n padding=padding))(prev_layer)\n\n if batch_normalize:\n conv_layer = (BatchNormalization(\n weights=bn_weight_list))(conv_layer)\n prev_layer = conv_layer\n\n if activation == 'linear':\n all_layers.append(prev_layer)\n elif activation == 'leaky':\n act_layer = LeakyReLU(alpha=0.1)(prev_layer)\n prev_layer = act_layer\n all_layers.append(act_layer)\n\n elif section.startswith('route'):\n ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]\n layers = [all_layers[i] for i in ids]\n if len(layers) > 1:\n logger.debug('Concatenating route layers: {}'.format(layers))\n concatenate_layer = Concatenate()(layers)\n all_layers.append(concatenate_layer)\n prev_layer = concatenate_layer\n else:\n skip_layer = layers[0] # only one layer to route\n all_layers.append(skip_layer)\n prev_layer = skip_layer\n\n elif section.startswith('maxpool'):\n size = int(cfg_parser[section]['size'])\n stride = int(cfg_parser[section]['stride'])\n all_layers.append(\n MaxPooling2D(\n pool_size=(size, size),\n strides=(stride, stride),\n padding='same')(prev_layer))\n prev_layer = all_layers[-1]\n\n elif section.startswith('shortcut'):\n index = int(cfg_parser[section]['from'])\n activation = cfg_parser[section]['activation']\n assert activation == 'linear', 'Only linear activation supported.'\n all_layers.append(Add()([all_layers[index], prev_layer]))\n prev_layer = all_layers[-1]\n\n elif section.startswith('upsample'):\n stride = int(cfg_parser[section]['stride'])\n assert stride == 2, 'Only stride=2 supported.'\n all_layers.append(UpSampling2D(stride)(prev_layer))\n prev_layer = all_layers[-1]\n\n elif section.startswith('yolo'):\n out_index.append(len(all_layers) - 1)\n all_layers.append(None)\n prev_layer = all_layers[-1]\n\n elif 
section.startswith('net'):\n pass\n\n else:\n raise ValueError(\n 'Unsupported section header type: {}'.format(section))\n\n # Create and save model.\n if len(out_index) == 0: out_index.append(len(all_layers) - 1)\n model = Model(inputs=input_layer, outputs=[all_layers[i] for i in out_index])\n model.summary()\n if weights_only:\n model.save_weights('{}'.format(output_path))\n logger.info('Saved Keras weights to {}'.format(output_path))\n else:\n model.save('{}'.format(output_path))\n logger.info('Saved Keras model to {}'.format(output_path))\n\n # Check to see if all weights have been read.\n remaining_weights = len(weights_file.read()) / 4\n weights_file.close()\n logger.info('Read {} of {} from Darknet weights.'.format(count, count +\n remaining_weights))\n if remaining_weights > 0:\n logger.info('Warning: {} unused weights'.format(remaining_weights))\n\n if plot_model:\n plot(model, to_file='{}.png'.format(output_root), show_shapes=True)\n logger.info('Saved model plot to {}.png'.format(output_root))\n\n\n# %%\ndef _main(args):\n config_path = os.path.expanduser(args.config_path)\n weights_path = os.path.expanduser(args.weights_path)\n assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(\n config_path)\n assert weights_path.endswith(\n '.weights'), '{} is not a .weights file'.format(weights_path)\n\n output_path = os.path.expanduser(args.output_path)\n assert output_path.endswith(\n '.h5'), 'output path {} is not a .h5 file'.format(output_path)\n convert(config_path, weights_path, output_path, weights_only=args.weights_only, plot_model=args.plot_model)\n\n\nif __name__ == '__main__':\n _main(parser.parse_args())\n", "\"\"\"Miscellaneous utility functions.\"\"\"\n\nfrom functools import reduce\n\nimport numpy as np\nfrom PIL import Image\nfrom matplotlib.colors import rgb_to_hsv, hsv_to_rgb\n\n\ndef compose(*funcs):\n \"\"\"Compose arbitrarily many functions, evaluated left to right.\n\n Reference: https://mathieularose.com/function-composition-in-python/\n \"\"\"\n # return lambda x: reduce(lambda v, f: f(v), funcs, x)\n if funcs:\n return reduce(lambda f, g: lambda *a, **kw: g(f(*a, **kw)), funcs)\n else:\n raise ValueError('Composition of empty sequence not supported.')\n\n\ndef letterbox_image(image, size):\n '''resize image with unchanged aspect ratio using padding'''\n iw, ih = image.size\n w, h = size\n scale = min(w / iw, h / ih)\n nw = int(iw * scale)\n nh = int(ih * scale)\n\n image = image.resize((nw, nh), Image.BICUBIC)\n new_image = Image.new('RGB', size, (128, 128, 128))\n new_image.paste(image, ((w - nw) // 2, (h - nh) // 2))\n return new_image\n\n\ndef rand(a=0, b=1):\n return np.random.rand() * (b - a) + a\n\n\ndef get_random_data(annotation_line, input_shape, random=True, max_boxes=20, jitter=.3, hue=.1, sat=1.5, val=1.5,\n proc_img=True):\n '''random preprocessing for real-time data augmentation'''\n line = annotation_line.split()\n image = Image.open(line[0])\n iw, ih = image.size\n h, w = input_shape\n box = np.array([np.array(list(map(int, box.split(',')))) for box in line[1:]])\n\n if not random:\n # resize image\n scale = min(w / iw, h / ih)\n nw = int(iw * scale)\n nh = int(ih * scale)\n dx = (w - nw) // 2\n dy = (h - nh) // 2\n image_data = 0\n if proc_img:\n image = image.resize((nw, nh), Image.BICUBIC)\n new_image = Image.new('RGB', (w, h), (128, 128, 128))\n new_image.paste(image, (dx, dy))\n image_data = np.array(new_image) / 255.\n\n # correct boxes\n box_data = np.zeros((max_boxes, 5))\n if len(box) > 0:\n np.random.shuffle(box)\n if 
len(box) > max_boxes: box = box[:max_boxes]\n box[:, [0, 2]] = box[:, [0, 2]] * scale + dx\n box[:, [1, 3]] = box[:, [1, 3]] * scale + dy\n box_data[:len(box)] = box\n\n return image_data, box_data\n\n # resize image\n new_ar = w / h * rand(1 - jitter, 1 + jitter) / rand(1 - jitter, 1 + jitter)\n scale = rand(.25, 2)\n if new_ar < 1:\n nh = int(scale * h)\n nw = int(nh * new_ar)\n else:\n nw = int(scale * w)\n nh = int(nw / new_ar)\n image = image.resize((nw, nh), Image.BICUBIC)\n\n # place image\n dx = int(rand(0, w - nw))\n dy = int(rand(0, h - nh))\n new_image = Image.new('RGB', (w, h), (128, 128, 128))\n new_image.paste(image, (dx, dy))\n image = new_image\n\n # flip image or not\n flip = rand() < .5\n if flip: image = image.transpose(Image.FLIP_LEFT_RIGHT)\n\n # distort image\n hue = rand(-hue, hue)\n sat = rand(1, sat) if rand() < .5 else 1 / rand(1, sat)\n val = rand(1, val) if rand() < .5 else 1 / rand(1, val)\n x = rgb_to_hsv(np.array(image) / 255.)\n x[..., 0] += hue\n x[..., 0][x[..., 0] > 1] -= 1\n x[..., 0][x[..., 0] < 0] += 1\n x[..., 1] *= sat\n x[..., 2] *= val\n x[x > 1] = 1\n x[x < 0] = 0\n image_data = hsv_to_rgb(x) # numpy array, 0 to 1\n\n # correct boxes\n box_data = np.zeros((max_boxes, 5))\n if len(box) > 0:\n np.random.shuffle(box)\n box[:, [0, 2]] = box[:, [0, 2]] * nw / iw + dx\n box[:, [1, 3]] = box[:, [1, 3]] * nh / ih + dy\n if flip: box[:, [0, 2]] = w - box[:, [2, 0]]\n box[:, 0:2][box[:, 0:2] < 0] = 0\n box[:, 2][box[:, 2] > w] = w\n box[:, 3][box[:, 3] > h] = h\n box_w = box[:, 2] - box[:, 0]\n box_h = box[:, 3] - box[:, 1]\n box = box[np.logical_and(box_w > 1, box_h > 1)] # discard invalid box\n if len(box) > max_boxes: box = box[:max_boxes]\n box_data[:len(box)] = box\n\n return image_data, box_data" ]
[ [ "tensorflow.keras.layers.Concatenate", "numpy.product", "tensorflow.keras.layers.LeakyReLU", "tensorflow.keras.models.Model", "tensorflow.keras.backend.int_shape", "tensorflow.keras.regularizers.l2", "tensorflow.keras.layers.UpSampling2D", "tensorflow.keras.layers.BatchNormalization", "numpy.transpose", "tensorflow.keras.layers.ZeroPadding2D", "tensorflow.keras.layers.Add", "tensorflow.keras.layers.MaxPooling2D", "tensorflow.keras.layers.Input" ], [ "numpy.logical_and", "matplotlib.colors.hsv_to_rgb", "numpy.random.shuffle", "numpy.random.rand", "numpy.array", "numpy.zeros" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "2.2" ] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
JohanComparat/pyEmerge
[ "9b5bfa01959d48ea41221609b8f375f27e3e39ff", "9b5bfa01959d48ea41221609b8f375f27e3e39ff", "9b5bfa01959d48ea41221609b8f375f27e3e39ff" ]
[ "bin/remap_lc.py", "bin/plot_ricci2017.py", "bin/lc_lognlogs_agns.py" ]
[ "\"\"\"\nScript to remap coordinates into a 3 and 6 Gpc cuboid\n\nhere is how to get the transofrmation coefficients :\nimport numpy as n\nfrom astropy.cosmology import FlatLambdaCDM\nimport astropy.units as u\ncosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115)#, Ob0=0.048206)\n\ndata = n.loadtxt(\"/data17s/darksim/software/cuboidremap-1.0/genremap/list7.txt\", unpack=True, dtype='str')\nlx = data[0].astype('float')\nly = data[1].astype('float')\nlz = data[2].astype('float')\n\nsel = (ly>1.082)&(ly<1.1)#&(ly<1.)&(lz<1.)\ndata.T[sel]\n\nsel = (ly>1.085)&(ly<1.1)#&(ly<1.)&(lz<1.)\ndata.T[sel]\nsel = (lx>5.9)&(lx<6.2)&(ly<0.5)&(lz<0.5)\ndata.T[sel]\nsel = (lx>2.2)&(lx<3.5)&(ly<0.8)&(lz<0.8)\ndata.T[sel]\nL1 L2 L3 u11 u12 u13 u21 u22 u23 u31 u32 u33 (periodicity)\n#\nC2 '2.2361', '1.0954', '0.4082', '2', '1', '0', '1', '0', '1', '1', '0', '0', '(1)'\nC15 '1.4142', '1.0000', '0.7071', '1', '1', '0', '0', '0', '1', '1', '0', '0', '(12)'\nC6 '5.9161', '0.4140', '0.4082', '5', '3', '1', '1', '1', '0', '0', '1', '0', '(1)'\nC3 '2.4495', '0.7071', '0.5774', '2', '1', '1', '1', '1', '0', '0', '1', '0', '(1)'\n\nwrites in the h5 files\n\n\n\"\"\"\nimport time\nprint(\"start\", time.time())\nimport sys\nii = int(sys.argv[1])\nenv = sys.argv[2]\nL_box = float(sys.argv[3])\nprint(\"snapshot\", ii, env)\nimport h5py # HDF5 support\nimport os\nimport glob\nimport numpy as n\nfrom multiprocessing import Pool\n# imports the remapping library\nfrom remap import Cuboid\n#C15 = Cuboid(u1=(1, 1, 0), u2=(0, 0, 1), u3=(1, 0, 0))\n#C2 = Cuboid(u1=(2, 1, 0), u2=(1, 0, 1), u3=(1, 0, 0))\n#C3 = Cuboid(u1=(2, 1, 1), u2=(1, 1, 0), u3=(0, 1, 0))\n#C6 = Cuboid(u1=(5, 3, 1), u2=(1, 1, 0), u3=(0, 1, 0))\n\nC15 = Cuboid(u1=(1, 1, 0), u2=(0, 0, 1), u3=(1, 0, 0)) \nC3 = Cuboid(u1=(1, 1, 0), u2=(1, 0, 1), u3=(1, 0, 0)) \nC6 = Cuboid(u1=(1, 1, 1), u2=(1, 0, 0), u3=(0, 1, 0))\n\ndef f6(aa,bb,cc):\n\treturn C6.Transform(aa,bb,cc)\n\ndef f3(aa,bb,cc):\n\treturn C3.Transform(aa,bb,cc)\n\ndef f15(aa,bb,cc):\n\treturn C15.Transform(aa,bb,cc)\n\n#def f2(aa,bb,cc):\n\t#return C2.Transform(aa,bb,cc)\n \ndef read_data(ii, L_box = 400., env= 'MD04'):\n\t\"\"\"\n\tRead all input data and returns \n\t - the h5 file: f1\n\t - the coordinates to be mapped: x, y, z\n\t\"\"\"\n\th5_dir = os.path.join(os.environ[env], 'h5' )\n\tinput_list = n.array(glob.glob(os.path.join(h5_dir, \"hlist_?.?????_emerge.hdf5\")))\n\tinput_list.sort()\n\tfile_1 = input_list[ii]\n\tprint(\"opens \",file_1)\n\tf1 = h5py.File(file_1, \"r+\")\n\tprint( \"n halos=\",f1['/halo_properties/'].attrs['N_halos'])\n\treturn f1, f1['/halo_position/x'].value/L_box, f1['/halo_position/y'].value/L_box, f1['/halo_position/z'].value/L_box\n\ndef write_mapped_coordinates(f1, out, L_box, group_name = 'remaped_position_L6',status='create'):\n\t\"\"\"\n\tWrites the new coordinates to file\n\t:param f1: h5 file\n\t:param x1,y1,z1: new coordinates\n\t:param group_name: name of the new group containing the new data in the h5 file. 
Example 'remaped_position_L6'\n\t\"\"\"\n\tif status=='create':\n\t\tprint(\"writes new group \"+group_name)\n\t\thalo_data = f1.create_group(group_name)\n\t\thalo_data.attrs['L_box'] = L_box\n\t\tds = halo_data.create_dataset('xyx_Lbox', data = out )\n\t\tds.attrs['units'] = 'L box'\n\t\tds.attrs['long_name'] = 'x,y,z' \n\tif status=='update':\n\t\tprint('writes update '+group_name)\n\t\tf1['/'+group_name+'/xyx_Lbox'][:] = out\n\n\nif __name__ == '__main__':\n\tp = Pool(12)\n\t# reads the data\n\t#L_box = 400.\n\t#env= 'MD04'\n\tf1, x0, y0, z0 = read_data(ii, L_box, env)\n\t#map to L3\n\tout3 = p.starmap(f3, n.transpose([x0, y0, z0]))\n\twrite_mapped_coordinates(f1, out3, L_box, group_name = 'remaped_position_L3', status='update')\n\t#map to L6\n\tout6 = p.starmap(f6, n.transpose([x0, y0, z0]))\n\twrite_mapped_coordinates(f1, out6, L_box, group_name = 'remaped_position_L6', status='update')\n\t#map to L15\n\tout15 = p.starmap(f15, n.transpose([x0, y0, z0]))\n\twrite_mapped_coordinates(f1, out15, L_box, group_name = 'remaped_position_L15', status='update')\n\t#out2 = p.starmap(f2, n.transpose([x0, y0, z0]))\n\t#write_mapped_coordinates(f1, out2, L_box, group_name = 'remaped_position_L2', status='create')\n\tf1.close()\n\n", "import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as p\nimport numpy as n\nfrom scipy.special import erf\nricci_ct_f = lambda z: 0.22 + 0.18 * z**0.4\nfraction_ricci = lambda lsar, z : ricci_ct_f(z)+(0.8-ricci_ct_f(z))*(0.5+0.5*erf((-lsar+32.75)/0.4))\n\nnhs = n.arange(32, 36, 0.1)\n\np.figure(1, (5,5))\nfor zz in n.arange(0.,3.,0.5):\n\tp.plot(nhs, fraction_ricci(nhs, zz), label='z='+str(zz))\n\np.axhline(0.22)\np.legend(frameon=False)\np.xlabel('lambda SAR')\np.ylabel('fraction')\np.ylim((0.,1.))\np.text(33,0.1,'thick 24<nh<26')\np.text(33,0.9,'unobscured 20<nH<22')\np.text(32,0.5,'thin')\np.text(32,0.4,'22<nH<24')\np.savefig('/home/comparat/Desktop/ricci2017.png')\np.clf()\n\n", "import h5py # HDF5 support\nimport os\nimport glob\nimport numpy as n\nfrom scipy.interpolate import interp1d\n\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as p\n\nplotDir = os.path.join(os.environ['HOME'], 'wwwDir', \"eRoMok\", \"logNlogS\")\n\n\nfrom astropy.cosmology import FlatLambdaCDM\nimport astropy.units as u\ncosmoMD = FlatLambdaCDM(H0=67.77*u.km/u.s/u.Mpc, Om0=0.307115, Ob0=0.048206)\n\ndef get_lognlogs(path_to_lc, area, z_max=3., ra_max=10., dec_max=10.):\n f = h5py.File(path_to_lc, 'r+')\n is_gal = (f['/sky_position/selection'].value)&(f['/sky_position/redshift_R'].value<z_max)&(abs(f['/sky_position/DEC'].value)<dec_max)&(abs(f['/sky_position/RA'].value)<ra_max)\n is_agn = (f['/sky_position/selection'].value)&(f['/agn_properties/agn_activity'].value==1)&(f['/agn_properties/rxay_flux_05_20'].value>0)\n n_gal = len(f['/sky_position/redshift_S'].value[is_gal])\n n_agn = len(f['/sky_position/redshift_S'].value[is_agn])\n z = f['/sky_position/redshift_S'].value[is_agn]\n #logm = n.log10(f['/moster_2013_data/stellar_mass'].value[is_agn])\n #lsar = f['/agn_properties/log_lambda_sar'].value[is_agn]\n #lx = logm + lsar\n log_f_05_20 = n.log10(f['/agn_properties/rxay_flux_05_20'].value[is_agn]) #- 0.6\n f.close()\n out = n.histogram(log_f_05_20, bins = n.arange(-18, -8., 0.2))\n # cumulative number density per square degrees\n x_out = 0.5*(out[1][1:] + out[1][:-1])\n N_out = n.array([n.sum(out[0][ii:]) for ii in range(len(out[0])) ])\n c_out = n.array([n.sum(out[0][ii:]) for ii in range(len(out[0])) ]) / area\n c_out_up = (1 + N_out**(-0.5)) * 
c_out\n c_out_low = (1 - N_out**(-0.5)) * c_out\n c_err = (n.log10(c_out_up) - n.log10(c_out_low))/2.\n return x_out, c_out, c_err\n\np.figure(1, (6,6))\n\npath_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L3.hdf5'\narea = 6.7529257176359*2. * 2* 8.269819492449505\nx_out, c_out, c_err = get_lognlogs(path_to_lc, area, 1.1, 6.7529257176359, 8.269819492449505)\n#p.plot(x_out, n.log10(c_out), lw=2, rasterized = True, label = 'z<1.08' )\np.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L3 z<1.08, 223deg2' )\nx_out_a, c_out_a, c_err_a = x_out, c_out, c_err \np.axhline(n.log10(300), ls='dashed')\n\n#path_to_lc=='/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_remaped_position_L3_z1.hdf5'\n#area = 3.3764628588325674*2. * 2* 4.134909746242654\n#x_out, c_out, c_err = get_lognlogs(path_to_lc, area, z_max=3.)\n#p.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L3 1.08<z<3.' )\n#p.plot(x_out, n.log10(c_out+c_out_a), ls='dashed', label='total')\n\npath_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L6.hdf5'\narea = 1.9766516114702513*2. * 2*2.0047373031569915\nx_out, c_out, c_err = get_lognlogs(path_to_lc, area, 3., 1.9766516114702513, 2.0047373031569915)\np.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L6 z<3., 15deg2' )\n\n#p.plot(x_out-0.1, n.log10(c_out), 'k', lw=2, rasterized = True, label = 'L3 lc-0.1' )\n#p.plot(x_out, n.log10(c_out*(1-frac_err_13deg2)), 'k--', lw=1, rasterized = True, label = 'v0.6, 13.3deg2 scatter' )\n#p.plot(x_out, n.log10(c_out*(1+frac_err_13deg2)), 'k--', lw=1, rasterized = True)\n#p.plot(x_out, n.log10(c_out*(1-frac_err_3deg2)), 'r--', lw=1, rasterized = True, label = 'v0.6, 3.5deg2 scatter' )\n#p.plot(x_out, n.log10(c_out*(1+frac_err_3deg2)), 'r--', lw=1, rasterized = True)\n#p.plot(x_out_0, n.log10(c_out_0), 'm--', rasterized = True, label = 'Planck mock v0.0' )\n\npath_to_lc = '/data17s/darksim/MD/MD_1.0Gpc/h5_lc/lc_L15.hdf5'\narea = 14.323944878104827*2. * 2*20.257311381848154\nx_out, c_out, c_err = get_lognlogs(path_to_lc, area, 3., 14.323944878104827, 20.257311381848154)\np.errorbar(x_out, n.log10(c_out), yerr = c_err, rasterized = True, label = 'L15 z<0.54 1160deg2' )\n\npath_2_logNlogS_data = os.path.join(os.environ[\"DARKSIM_DIR\"], 'observations', 'logNlogS', 'logNlogS_Georgakakis_08_AGN.data')\nx_data, y_data, yerr = n.loadtxt(path_2_logNlogS_data, unpack=True)\np.fill_between(x_data, y1 = n.log10(y_data-yerr), y2=n.log10(y_data+yerr), color='b' , rasterized = True, alpha=0.5, label = 'Georgakakis 08' )\n#p.plot(x_data, n.log10(y_data))\npath_2_logNlogS_data = os.path.join(os.environ[\"DARKSIM_DIR\"], 'observations', 'logNlogS', 'logNlogS_Merloni_12_AGN.data')\nx_data, y_data = n.loadtxt(path_2_logNlogS_data, unpack=True)\np.plot(x_data, n.log10(y_data), label = 'Merloni 12' )\n\np.axhline(7, ls='dashed')\np.xlabel('log(F[0.5-2 keV])')\np.ylabel('log(>F) [/deg2]')\np.legend(frameon=False, loc=0)\n#p.yscale('log')\np.xlim((-17, -12))\np.ylim((-2, 4.))\n#p.title('Mocks')\np.grid()\np.savefig(os.path.join(plotDir, \"logN_logS_AGN.jpg\"))\np.clf()\n\n\n\n" ]
[ [ "numpy.transpose" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.axhline", "matplotlib.use", "numpy.arange", "matplotlib.pyplot.ylim", "matplotlib.pyplot.savefig", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.clf", "matplotlib.pyplot.xlabel", "matplotlib.pyplot.text", "scipy.special.erf", "matplotlib.pyplot.figure" ], [ "matplotlib.pyplot.legend", "matplotlib.pyplot.axhline", "matplotlib.use", "matplotlib.pyplot.ylim", "numpy.arange", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.xlim", "matplotlib.pyplot.clf", "numpy.log10", "matplotlib.pyplot.grid", "matplotlib.pyplot.xlabel", "numpy.sum", "numpy.loadtxt", "matplotlib.pyplot.figure" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]
zzbjpc/deepnlp
[ "9a5717e1c7dca3247af1c9e5ca221f374cf95220" ]
[ "deepnlp/ner/ner_model.py" ]
[ "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\"\"\"\nNER tagger for building a LSTM based NER tagging model.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals # compatible with python3 unicode coding\n\nimport time\nimport numpy as np\nimport tensorflow as tf\nimport sys, os\n\npkg_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # .../deepnlp/\nsys.path.append(pkg_path)\nfrom ner import reader # explicit relative import\n\n# language option python command line 'python ner_model.py zh'\nlang = \"zh\" if len(sys.argv)==1 else sys.argv[1] # default zh\nfile_path = os.path.dirname(os.path.abspath(__file__))\ndata_path = os.path.join(file_path, \"data\", lang)\ntrain_dir = os.path.join(file_path, \"ckpt\", lang)\n\nflags = tf.flags\nlogging = tf.logging\n\nflags.DEFINE_string(\"ner_lang\", lang, \"ner language option for model config\")\nflags.DEFINE_string(\"ner_data_path\", data_path, \"data_path\")\nflags.DEFINE_string(\"ner_train_dir\", train_dir, \"Training directory.\")\nflags.DEFINE_string(\"ner_scope_name\", \"ner_var_scope\", \"Variable scope of NER Model\")\n\nFLAGS = flags.FLAGS\n\ndef data_type():\n return tf.float32\n\nclass NERTagger(object):\n \"\"\"The NER Tagger Model.\"\"\"\n\n def __init__(self, is_training, config):\n self.batch_size = batch_size = config.batch_size\n self.num_steps = num_steps = config.num_steps\n size = config.hidden_size\n vocab_size = config.vocab_size\n target_num = config.target_num # target output number\n \n self._input_data = tf.placeholder(tf.int32, [batch_size, num_steps])\n self._targets = tf.placeholder(tf.int32, [batch_size, num_steps])\n \n # Check if Model is Training\n self.is_training = is_training\n \n lstm_cell = tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)\n if is_training and config.keep_prob < 1:\n lstm_cell = tf.contrib.rnn.DropoutWrapper(\n lstm_cell, output_keep_prob=config.keep_prob)\n cell = tf.contrib.rnn.MultiRNNCell([lstm_cell] * config.num_layers, state_is_tuple=True)\n \n self._initial_state = cell.zero_state(batch_size, data_type())\n \n with tf.device(\"/cpu:0\"):\n embedding = tf.get_variable(\n \"embedding\", [vocab_size, size], dtype=data_type())\n inputs = tf.nn.embedding_lookup(embedding, self._input_data)\n\n if is_training and config.keep_prob < 1:\n inputs = tf.nn.dropout(inputs, config.keep_prob)\n \n outputs = []\n state = self._initial_state\n with tf.variable_scope(\"ner_lstm\"):\n for time_step in range(num_steps):\n if time_step > 0: tf.get_variable_scope().reuse_variables()\n (cell_output, state) = cell(inputs[:, time_step, :], state)\n outputs.append(cell_output)\n \n output = tf.reshape(tf.concat(outputs, 1), [-1, size])\n softmax_w = tf.get_variable(\n \"softmax_w\", [size, target_num], dtype=data_type())\n softmax_b = tf.get_variable(\"softmax_b\", [target_num], dtype=data_type())\n logits = tf.matmul(output, softmax_w) + softmax_b\n loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(\n logits = [logits],\n targets = [tf.reshape(self._targets, [-1])],\n weights = [tf.ones([batch_size * num_steps], dtype=data_type())])\n \n # Fetch Reults in session.run()\n self._cost = cost = tf.reduce_sum(loss) / batch_size\n self._final_state = state\n self._logits = logits\n \n self._lr = tf.Variable(0.0, trainable=False)\n tvars = tf.trainable_variables()\n grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),\n config.max_grad_norm)\n optimizer 
= tf.train.GradientDescentOptimizer(self._lr)\n self._train_op = optimizer.apply_gradients(zip(grads, tvars))\n \n self._new_lr = tf.placeholder(\n data_type(), shape=[], name=\"new_learning_rate\")\n self._lr_update = tf.assign(self._lr, self._new_lr)\n self.saver = tf.train.Saver(tf.global_variables())\n \n def assign_lr(self, session, lr_value):\n session.run(self._lr_update, feed_dict={self._new_lr: lr_value})\n \n @property\n def input_data(self):\n return self._input_data\n\n @property\n def targets(self):\n return self._targets\n\n @property\n def initial_state(self):\n return self._initial_state\n\n @property\n def cost(self):\n return self._cost\n\n @property\n def final_state(self):\n return self._final_state\n \n @property\n def logits(self):\n return self._logits\n\n @property\n def lr(self):\n return self._lr\n\n @property\n def train_op(self):\n return self._train_op\n\n# NER Model Configuration, Set Target Num, and input vocab_Size\nclass LargeConfigChinese(object):\n \"\"\"Large config.\"\"\"\n init_scale = 0.04\n learning_rate = 0.1\n max_grad_norm = 10\n num_layers = 2\n num_steps = 30\n hidden_size = 128\n max_epoch = 14\n max_max_epoch = 55\n keep_prob = 1.00 # remember to set to 1.00 when making new prediction\n lr_decay = 1 / 1.15\n batch_size = 1 # single sample batch\n vocab_size = 60000\n target_num = 8 # NER Tag 7, nt, n, p, o, q (special), nz entity_name, nbz\n\nclass LargeConfigEnglish(object):\n \"\"\"Large config.\"\"\"\n init_scale = 0.04\n learning_rate = 0.1\n max_grad_norm = 10\n num_layers = 2\n num_steps = 30\n hidden_size = 128\n max_epoch = 14\n max_max_epoch = 55\n keep_prob = 1.00 # remember to set to 1.00 when making new prediction\n lr_decay = 1 / 1.15\n batch_size = 1 # single sample batch\n vocab_size = 52000\n target_num = 15 # NER Tag 17, n, nf, nc, ne, (name, start, continue, end) n, p, o, q (special), nz entity_name, nbz\n\ndef get_config(lang):\n if (lang == 'zh'):\n return LargeConfigChinese() \n elif (lang == 'en'):\n return LargeConfigEnglish()\n # other lang options\n \n else :\n return None\n\ndef run_epoch(session, model, word_data, tag_data, eval_op, verbose=False):\n \"\"\"Runs the model on the given data.\"\"\"\n epoch_size = ((len(word_data) // model.batch_size) - 1) // model.num_steps\n start_time = time.time()\n costs = 0.0\n iters = 0\n state = session.run(model.initial_state)\n for step, (x, y) in enumerate(reader.iterator(word_data, tag_data, model.batch_size,\n model.num_steps)):\n fetches = [model.cost, model.final_state, eval_op]\n feed_dict = {}\n feed_dict[model.input_data] = x\n feed_dict[model.targets] = y\n for i, (c, h) in enumerate(model.initial_state):\n feed_dict[c] = state[i].c\n feed_dict[h] = state[i].h\n cost, state, _ = session.run(fetches, feed_dict)\n costs += cost\n iters += model.num_steps\n \n if verbose and step % (epoch_size // 10) == 10:\n print(\"%.3f perplexity: %.3f speed: %.0f wps\" %\n (step * 1.0 / epoch_size, np.exp(costs / iters),\n iters * model.batch_size / (time.time() - start_time)))\n \n # Save Model to CheckPoint when is_training is True\n if model.is_training:\n if step % (epoch_size // 10) == 10:\n checkpoint_path = os.path.join(FLAGS.ner_train_dir, \"ner.ckpt\")\n model.saver.save(session, checkpoint_path)\n print(\"Model Saved... 
at time step \" + str(step))\n\n return np.exp(costs / iters)\n\n\ndef main(_):\n if not FLAGS.ner_data_path:\n raise ValueError(\"No data files found in 'data_path' folder\")\n\n raw_data = reader.load_data(FLAGS.ner_data_path)\n train_word, train_tag, dev_word, dev_tag, test_word, test_tag, vocabulary = raw_data\n \n config = get_config(FLAGS.ner_lang)\n \n eval_config = get_config(FLAGS.ner_lang)\n eval_config.batch_size = 1\n eval_config.num_steps = 1\n \n with tf.Graph().as_default(), tf.Session() as session:\n initializer = tf.random_uniform_initializer(-config.init_scale,\n config.init_scale)\n with tf.variable_scope(FLAGS.ner_scope_name, reuse=None, initializer=initializer):\n m = NERTagger(is_training=True, config=config)\n with tf.variable_scope(FLAGS.ner_scope_name, reuse=True, initializer=initializer):\n mvalid = NERTagger(is_training=False, config=config)\n mtest = NERTagger(is_training=False, config=eval_config)\n \n # CheckPoint State\n ckpt = tf.train.get_checkpoint_state(FLAGS.ner_train_dir)\n if ckpt:\n print(\"Loading model parameters from %s\" % ckpt.model_checkpoint_path)\n m.saver.restore(session, tf.train.latest_checkpoint(FLAGS.ner_train_dir))\n else:\n print(\"Created model with fresh parameters.\")\n session.run(tf.global_variables_initializer())\n \n for i in range(config.max_max_epoch):\n lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)\n m.assign_lr(session, config.learning_rate * lr_decay)\n \n print(\"Epoch: %d Learning rate: %.3f\" % (i + 1, session.run(m.lr)))\n train_perplexity = run_epoch(session, m, train_word, train_tag, m.train_op,\n verbose=True)\n print(\"Epoch: %d Train Perplexity: %.3f\" % (i + 1, train_perplexity))\n valid_perplexity = run_epoch(session, mvalid, dev_word, dev_tag, tf.no_op())\n print(\"Epoch: %d Valid Perplexity: %.3f\" % (i + 1, valid_perplexity))\n\n test_perplexity = run_epoch(session, mtest, test_word, test_tag, tf.no_op())\n print(\"Test Perplexity: %.3f\" % test_perplexity)\n\nif __name__ == \"__main__\":\n tf.app.run()\n" ]
[ [ "tensorflow.device", "tensorflow.concat", "tensorflow.reduce_sum", "tensorflow.global_variables", "numpy.exp", "tensorflow.Graph", "tensorflow.Variable", "tensorflow.random_uniform_initializer", "tensorflow.gradients", "tensorflow.contrib.rnn.MultiRNNCell", "tensorflow.Session", "tensorflow.trainable_variables", "tensorflow.nn.dropout", "tensorflow.app.run", "tensorflow.matmul", "tensorflow.placeholder", "tensorflow.global_variables_initializer", "tensorflow.train.GradientDescentOptimizer", "tensorflow.no_op", "tensorflow.nn.embedding_lookup", "tensorflow.train.get_checkpoint_state", "tensorflow.train.latest_checkpoint", "tensorflow.contrib.rnn.DropoutWrapper", "tensorflow.contrib.rnn.BasicLSTMCell", "tensorflow.assign", "tensorflow.reshape", "tensorflow.variable_scope", "tensorflow.get_variable_scope" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10" ] } ]
krasserm/edward2
[ "311d3ad6946b543e70af1495eab9a0a9b4f69854" ]
[ "experimental/marginalization_mixup/augment.py" ]
[ "# coding=utf-8\n# Copyright 2020 The Edward2 Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"RandAugment policies for enhanced image preprocessing.\"\"\"\n\nimport math\nimport tensorflow as tf\n\n# pylint:disable=g-direct-tensorflow-import\nfrom tensorflow.python.keras.layers.preprocessing import image_preprocessing as image_ops\n# pylint:enable=g-direct-tensorflow-import\n\n# This signifies the max integer that the controller RNN could predict for the\n# augmentation scheme.\nMAX_LEVEL = 10.\n\n\ndef to_4d(image):\n \"\"\"Converts an input Tensor to 4 dimensions.\n\n 4D image => [N, H, W, C] or [N, C, H, W]\n 3D image => [1, H, W, C] or [1, C, H, W]\n 2D image => [1, H, W, 1]\n\n Args:\n image: The 2/3/4D input tensor.\n\n Returns:\n A 4D image tensor.\n\n Raises:\n `TypeError` if `image` is not a 2/3/4D tensor.\n\n \"\"\"\n shape = tf.shape(image)\n original_rank = tf.rank(image)\n left_pad = tf.cast(tf.less_equal(original_rank, 3), dtype=tf.int32)\n right_pad = tf.cast(tf.equal(original_rank, 2), dtype=tf.int32)\n new_shape = tf.concat(\n [\n tf.ones(shape=left_pad, dtype=tf.int32),\n shape,\n tf.ones(shape=right_pad, dtype=tf.int32),\n ],\n axis=0,\n )\n return tf.reshape(image, new_shape)\n\n\ndef from_4d(image, ndims):\n \"\"\"Converts a 4D image back to `ndims` rank.\"\"\"\n shape = tf.shape(image)\n begin = tf.cast(tf.less_equal(ndims, 3), dtype=tf.int32)\n end = 4 - tf.cast(tf.equal(ndims, 2), dtype=tf.int32)\n new_shape = shape[begin:end]\n return tf.reshape(image, new_shape)\n\n\ndef _convert_translation_to_transform(translations):\n \"\"\"Converts translations to a projective transform.\n\n The translation matrix looks like this:\n [[1 0 -dx]\n [0 1 -dy]\n [0 0 1]]\n\n Args:\n translations: The 2-element list representing [dx, dy], or a matrix of\n 2-element lists representing [dx dy] to translate for each image. 
The\n shape must be static.\n\n Returns:\n The transformation matrix of shape (num_images, 8).\n\n Raises:\n `TypeError` if\n - the shape of `translations` is not known or\n - the shape of `translations` is not rank 1 or 2.\n\n \"\"\"\n translations = tf.convert_to_tensor(translations, dtype=tf.float32)\n if translations.get_shape().ndims is None:\n raise TypeError('translations rank must be statically known')\n elif len(translations.get_shape()) == 1:\n translations = translations[None]\n elif len(translations.get_shape()) != 2:\n raise TypeError('translations should have rank 1 or 2.')\n num_translations = tf.shape(translations)[0]\n\n return tf.concat(\n values=[\n tf.ones((num_translations, 1), tf.dtypes.float32),\n tf.zeros((num_translations, 1), tf.dtypes.float32),\n -translations[:, 0, None],\n tf.zeros((num_translations, 1), tf.dtypes.float32),\n tf.ones((num_translations, 1), tf.dtypes.float32),\n -translations[:, 1, None],\n tf.zeros((num_translations, 2), tf.dtypes.float32),\n ],\n axis=1,\n )\n\n\ndef _convert_angles_to_transform(angles, image_width, image_height):\n \"\"\"Converts an angle or angles to a projective transform.\n\n Args:\n angles: A scalar to rotate all images, or a vector to rotate a batch of\n images. This must be a scalar.\n image_width: The width of the image(s) to be transformed.\n image_height: The height of the image(s) to be transformed.\n\n Returns:\n A tensor of shape (num_images, 8).\n\n Raises:\n `TypeError` if `angles` is not rank 0 or 1.\n\n \"\"\"\n angles = tf.convert_to_tensor(angles, dtype=tf.float32)\n if len(angles.get_shape()) == 0: # pylint:disable=g-explicit-length-test\n angles = angles[None]\n elif len(angles.get_shape()) != 1:\n raise TypeError('Angles should have a rank 0 or 1.')\n x_offset = ((image_width - 1) -\n (tf.math.cos(angles) * (image_width - 1) - tf.math.sin(angles) *\n (image_height - 1))) / 2.0\n y_offset = ((image_height - 1) -\n (tf.math.sin(angles) * (image_width - 1) + tf.math.cos(angles) *\n (image_height - 1))) / 2.0\n num_angles = tf.shape(angles)[0]\n return tf.concat(\n values=[\n tf.math.cos(angles)[:, None],\n -tf.math.sin(angles)[:, None],\n x_offset[:, None],\n tf.math.sin(angles)[:, None],\n tf.math.cos(angles)[:, None],\n y_offset[:, None],\n tf.zeros((num_angles, 2), tf.dtypes.float32),\n ],\n axis=1,\n )\n\n\ndef transform(image, transforms):\n \"\"\"Prepares input data for `image_ops.transform`.\"\"\"\n original_ndims = tf.rank(image)\n transforms = tf.convert_to_tensor(transforms, dtype=tf.float32)\n if len(tf.shape(transforms)) == 1:\n transforms = transforms[None]\n image = to_4d(image)\n image = image_ops.transform(\n images=image,\n transforms=transforms,\n interpolation='nearest')\n return from_4d(image, original_ndims)\n\n\ndef translate(image, translations):\n \"\"\"Translates image(s) by provided vectors.\n\n Args:\n image: An image Tensor of type uint8.\n translations: A vector or matrix representing [dx dy].\n\n Returns:\n The translated version of the image.\n\n \"\"\"\n transforms = _convert_translation_to_transform(translations)\n return transform(image, transforms=transforms)\n\n\ndef rotate(image, degrees):\n \"\"\"Rotates the image by degrees either clockwise or counterclockwise.\n\n Args:\n image: An image Tensor of type uint8.\n degrees: Float, a scalar angle in degrees to rotate all images by. 
If\n degrees is positive the image will be rotated clockwise otherwise it will\n be rotated counterclockwise.\n\n Returns:\n The rotated version of image.\n\n \"\"\"\n # Convert from degrees to radians.\n degrees_to_radians = math.pi / 180.0\n radians = degrees * degrees_to_radians\n\n original_ndims = tf.rank(image)\n image = to_4d(image)\n\n image_height = tf.cast(tf.shape(image)[1], tf.float32)\n image_width = tf.cast(tf.shape(image)[2], tf.float32)\n transforms = _convert_angles_to_transform(angles=radians,\n image_width=image_width,\n image_height=image_height)\n # In practice, we should randomize the rotation degrees by flipping\n # it negatively half the time, but that's done on 'degrees' outside\n # of the function.\n image = transform(image, transforms=transforms)\n return from_4d(image, original_ndims)\n\n\ndef blend(image1, image2, factor):\n \"\"\"Blend image1 and image2 using 'factor'.\n\n Factor can be above 0.0. A value of 0.0 means only image1 is used.\n A value of 1.0 means only image2 is used. A value between 0.0 and\n 1.0 means we linearly interpolate the pixel values between the two\n images. A value greater than 1.0 \"extrapolates\" the difference\n between the two pixel values, and we clip the results to values\n between 0 and 255.\n\n Args:\n image1: An image Tensor of type uint8.\n image2: An image Tensor of type uint8.\n factor: A floating point value above 0.0.\n\n Returns:\n A blended image Tensor of type uint8.\n \"\"\"\n if factor == 0.0:\n return tf.convert_to_tensor(image1)\n if factor == 1.0:\n return tf.convert_to_tensor(image2)\n\n image1 = tf.cast(image1, tf.float32)\n image2 = tf.cast(image2, tf.float32)\n\n difference = image2 - image1\n scaled = factor * difference\n\n # Do addition in float.\n temp = tf.cast(image1, tf.float32) + scaled\n\n # Interpolate\n if factor > 0.0 and factor < 1.0:\n # Interpolation means we always stay within 0 and 255.\n return tf.cast(temp, tf.uint8)\n\n # Extrapolate:\n #\n # We need to clip and then cast.\n return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), tf.uint8)\n\n\ndef solarize(image, threshold=128):\n # For each pixel in the image, select the pixel\n # if the value is less than the threshold.\n # Otherwise, subtract 255 from the pixel.\n return tf.where(image < threshold, image, 255 - image)\n\n\ndef color(image, factor):\n \"\"\"Equivalent of PIL Color.\"\"\"\n degenerate = tf.image.grayscale_to_rgb(tf.image.rgb_to_grayscale(image))\n return blend(degenerate, image, factor)\n\n\ndef posterize(image, bits):\n \"\"\"Equivalent of PIL Posterize.\"\"\"\n shift = 8 - bits\n return tf.bitwise.left_shift(tf.bitwise.right_shift(image, shift), shift)\n\n\ndef wrapped_rotate(image, degrees, replace):\n \"\"\"Applies rotation with wrap/unwrap.\"\"\"\n image = rotate(wrap(image), degrees=degrees)\n return unwrap(image, replace)\n\n\ndef translate_x(image, pixels, replace):\n \"\"\"Equivalent of PIL Translate in X dimension.\"\"\"\n image = translate(wrap(image), [-pixels, 0])\n return unwrap(image, replace)\n\n\ndef translate_y(image, pixels, replace):\n \"\"\"Equivalent of PIL Translate in Y dimension.\"\"\"\n image = translate(wrap(image), [0, -pixels])\n return unwrap(image, replace)\n\n\ndef shear_x(image, level, replace):\n \"\"\"Equivalent of PIL Shearing in X dimension.\"\"\"\n # Shear parallel to x axis is a projective transform\n # with a matrix form of:\n # [1 level\n # 0 1].\n image = transform(image=wrap(image),\n transforms=[1., level, 0., 0., 1., 0., 0., 0.])\n return unwrap(image, replace)\n\n\ndef 
shear_y(image, level, replace):\n \"\"\"Equivalent of PIL Shearing in Y dimension.\"\"\"\n # Shear parallel to y axis is a projective transform\n # with a matrix form of:\n # [1 0\n # level 1].\n image = transform(image=wrap(image),\n transforms=[1., 0., 0., level, 1., 0., 0., 0.])\n return unwrap(image, replace)\n\n\ndef autocontrast(image):\n \"\"\"Implements Autocontrast function from PIL using TF ops.\n\n Args:\n image: A 3D uint8 tensor.\n\n Returns:\n The image after it has had autocontrast applied to it and will be of type\n uint8.\n \"\"\"\n\n def scale_channel(image):\n \"\"\"Scale the 2D image using the autocontrast rule.\"\"\"\n # A possibly cheaper version can be done using cumsum/unique_with_counts\n # over the histogram values, rather than iterating over the entire image.\n # to compute mins and maxes.\n lo = tf.cast(tf.reduce_min(image), tf.float32)\n hi = tf.cast(tf.reduce_max(image), tf.float32)\n\n # Scale the image, making the lowest value 0 and the highest value 255.\n def scale_values(im):\n scale = 255.0 / (hi - lo)\n offset = -lo * scale\n im = tf.cast(im, tf.float32) * scale + offset\n im = tf.clip_by_value(im, 0.0, 255.0)\n return tf.cast(im, tf.uint8)\n\n result = tf.cond(hi > lo, lambda: scale_values(image), lambda: image)\n return result\n\n # Assumes RGB for now. Scales each channel independently\n # and then stacks the result.\n s1 = scale_channel(image[:, :, 0])\n s2 = scale_channel(image[:, :, 1])\n s3 = scale_channel(image[:, :, 2])\n image = tf.stack([s1, s2, s3], 2)\n return image\n\n\ndef equalize(image):\n \"\"\"Implements Equalize function from PIL using TF ops.\"\"\"\n def scale_channel(im, c):\n \"\"\"Scale the data in the channel to implement equalize.\"\"\"\n im = tf.cast(im[:, :, c], tf.int32)\n # Compute the histogram of the image channel.\n histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)\n\n # For the purposes of computing the step, filter out the nonzeros.\n nonzero = tf.where(tf.not_equal(histo, 0))\n nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])\n step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255\n\n def build_lut(histo, step):\n # Compute the cumulative sum, shifting by step // 2\n # and then normalization by step.\n lut = (tf.cumsum(histo) + (step // 2)) // step\n # Shift lut, prepending with 0.\n lut = tf.concat([[0], lut[:-1]], 0)\n # Clip the counts to be in range. This is done\n # in the C code for image.point.\n return tf.clip_by_value(lut, 0, 255)\n\n # If step is zero, return the original image. Otherwise, build\n # lut from the full histogram and step and then index from it.\n result = tf.cond(tf.equal(step, 0),\n lambda: im,\n lambda: tf.gather(build_lut(histo, step), im))\n\n return tf.cast(result, tf.uint8)\n\n # Assumes RGB for now. Scales each channel independently\n # and then stacks the result.\n s1 = scale_channel(image, 0)\n s2 = scale_channel(image, 1)\n s3 = scale_channel(image, 2)\n image = tf.stack([s1, s2, s3], 2)\n return image\n\n\ndef wrap(image):\n \"\"\"Returns 'image' with an extra channel set to all 1s.\"\"\"\n shape = tf.shape(image)\n extended_channel = tf.ones([shape[0], shape[1], 1], image.dtype)\n extended = tf.concat([image, extended_channel], axis=2)\n return extended\n\n\ndef unwrap(image, replace):\n \"\"\"Unwraps an image produced by wrap.\n\n Where there is a 0 in the last channel for every spatial position,\n the rest of the three channels in that spatial dimension are grayed\n (set to 128). 
Operations like translate and shear on a wrapped\n Tensor will leave 0s in empty locations. Some transformations look\n at the intensity of values to do preprocessing, and we want these\n empty pixels to assume the 'average' value, rather than pure black.\n\n\n Args:\n image: A 3D Image Tensor with 4 channels.\n replace: A one or three value 1D tensor to fill empty pixels.\n\n Returns:\n image: A 3D image Tensor with 3 channels.\n \"\"\"\n image_shape = tf.shape(image)\n # Flatten the spatial dimensions.\n flattened_image = tf.reshape(image, [-1, image_shape[2]])\n\n # Find all pixels where the last channel is zero.\n alpha_channel = tf.expand_dims(flattened_image[:, 3], axis=-1)\n\n replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)\n\n # Where they are zero, fill them in with 'replace'.\n flattened_image = tf.where(\n tf.equal(alpha_channel, 0),\n tf.ones_like(flattened_image, dtype=image.dtype) * replace,\n flattened_image)\n\n image = tf.reshape(flattened_image, image_shape)\n image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])\n return image\n\n\ndef _randomly_negate_tensor(tensor):\n \"\"\"With 50% prob turn the tensor negative.\"\"\"\n should_flip = tf.cast(tf.floor(tf.random.uniform([]) + 0.5), tf.bool)\n final_tensor = tf.cond(should_flip, lambda: tensor, lambda: -tensor)\n return final_tensor\n\n\ndef _rotate_level_to_arg(level):\n level = (level/MAX_LEVEL) * 30.\n level = _randomly_negate_tensor(level)\n return (level,)\n\n\ndef _shrink_level_to_arg(level):\n \"\"\"Converts level to ratio by which we shrink the image content.\"\"\"\n if level == 0:\n return (1.0,) # if level is zero, do not shrink the image\n # Maximum shrinking ratio is 2.9.\n level = 2. / (MAX_LEVEL / level) + 0.9\n return (level,)\n\n\ndef _enhance_level_to_arg(level):\n return ((level/MAX_LEVEL) * 1.8 + 0.1,)\n\n\ndef _shear_level_to_arg(level):\n level = (level/MAX_LEVEL) * 0.3\n # Flip level to negative with 50% chance.\n level = _randomly_negate_tensor(level)\n return (level,)\n\n\ndef _translate_level_to_arg(level, translate_const):\n level = (level/MAX_LEVEL) * float(translate_const)\n # Flip level to negative with 50% chance.\n level = _randomly_negate_tensor(level)\n return (level,)\n\n\ndef _mult_to_arg(level, multiplier=1.):\n return (int((level / MAX_LEVEL) * multiplier),)\n\n\ndef level_to_arg(translate_const):\n \"\"\"Creates a dict mapping image operation names to their arguments.\"\"\"\n\n no_arg = lambda level: ()\n posterize_arg = lambda level: _mult_to_arg(level, 4)\n solarize_arg = lambda level: _mult_to_arg(level, 256)\n translate_arg = lambda level: _translate_level_to_arg(level, translate_const)\n\n args = {\n 'AutoContrast': no_arg,\n 'Equalize': no_arg,\n 'Rotate': _rotate_level_to_arg,\n 'Posterize': posterize_arg,\n 'Solarize': solarize_arg,\n 'Color': _enhance_level_to_arg,\n 'ShearX': _shear_level_to_arg,\n 'ShearY': _shear_level_to_arg,\n 'TranslateX': translate_arg,\n 'TranslateY': translate_arg,\n }\n return args\n\n\nNAME_TO_FUNC = {\n 'AutoContrast': autocontrast,\n 'Equalize': equalize,\n 'Rotate': wrapped_rotate,\n 'Posterize': posterize,\n 'Solarize': solarize,\n 'Color': color,\n 'ShearX': shear_x,\n 'ShearY': shear_y,\n 'TranslateX': translate_x,\n 'TranslateY': translate_y,\n}\n\n# Functions that have a 'replace' parameter\nREPLACE_FUNCS = frozenset({\n 'Rotate',\n 'TranslateX',\n 'ShearX',\n 'ShearY',\n 'TranslateY',\n})\n\n\ndef _parse_policy_info(name,\n prob,\n level,\n replace_value,\n translate_const):\n \"\"\"Return the function that 
corresponds to `name` and update `level` param.\"\"\"\n func = NAME_TO_FUNC[name]\n args = level_to_arg(translate_const)[name](level)\n\n if name in REPLACE_FUNCS:\n # Add in replace arg if it is required for the function that is called.\n args = tuple(list(args) + [replace_value])\n\n return func, prob, args\n\n\nclass RandAugment(object):\n \"\"\"Applies the RandAugment policy to images.\n\n RandAugment is from the paper https://arxiv.org/abs/1909.13719,\n \"\"\"\n\n def __init__(self,\n num_layers=1,\n magnitude=10,\n translate_const=100):\n \"\"\"Applies the RandAugment policy to images.\n\n Args:\n num_layers: Integer, the number of augmentation transformations to apply\n sequentially to an image. Represented as (N) in the paper. Usually best\n values will be in the range [1, 3].\n magnitude: Integer, shared magnitude across all augmentation operations.\n Represented as (M) in the paper. Usually best values are in the range\n [5, 10].\n translate_const: multiplier for applying translation.\n \"\"\"\n self.num_layers = num_layers\n self.magnitude = float(magnitude)\n self.translate_const = float(translate_const)\n self.available_ops = [\n 'AutoContrast', 'Equalize', 'Rotate', 'Posterize', 'Solarize',\n 'Color', 'ShearX', 'ShearY', 'TranslateX', 'TranslateY',\n ]\n\n def distort(self, image):\n \"\"\"Applies the RandAugment policy to `image`.\n\n Args:\n image: `Tensor` of shape [height, width, 3] representing an image.\n\n Returns:\n The augmented version of `image`.\n \"\"\"\n input_image_type = image.dtype\n\n if input_image_type != tf.uint8:\n image = tf.clip_by_value(image, 0.0, 255.0)\n image = tf.cast(image, dtype=tf.uint8)\n\n replace_value = [128] * 3\n min_prob, max_prob = 0.2, 0.8\n\n for _ in range(self.num_layers):\n op_to_select = tf.random.uniform(\n [], maxval=len(self.available_ops) + 1, dtype=tf.int32)\n\n branch_fns = []\n for (i, op_name) in enumerate(self.available_ops):\n prob = tf.random.uniform([],\n minval=min_prob,\n maxval=max_prob,\n dtype=tf.float32)\n func, _, args = _parse_policy_info(op_name,\n prob,\n self.magnitude,\n replace_value,\n self.translate_const)\n branch_fns.append((\n i,\n # pylint:disable=g-long-lambda\n lambda selected_func=func, selected_args=args: selected_func(\n image, *selected_args)))\n # pylint:enable=g-long-lambda\n\n image = tf.switch_case(branch_index=op_to_select,\n branch_fns=branch_fns,\n default=lambda: tf.identity(image))\n\n image = tf.cast(image, dtype=input_image_type)\n return image\n" ]
[ [ "tensorflow.convert_to_tensor", "tensorflow.cond", "tensorflow.concat", "tensorflow.zeros", "tensorflow.stack", "tensorflow.reduce_sum", "tensorflow.cast", "tensorflow.equal", "tensorflow.where", "tensorflow.rank", "tensorflow.cumsum", "tensorflow.bitwise.right_shift", "tensorflow.python.keras.layers.preprocessing.image_preprocessing.transform", "tensorflow.image.rgb_to_grayscale", "tensorflow.gather", "tensorflow.math.sin", "tensorflow.math.cos", "tensorflow.shape", "tensorflow.less_equal", "tensorflow.random.uniform", "tensorflow.identity", "tensorflow.clip_by_value", "tensorflow.not_equal", "tensorflow.reduce_max", "tensorflow.slice", "tensorflow.reshape", "tensorflow.ones_like", "tensorflow.ones", "tensorflow.expand_dims", "tensorflow.histogram_fixed_width", "tensorflow.reduce_min" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [ "1.10", "1.12", "1.4", "1.5", "1.7", "0.12", "1.0", "1.2" ] } ]
lumicks/pylake
[ "b5875d156d6416793a371198f3f2590fca2be4cd", "b5875d156d6416793a371198f3f2590fca2be4cd", "b5875d156d6416793a371198f3f2590fca2be4cd" ]
[ "lumicks/pylake/tests/data/mock_widefield.py", "lumicks/pylake/force_calibration/tests/test_active_calibration.py", "lumicks/pylake/kymotracker/tests/test_io.py" ]
[ "import numpy as np\nimport json\nimport tifffile\n\n\nclass MockTag():\n def __init__(self, value):\n self._value = value\n\n @property\n def value(self):\n return self._value\n\n\nclass MockTiffPage:\n def __init__(self, data, start_time, end_time, description=\"\", bit_depth=8):\n self._data = data\n bit_depth = bit_depth if data.ndim == 2 else (bit_depth, bit_depth, bit_depth)\n self.tags = {\"DateTime\": MockTag(f\"{start_time}:{end_time}\"),\n \"ImageDescription\": MockTag(description),\n \"BitsPerSample\": MockTag(bit_depth),\n \"SamplesPerPixel\": MockTag(1 if (data.ndim==2) else data.shape[2]),\n \"ImageWidth\": MockTag(data.shape[1]),\n \"ImageLength\": MockTag(data.shape[0])}\n\n def asarray(self):\n return self._data.copy()\n\n @property\n def description(self):\n return self.tags[\"ImageDescription\"].value\n\n\nclass MockTiffFile:\n def __init__(self, data, times, description=\"\", bit_depth=8):\n self.pages = []\n for d, r in zip(data, times):\n self.pages.append(MockTiffPage(d, r[0], r[1], description=description, bit_depth=bit_depth))\n\n @property\n def num_frames(self):\n return len(self._src.pages)\n\n\ndef apply_transform(spots, Tx, Ty, theta, offsets=None):\n theta = np.radians(theta)\n transform_matrix = np.array([[np.cos(theta), -np.sin(theta), Tx],\n [np.sin(theta), np.cos(theta), Ty],\n [0, 0, 1]])\n\n # translate origin by offsets if necessary\n offsets = np.zeros((2, 1)) if offsets is None else np.array(offsets, ndmin=2).T\n spots = spots - offsets\n # reshape spots into coordinate matrix; [x,y,z] as columns\n spots = np.vstack((spots, np.ones(spots.shape[1])))\n # affine transformation\n transformed_spots = np.dot(transform_matrix, spots)[:2]\n # back-translate origin if necessary\n transformed_spots = transformed_spots + offsets\n\n return transform_matrix, transformed_spots\n\n\ndef make_image(spots, bit_depth):\n # RGB image, 2D (normalized) gaussians at spot locations\n sigma = np.eye(2)*5\n X, Y = np.meshgrid(np.arange(0, 200), np.arange(0, 100))\n img = np.zeros(X.shape)\n\n for x, y, in spots.T:\n mu = np.array([x,y])[:,np.newaxis]\n XX = np.vstack((X.ravel(), Y.ravel())) - mu\n quad_form = np.sum(np.dot(XX.T, np.linalg.inv(sigma)) * XX.T, axis=1)\n Z = np.exp(-0.5 * quad_form)\n img += Z.reshape(X.shape)\n img = img / img.max()\n\n return (img * (2**bit_depth - 1)).astype(f\"uint{bit_depth}\")\n\n\ndef _make_base_description(version, bit_depth):\n # version == 1 corresponds to metadata generated by Bluelake v1.7.0-beta1.c\n # version == 2 corresponds to metadata generated by Bluelake v1.7.0\n\n laser_on = lambda c: f\"{c} Excitation Laser on\"\n laser_wavelength = lambda c: f\"{c} Excitation Laser wavelength (nm)\"\n laser_power = lambda c, suff: f\"{c} Excitation Laser power {suff}(%)\"\n colors = (\"Blue\", \"Green\", \"Red\")\n\n description = {\n \"Background subtraction\": None,\n \"Exposure time (ms)\": None,\n \"Focus lock\": None,\n \"Frame averaging\": None,\n \"Frame rate (Hz)\": None,\n \"Pixel clock (MHz)\": None,\n \"Region of interest (x, y, width, height)\": [0, 0, 200, 100]\n }\n for c in colors:\n description[laser_wavelength(c)] = None\n description[laser_power(c, \"\" if version == 1 else \"level \")] = None\n if version > 1:\n for c in colors:\n description[laser_on(c)] = None\n description[\"Bit depth\"] = bit_depth\n description[\"Exposure sync available\"] = None\n description[\"Exposure sync enabled\"] = None\n return description\n\n\ndef make_irm_description(version, bit_depth):\n description = _make_base_description(version, 
bit_depth)\n description[\"Camera\"] = \"IRM\"\n return description\n\n\ndef make_wt_description(version, bit_depth, m_red, m_blue, offsets):\n if version == 1:\n alignment_matrices = lambda color: f\"Alignment {color} channel\"\n channel_choices = (\"red\", \"green\", \"blue\")\n else:\n alignment_matrices = lambda index: f\"Channel {index} alignment\"\n channel_choices = range(3)\n\n offsets = [0, 0] if offsets is None else offsets\n matrices = (m_red, np.eye(3), m_blue)\n\n description = _make_base_description(version, bit_depth)\n description[\"Camera\"] = \"WT\"\n for c, mat in zip(channel_choices, matrices):\n description[alignment_matrices(c)] = mat[:2].ravel().tolist()\n description[\"Alignment region of interest (x, y, width, height)\"] = [offsets[0], offsets[1], 200, 100]\n description[\"TIRF\"] = None\n description[\"TIRF angle (device units)\"] = None\n return description\n\n\ndef make_alignment_image_data(spots, red_warp_parameters, blue_warp_parameters, bit_depth,\n offsets=None, camera=\"wt\", version=1):\n\n spots = np.array(spots).T # [2 x N]\n m_red, red_spots = apply_transform(spots, offsets=offsets, **red_warp_parameters)\n m_blue, blue_spots = apply_transform(spots, offsets=offsets, **blue_warp_parameters)\n\n red_image = make_image(red_spots, bit_depth)\n green_image = make_image(spots, bit_depth)\n blue_image = make_image(blue_spots, bit_depth)\n\n reference_image = np.repeat(green_image[:,:,np.newaxis], 3, axis=2)\n warped_image = np.stack((red_image, green_image, blue_image), axis=2).squeeze()\n if camera == \"wt\":\n description = make_wt_description(version, bit_depth, m_red, m_blue, offsets)\n elif camera == \"irm\":\n description = make_irm_description(version, bit_depth)\n # IRM images are grayscale so they only have 1 channel\n reference_image = reference_image[:,:,1]\n warped_image = warped_image[:,:,1]\n else:\n raise ValueError(\"camera argument must be 'wt' or 'irm'\")\n\n return reference_image, warped_image, description, bit_depth\n\n\ndef write_tiff_file(image_args, n_frames, filename):\n _, warped_image, description, _ = image_args\n\n # We use the dimension of image data to evaluate the number of color channels\n channels = 1 if warped_image.ndim == 2 else 3\n movie = np.stack([warped_image for n in range(n_frames)], axis=0)\n\n tag_orientation = (274, 'H', 1, 1, False) # Orientation = ORIENTATION.TOPLEFT\n tag_sample_format = (339, 'H', channels, (1, )*channels, False) # SampleFormat = SAMPLEFORMAT.UINT\n\n with tifffile.TiffWriter(filename) as tif:\n for n, frame in enumerate(movie):\n str_datetime = f\"{n*10+10}:{n*10+18}\"\n tag_datetime = (306, 's', len(str_datetime), str_datetime, False)\n tif.save(frame,\n description=json.dumps(description, indent=4),\n software=\"Bluelake Unknown\",\n metadata=None, contiguous=False,\n extratags=(tag_orientation, tag_sample_format, tag_datetime))\n", "import numpy as np\nimport scipy.constants\nimport pytest\nfrom lumicks.pylake.force_calibration.detail.power_models import sphere_friction_coefficient\nfrom lumicks.pylake.force_calibration.power_spectrum_calibration import (\n calculate_power_spectrum,\n fit_power_spectrum,\n)\nfrom lumicks.pylake.force_calibration.calibration_models import ActiveCalibrationModel\nfrom .data.simulate_calibration_data import generate_active_calibration_test_data\n\n\[email protected](\n \"sample_rate, bead_diameter, stiffness, viscosity, temperature, pos_response_um_volt, \"\n \"driving_sinusoid, diode, driving_frequency_guess, power_density\",\n [\n [78125, 1.03, 0.1, 
1.002e-3, 20, 0.618, (500, 31.95633), (0.4, 15000), 32, 1.958068e-5],\n [78125, 1.03, 0.2, 1.012e-3, 20, 1.618, (500, 31.95633), (0.4, 14000), 32, 7.28664e-07],\n [78125, 1.03, 0.3, 1.002e-3, 50, 1.618, (300, 30.42633), (0.4, 16000), 29, 1.098337e-07],\n ],\n)\ndef test_integration_active_calibration(\n sample_rate,\n bead_diameter,\n stiffness,\n viscosity,\n temperature,\n pos_response_um_volt,\n driving_sinusoid,\n diode,\n driving_frequency_guess,\n power_density,\n):\n \"\"\"Functional end to end test for active calibration\"\"\"\n\n np.random.seed(0)\n force_voltage_data, driving_data = generate_active_calibration_test_data(\n duration=20,\n sample_rate=sample_rate,\n bead_diameter=bead_diameter,\n stiffness=stiffness,\n viscosity=viscosity,\n temperature=temperature,\n pos_response_um_volt=pos_response_um_volt,\n driving_sinusoid=driving_sinusoid,\n diode=diode,\n )\n\n model = ActiveCalibrationModel(\n driving_data,\n force_voltage_data,\n sample_rate,\n bead_diameter,\n driving_frequency_guess,\n viscosity,\n temperature,\n )\n\n # Validate estimation of the driving input\n np.testing.assert_allclose(model.driving_amplitude, driving_sinusoid[0] * 1e-9, rtol=1e-5)\n np.testing.assert_allclose(model.driving_frequency, driving_sinusoid[1], rtol=1e-5)\n\n np.testing.assert_allclose(model._response_power_density, power_density, rtol=1e-5)\n num_points_per_window = int(np.round(sample_rate * model.num_windows / model.driving_frequency))\n freq_axis = np.fft.rfftfreq(num_points_per_window, 1.0 / sample_rate)\n np.testing.assert_allclose(model._frequency_bin_width, freq_axis[1] - freq_axis[0])\n\n power_spectrum = calculate_power_spectrum(force_voltage_data, sample_rate)\n fit = fit_power_spectrum(power_spectrum, model)\n\n np.testing.assert_allclose(fit[\"kappa\"].value, stiffness, rtol=5e-2)\n np.testing.assert_allclose(fit[\"alpha\"].value, diode[0], rtol=5e-2)\n np.testing.assert_allclose(fit[\"f_diode\"].value, diode[1], rtol=5e-2)\n np.testing.assert_allclose(fit[\"Rd\"].value, pos_response_um_volt, rtol=5e-2)\n\n response_calc = fit[\"Rd\"].value * fit[\"kappa\"].value * 1e3\n np.testing.assert_allclose(fit[\"Rf\"].value, response_calc, rtol=1e-9)\n\n kt = scipy.constants.k * scipy.constants.convert_temperature(temperature, \"C\", \"K\")\n drag_coeff_calc = kt / (fit[\"D\"].value * fit[\"Rd\"].value ** 2)\n np.testing.assert_allclose(\n fit[\"gamma_0\"].value,\n sphere_friction_coefficient(viscosity, bead_diameter * 1e-6),\n rtol=1e-9,\n )\n np.testing.assert_allclose(fit[\"gamma_ex\"].value, drag_coeff_calc * 1e12, rtol=1e-9)\n\n np.testing.assert_allclose(fit[\"Bead diameter\"].value, bead_diameter)\n np.testing.assert_allclose(fit[\"Driving frequency (guess)\"].value, driving_frequency_guess)\n np.testing.assert_allclose(fit[\"Sample rate\"].value, sample_rate)\n np.testing.assert_allclose(fit[\"Viscosity\"].value, viscosity)\n np.testing.assert_allclose(fit[\"num_windows\"].value, 5)\n\n\ndef test_bias_correction():\n \"\"\"Functional end to end test for active calibration\"\"\"\n\n np.random.seed(0)\n force_voltage_data, driving_data = generate_active_calibration_test_data(\n duration=20,\n sample_rate=78125,\n bead_diameter=1.03,\n stiffness=0.2,\n viscosity=1.002e-3,\n temperature=20,\n pos_response_um_volt=0.618,\n driving_sinusoid=(500, 31.95633),\n diode=(0.4, 13000),\n )\n\n model = ActiveCalibrationModel(driving_data, force_voltage_data, 78125, 1.03, 32, 1.002e-3, 20)\n\n # Low blocking deliberately leads to higher bias (so it's easier to measure)\n block_size = 3\n 
power_spectrum_low = calculate_power_spectrum(\n force_voltage_data, 78125, num_points_per_block=block_size\n )\n\n fit_biased = fit_power_spectrum(power_spectrum_low, model, bias_correction=False)\n fit_debiased = fit_power_spectrum(power_spectrum_low, model, bias_correction=True)\n\n bias_corr = block_size / (block_size + 1)\n np.testing.assert_allclose(fit_debiased[\"D\"].value, fit_biased[\"D\"].value * bias_corr)\n np.testing.assert_allclose(fit_debiased[\"err_D\"].value, fit_biased[\"err_D\"].value * bias_corr)\n\n # Biased vs debiased estimates (in comments are the reference values for N_pts_per_block = 150\n # Note how the estimates are better on the right.\n comparisons = {\n \"fc\": [3310.651532245893, 3310.651532245893], # Ref: 3277.6576037747836\n \"D\": [1.472922058628551, 1.1046915439714131], # Ref: 1.0896306365192108\n \"kappa\": [0.15317517466591019, 0.2043759281959786], # Ref: 0.20106518840690035\n \"Rd\": [0.6108705452113169, 0.6106577513480039], # Ref: 0.6168083172053238\n \"Rf\": [93.57020246100325, 124.8037447418174], # Ref: 124.0186805098316\n }\n\n for key, values in comparisons.items():\n for fit, value in zip([fit_biased, fit_debiased], values):\n np.testing.assert_allclose(fit[key].value, value)\n\n assert fit_biased.params[\"Bias correction\"].value is False\n assert fit_debiased.params[\"Bias correction\"].value is True\n", "from lumicks.pylake.kymotracker.detail.calibrated_images import CalibratedKymographChannel\nfrom lumicks.pylake.kymotracker.kymoline import KymoLine, KymoLineGroup, import_kymolinegroup_from_csv\nimport numpy as np\nimport pytest\nfrom lumicks.pylake.tests.data.mock_confocal import generate_kymo\n\n\[email protected](scope=\"session\")\ndef kymolinegroup_io_data():\n test_data = np.zeros((8, 8))\n\n test_img = CalibratedKymographChannel(\"test\", data=test_data, time_step_ns=100e9, pixel_size=2)\n k1 = KymoLine([1, 2, 3], np.array([2, 3, 4]), test_img)\n k2 = KymoLine([2, 3, 4], np.array([3, 4, 5]), test_img)\n k3 = KymoLine([3, 4, 5], np.array([4, 5, 6]), test_img)\n k4 = KymoLine([4, 5, 6], np.array([5, 6, 7]), test_img)\n lines = KymoLineGroup([k1, k2, k3, k4])\n\n for k in lines:\n test_data[k.coordinate_idx, k.time_idx] = 2\n test_data[np.array(k.coordinate_idx) - 1, k.time_idx] = 1\n\n return test_img, lines\n\n\ndef read_txt(testfile, delimiter):\n raw_data = np.loadtxt(testfile, delimiter=delimiter, unpack=True)\n with open(testfile, \"r\") as f:\n data = {}\n header = f.readline().rstrip().split(delimiter)\n line_idx = raw_data[0, :]\n for key, col in zip(header, raw_data):\n data[key] = [col[np.argwhere(line_idx == idx).flatten()] for idx in np.unique(line_idx)]\n\n return data\n\n\[email protected](\"dt, dx, delimiter, sampling_width, sampling_outcome\",\n [[int(1e9), 1.0, ';', 0, 2],\n [int(2e9), 1.0, ';', 0, 2],\n [int(1e9), 2.0, ';', 0, 2],\n [int(1e9), 1.0, ',', 0, 2],\n [int(1e9), 1.0, ';', 1, 3],\n [int(1e9), 2.0, ';', None, None]])\ndef test_kymolinegroup_io(tmpdir_factory, kymolinegroup_io_data, dt, dx, delimiter, sampling_width, sampling_outcome):\n test_img, lines = kymolinegroup_io_data\n\n kymo = generate_kymo(\n \"test\",\n test_img.data,\n dx*1000,\n start=4,\n dt=dt,\n samples_per_pixel=5,\n line_padding=3\n )\n\n # Test round trip through the API\n testfile = f\"{tmpdir_factory.mktemp('pylake')}/test.csv\"\n lines.save(testfile, delimiter, sampling_width)\n read_file = import_kymolinegroup_from_csv(testfile, kymo, \"red\", delimiter=delimiter)\n\n # Test raw fields\n data = read_txt(testfile, delimiter)\n assert 
len(read_file) == len(lines)\n\n for line1, line2 in zip(lines, read_file):\n np.testing.assert_allclose(np.array(line1.coordinate_idx), np.array(line2.coordinate_idx))\n np.testing.assert_allclose(np.array(line1.time_idx), np.array(line2.time_idx))\n\n for line1, time in zip(lines, data[\"time\"]):\n np.testing.assert_allclose(line1.seconds, time)\n\n for line1, coord in zip(lines, data[\"position\"]):\n np.testing.assert_allclose(line1.position, coord)\n\n if sampling_width is None:\n assert len([key for key in data.keys() if \"counts\" in key]) == 0\n else:\n count_field = [key for key in data.keys() if \"counts\" in key][0]\n for line1, cnt in zip(lines, data[count_field]):\n np.testing.assert_allclose([sampling_outcome] * len(line1.coordinate_idx), cnt)\n" ]
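A minimal usage sketch for the mock wide-field helpers in the first file above, assuming those functions are in scope (e.g. imported from the same helper module). The spot coordinates, warp parameters, frame count, and filename are illustrative values chosen for this example, not values taken from the test suite.

# Illustrative only: three spot centres inside the 200 x 100 pixel mock image
spots = [[20.0, 30.0], [80.0, 50.0], [150.0, 70.0]]

image_args = make_alignment_image_data(
    spots,
    red_warp_parameters={"Tx": 2.0, "Ty": 1.0, "theta": 1.5},      # small red-channel misalignment
    blue_warp_parameters={"Tx": -2.0, "Ty": -1.0, "theta": -1.5},  # small blue-channel misalignment
    bit_depth=16,
    camera="wt",
    version=2,
)

# Writes three identical RGB frames plus Bluelake-style JSON metadata to disk
write_tiff_file(image_args, n_frames=3, filename="wt_alignment_example.tiff")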
[ [ "numpy.array", "numpy.dot", "numpy.radians", "numpy.linalg.inv", "numpy.arange", "numpy.eye", "numpy.cos", "numpy.stack", "numpy.ones", "numpy.sin", "numpy.repeat", "numpy.exp", "numpy.zeros" ], [ "numpy.fft.rfftfreq", "numpy.round", "numpy.random.seed", "numpy.testing.assert_allclose" ], [ "numpy.unique", "numpy.argwhere", "numpy.testing.assert_allclose", "numpy.array", "numpy.zeros", "numpy.loadtxt" ] ]
[ { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] }, { "matplotlib": [], "numpy": [], "pandas": [], "scipy": [], "tensorflow": [] } ]